Compare commits


1 Commit

Author: Ankit Nayan
SHA1: 0db4073e94
Date: 2023-01-08 21:25:47 +00:00
Message: chore: different ticker interval for active user

    (cherry picked from commit 215ea8d819)
    Signed-off-by: Prashant Shahi <prashant@signoz.io>
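The "(cherry picked from commit 215ea8d819)" trailer above is the marker that `git cherry-pick -x` leaves when a commit is backported, and the Signed-off-by line is what `--signoff` adds. A minimal sketch of that flow (the release branch name is illustrative):

```bash
# Hypothetical backport flow; the release branch name is illustrative.
git checkout release/v0.x
git cherry-pick -x --signoff 215ea8d819   # -x appends "(cherry picked from commit ...)"
git push origin release/v0.x
```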
3018 changed files with 28336 additions and 252276 deletions

9 .github/CODEOWNERS vendored

@@ -1,10 +1,7 @@
 # CODEOWNERS info: https://help.github.com/en/articles/about-code-owners
 # Owners are automatically requested for review for PRs that changes code
 # that they own.
 * @ankitnayan
-/frontend/ @YounixM
-/frontend/src/container/MetricsApplication @srikanthccv
-/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
+/frontend/ @palashgdev @pranshuchittora
 /deploy/ @prashant-shahi
-/sample-apps/ @prashant-shahi
-.github @prashant-shahi
+/pkg/query-service/ @srikanthccv

2 .github/config.yml vendored

@@ -17,7 +17,7 @@ newPRWelcomeComment: >
 # Comment to be posted to on pull requests merged by a first time user
 firstPRMergeComment: >
   Congrats on merging your first pull request!
   ![minion-party](https://i.imgur.com/Xlg59lP.gif)
   We here at SigNoz are proud of you! 🥳

@@ -1,17 +0,0 @@
-### Summary
-<!-- ✍️ A clear and concise description...-->
-#### Related Issues / PR's
-<!-- ✍️ Add the issues being resolved here and related PR's where applicable -->
-#### Screenshots
-NA
-<!-- ✍️ Add screenshots of before and after changes where applicable-->
-#### Affected Areas and Manually Tested Areas
-<!-- ✍️ Add details of blast radius and dev testing areas where applicable-->

@@ -12,31 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Install dependencies
-        run: cd frontend && yarn install
-      - name: Run ESLint
-        run: cd frontend && npm run lint
-      - name: Run Jest
-        run: cd frontend && npm run jest
-      - name: TSC
-        run: yarn tsc
-        working-directory: ./frontend
-      - name: Build frontend docker image
-        shell: bash
-        run: |
-          make build-frontend-amd64
-  build-frontend-ee:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Create .env file
-        run: |
-          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
-          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
-          echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
+        uses: actions/checkout@v2
       - name: Install dependencies
         run: cd frontend && yarn install
       - name: Run ESLint

@@ -55,15 +31,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.21"
-      - name: Run tests
-        shell: bash
-        run: |
-          make test
+        uses: actions/checkout@v2
       - name: Build query-service image
         shell: bash
         run: |

@@ -73,11 +41,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.21"
+        uses: actions/checkout@v2
      - name: Build EE query-service image
        shell: bash
        run: |

@@ -39,11 +39,11 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v2
+        uses: github/codeql-action/init@v1
         with:
           languages: ${{ matrix.language }}
         # If you wish to specify custom queries, you can do so here or in a config file.

@@ -54,7 +54,7 @@ jobs:
     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
+      uses: github/codeql-action/autobuild@v1
     # Command-line programs to run using the OS shell.
     # 📚 https://git.io/JvXDl

@@ -68,4 +68,4 @@ jobs:
     #   make release
     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
+      uses: github/codeql-action/analyze@v1

@@ -7,7 +7,12 @@ jobs:
   lint-commits:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2.3.1
        with:
+          # we actually need "github.event.pull_request.commits + 1" commit
          fetch-depth: 0
-      - uses: wagoid/commitlint-github-action@v5
+      - uses: actions/setup-node@v2.1.0
+      # or just "yarn" if you depend on "@commitlint/cli" already
+      - run: yarn add @commitlint/cli
+      - run: yarn add @commitlint/config-conventional
+      - run: yarn run commitlint --config ./node_modules/@commitlint/config-conventional/index.js --from HEAD~${{ github.event.pull_request.commits }} --to HEAD
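Both sides of this hunk enforce the same conventional-commit check; one pins `wagoid/commitlint-github-action`, the other drives `@commitlint/cli` by hand. A rough local equivalent, assuming the two packages below are installed:

```bash
# Rough local equivalent of the CI check (package setup is an assumption).
yarn add --dev @commitlint/cli @commitlint/config-conventional
npx commitlint --extends @commitlint/config-conventional --from HEAD~1 --to HEAD
```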

@@ -12,11 +12,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Codebase
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           repository: signoz/gh-bot
       - name: Use Node v16
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v2
         with:
           node-version: 16
       - name: Setup Cache & Install Dependencies

@@ -15,8 +15,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: 'Checkout Repository'
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: 'Dependency Review'
         with:
           fail-on-severity: high
-        uses: actions/dependency-review-action@v3
+        uses: actions/dependency-review-action@v2

@@ -13,12 +13,7 @@ jobs:
       DOCKER_TAG: pull-${{ github.event.number }}
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.21"
+        uses: actions/checkout@v2
       - name: Build query-service image
         env:

@@ -42,7 +37,7 @@ jobs:
           kubectl create ns sample-application
           # apply hotrod k8s manifest file
-          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
           # wait for all deployments in sample-application namespace to be READY
           kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s

@@ -62,7 +57,7 @@ jobs:
             --set frontend.service.type=LoadBalancer \
             --set queryService.image.tag=$DOCKER_TAG \
             --set frontend.image.tag=$DOCKER_TAG
           # get pods, services and the container images
           kubectl get pods -n platform
           kubectl get svc -n platform

@@ -70,18 +65,16 @@ jobs:
       - name: Kick off a sample-app workload
         run: |
           # start the locust swarm
-          kubectl --namespace sample-application run strzal --image=djbingham/curl \
-            --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
-            'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
+          kubectl -n sample-application run strzal --image=djbingham/curl \
+            --restart='OnFailure' -i --rm --command -- curl -X POST -F \
+            'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
-      - name: Get short commit SHA, display tunnel URL and IP Address of the worker node
+      - name: Get short commit SHA and display tunnel URL
         id: get-subdomain
         run: |
           subdomain="pr-$(git rev-parse --short HEAD)"
           echo "URL for tunnelling: https://$subdomain.loca.lt"
-          echo "subdomain=$subdomain" >> $GITHUB_OUTPUT
-          worker_ip="$(curl -4 -s ipconfig.io/ip)"
-          echo "Worker node IP address: $worker_ip"
+          echo "::set-output name=subdomain::$subdomain"
       - name: Start tunnel
         env:
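The `subdomain` hunk above also captures the GitHub Actions output migration: the deprecated `::set-output` workflow command was replaced by appending `key=value` lines to the file named by `$GITHUB_OUTPUT`. Both forms feed the same expression in later steps:

```bash
# Deprecated workflow-command form:
echo "::set-output name=subdomain::pr-abc1234"

# Current form: append to the file GitHub exposes via $GITHUB_OUTPUT:
echo "subdomain=pr-abc1234" >> "$GITHUB_OUTPUT"

# Either way, later steps read it as ${{ steps.get-subdomain.outputs.subdomain }}
```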

@@ -1,31 +0,0 @@
-name: Jest Coverage - changed files
-on:
-  pull_request:
-    branches: develop
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          ref: "refs/heads/develop"
-          token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
-      - name: Fetch branch
-        run: git fetch origin ${{ github.event.pull_request.head.ref }}
-      - run: |
-          git checkout ${{ github.event.pull_request.head.sha }}
-      - uses: actions/setup-node@v4
-        with:
-          node-version: lts/*
-      - name: Install dependencies
-        run: cd frontend && npm install -g yarn && yarn
-      - name: npm run test:changedsince
-        run: cd frontend && npm run i18n:generate-hash && npm run test:changedsince

@@ -9,8 +9,8 @@ jobs:
     timeout-minutes: 60
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-node@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-node@v2
         with:
           node-version: "16.x"
       - name: Install dependencies

@@ -5,7 +5,7 @@ name: VerifyIssue
 on:
   pull_request:
-    types: [edited, opened]
+    types: [edited, synchronize, opened, reopened]
   check_run:
 jobs:

@@ -14,6 +14,7 @@ jobs:
     name: Ensure Pull Request has a linked issue.
     steps:
       - name: Verify Linked Issue
-        uses: srikanthccv/verify-linked-issue-action@v0.71
+        uses: hattan/verify-linked-issue-action@v1.1.0
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -14,27 +14,23 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.21"
+        uses: actions/checkout@v2
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
+        uses: docker/setup-qemu-action@v1
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v1
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v3
+        uses: docker/login-action@v1
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v2.2
+      - uses: benjlevesque/short-sha@v1.2
         id: short-sha
       - name: Get branch name
         id: branch-name
-        uses: tj-actions/branch-names@v7.0.7
+        uses: tj-actions/branch-names@v5.1
       - name: Set docker tag environment
         run: |
           if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then

@@ -46,11 +42,6 @@ jobs:
           else
             echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
           fi
-      - name: Install cross-compilation tools
-        run: |
-          set -ex
-          sudo apt-get update
-          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
       - name: Build and push docker image
         run: make build-push-query-service

@@ -58,27 +49,23 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.21"
+        uses: actions/checkout@v2
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
+        uses: docker/setup-qemu-action@v1
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v1
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v3
+        uses: docker/login-action@v1
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v2.2
+      - uses: benjlevesque/short-sha@v1.2
         id: short-sha
       - name: Get branch name
         id: branch-name
-        uses: tj-actions/branch-names@v7.0.7
+        uses: tj-actions/branch-names@v5.1
       - name: Set docker tag environment
         run: |
           if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then

@@ -90,11 +77,6 @@ jobs:
           else
             echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
           fi
-      - name: Install cross-compilation tools
-        run: |
-          set -ex
-          sudo apt-get update
-          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
       - name: Build and push docker image
         run: make build-push-ee-query-service

@@ -102,7 +84,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       - name: Install dependencies
         working-directory: frontend
         run: yarn install

@@ -115,19 +97,19 @@ jobs:
         run: npm run lint
         continue-on-error: true
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v1
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v3
+        uses: docker/login-action@v1
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v2.2
+      - uses: benjlevesque/short-sha@v1.2
         id: short-sha
       - name: Get branch name
         id: branch-name
-        uses: tj-actions/branch-names@v7.0.7
+        uses: tj-actions/branch-names@v5.1
       - name: Set docker tag environment
         run: |
           if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then

@@ -141,58 +123,3 @@ jobs:
           fi
       - name: Build and push docker image
         run: make build-push-frontend
-  image-build-and-push-frontend-ee:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Create .env file
-        run: |
-          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
-          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
-          echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
-          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
-          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
-          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
-          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
-          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
-          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
-      - name: Install dependencies
-        working-directory: frontend
-        run: yarn install
-      - name: Run Prettier
-        working-directory: frontend
-        run: npm run prettify
-        continue-on-error: true
-      - name: Run ESLint
-        working-directory: frontend
-        run: npm run lint
-        continue-on-error: true
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-        with:
-          version: latest
-      - name: Login to DockerHub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v2.2
-        id: short-sha
-      - name: Get branch name
-        id: branch-name
-        uses: tj-actions/branch-names@v7.0.7
-      - name: Set docker tag environment
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            tag="${{ steps.branch-name.outputs.tag }}"
-            tag="${tag:1}"
-            echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
-          fi
-      - name: Build and push docker image
-        run: make build-push-frontend
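The `DOCKER_TAG` logic repeated in each job above leans on plain shell parameter expansion to turn a git tag like `v0.18.1` into an image tag; a standalone sketch of the same pattern:

```bash
# Standalone sketch of the tag derivation used in the jobs above.
tag="v0.18.1"          # e.g. from the branch-names action's outputs.tag
tag="${tag:1}"         # substring expansion drops the leading "v" -> 0.18.1
echo "DOCKER_TAG=${tag}-ee" >> "$GITHUB_ENV"   # visible to later steps
```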

@@ -12,12 +12,6 @@ on:
 jobs:
   update_release_draft:
-    permissions:
-      # write permission is required to create a github release
-      contents: write
-      # write permission is required for autolabeler
-      # otherwise, read permission is required at least
-      pull-requests: write
     runs-on: ubuntu-latest
     steps:
       # (Optional) GitHub Enterprise requires GHE_HOST variable set

@@ -8,15 +8,9 @@ jobs:
   remove:
     runs-on: ubuntu-latest
     steps:
-      - name: Remove label ok-to-test from PR
-        uses: buildsville/add-remove-label@v2.0.0
+      - name: Remove label
+        uses: buildsville/add-remove-label@v1
         with:
           label: ok-to-test
           type: remove
           token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Remove label testing-deploy from PR
-        uses: buildsville/add-remove-label@v2.0.0
-        with:
-          label: testing-deploy
-          type: remove
-          token: ${{ secrets.GITHUB_TOKEN }}

25 .github/workflows/repo-stats.yml vendored (new file)

@@ -0,0 +1,25 @@
+on:
+  schedule:
+    # Run this once per day, towards the end of the day for keeping the most
+    # recent data point most meaningful (hours are interpreted in UTC).
+    - cron: "0 8 * * *"
+  workflow_dispatch: # Allow for running this manually.
+jobs:
+  j1:
+    name: repostats
+    runs-on: ubuntu-latest
+    steps:
+      - name: run-ghrs
+        uses: jgehrcke/github-repo-stats@v1.1.0
+        with:
+          # Define the stats repository (the repo to fetch
+          # stats for and to generate the report for).
+          # Remove the parameter when the stats repository
+          # and the data repository are the same.
+          repository: signoz/signoz
+          # Set a GitHub API token that can read the stats
+          # repository, and that can push to the data
+          # repository (which this workflow file lives in),
+          # to store data and the report files.
+          ghtoken: ${{ github.token }}

@@ -3,7 +3,7 @@ on:
   pull_request:
     branches:
       - main
-      - develop
+      - v*
     paths:
       - 'frontend/**'
 defaults:

@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Sonar analysis

@@ -24,3 +24,4 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}

@@ -1,54 +0,0 @@
-name: staging-deployment
-# Trigger deployment only on push to develop branch
-on:
-  push:
-    branches:
-      - develop
-jobs:
-  deploy:
-    name: Deploy latest develop branch to staging
-    runs-on: ubuntu-latest
-    environment: staging
-    permissions:
-      contents: 'read'
-      id-token: 'write'
-    steps:
-      - id: 'auth'
-        uses: 'google-github-actions/auth@v2'
-        with:
-          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
-          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
-      - name: 'sdk'
-        uses: 'google-github-actions/setup-gcloud@v2'
-      - name: 'ssh'
-        shell: bash
-        env:
-          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
-          GITHUB_SHA: ${{ github.sha }}
-          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
-          GCP_ZONE: ${{ secrets.GCP_ZONE }}
-          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
-        run: |
-          read -r -d '' COMMAND <<EOF || true
-          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
-          echo "GITHUB_SHA: ${GITHUB_SHA}"
-          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
-          export OTELCOL_TAG="main"
-          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          docker system prune --force
-          docker pull signoz/signoz-otel-collector:main
-          docker pull signoz/signoz-schema-migrator:main
-          cd ~/signoz
-          git status
-          git add .
-          git stash push -m "stashed on $(date --iso-8601=seconds)"
-          git fetch origin
-          git checkout ${GITHUB_BRANCH}
-          git pull
-          make build-ee-query-service-amd64
-          make build-frontend-amd64
-          make run-testing
-          EOF
-          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
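One detail worth noting in the removed workflow: `read -r -d ''` slurps the entire heredoc into `COMMAND`, but exits non-zero because the NUL delimiter is never found, so the trailing `|| true` keeps a `set -e` shell from aborting. A minimal reproduction:

```bash
#!/usr/bin/env bash
set -e
# read -d '' consumes the whole heredoc; it returns non-zero because the
# NUL delimiter never appears, hence the "|| true" guard.
read -r -d '' COMMAND <<EOF || true
echo "hello from the remote host"
uname -a
EOF
echo "$COMMAND"
```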

@@ -1,55 +0,0 @@
-name: testing-deployment
-# Trigger deployment only on testing-deploy label on pull request
-on:
-  pull_request:
-    types: [labeled]
-jobs:
-  deploy:
-    name: Deploy PR branch to testing
-    runs-on: ubuntu-latest
-    environment: testing
-    if: ${{ github.event.label.name == 'testing-deploy' }}
-    permissions:
-      contents: 'read'
-      id-token: 'write'
-    steps:
-      - id: 'auth'
-        uses: 'google-github-actions/auth@v2'
-        with:
-          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
-          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
-      - name: 'sdk'
-        uses: 'google-github-actions/setup-gcloud@v2'
-      - name: 'ssh'
-        shell: bash
-        env:
-          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
-          GITHUB_SHA: ${{ github.sha }}
-          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
-          GCP_ZONE: ${{ secrets.GCP_ZONE }}
-          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
-        run: |
-          read -r -d '' COMMAND <<EOF || true
-          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
-          echo "GITHUB_SHA: ${GITHUB_SHA}"
-          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
-          export DEV_BUILD="1"
-          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          docker system prune --force
-          cd ~/signoz
-          git status
-          git add .
-          git stash push -m "stashed on $(date --iso-8601=seconds)"
-          git fetch origin
-          git checkout develop
-          git pull
-          # This is added to include the scenerio when new commit in PR is force-pushed
-          git branch -D ${GITHUB_BRANCH}
-          git checkout --track origin/${GITHUB_BRANCH}
-          make build-ee-query-service-amd64
-          make build-frontend-amd64
-          make run-testing
-          EOF
-          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

22 .gitignore vendored

@@ -1,5 +1,7 @@
 node_modules
+yarn.lock
+package.json
 deploy/docker/environment_tiny/common_test
 frontend/node_modules

@@ -37,7 +39,7 @@ frontend/src/constants/env.ts
 **/locust-scripts/__pycache__/
 **/__debug_bin
-.env
+frontend/*.env
 pkg/query-service/signoz.db
 pkg/query-service/tests/test-deploy/data/

@@ -47,23 +49,7 @@ ee/query-service/signoz.db
 ee/query-service/tests/test-deploy/data/
 # local data
-*.backup
 *.db
 /deploy/docker/clickhouse-setup/data/
 /deploy/docker-swarm/clickhouse-setup/data/
 bin/
-*/query-service/queries.active
-# e2e
-e2e/node_modules/
-e2e/test-results/
-e2e/playwright-report/
-e2e/blob-report/
-e2e/playwright/.cache/
-e2e/.auth
-# go
-vendor/
-**/main/**

@@ -80,7 +80,7 @@ Before sending us a pull request, please ensure that,
 GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
 [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
-**Note:** Unless your change is small, **please** consider submitting different Pull Request(s):
+**Note:** Unless your change is small, **please** consider submitting different Pull Rrequest(s):
 * 1⃣ First PR should include the overall structure of the new component:
   * Readme, configuration, interfaces or base classes, etc...

@@ -215,26 +215,9 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
 # 4. Contribute to Backend (Query-Service) 🌑
-**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
+[**https://github.com/SigNoz/signoz/tree/develop/pkg/query-service**](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)
-## 4.1 Prerequisites
-### 4.1.1 Install SQLite3
-- Run `sqlite3` command to check if you already have SQLite3 installed on your machine.
-- If not installed already, Install using below command
-  - on Linux
-    - on Debian / Ubuntu
-      ```
-      sudo apt install sqlite3
-      ```
-    - on CentOS / Fedora / RedHat
-      ```
-      sudo yum install sqlite3
-      ```
-## 4.2 To run ClickHouse setup (recommended for local development)
+## 4.1 To run ClickHouse setup (recommended for local development)
 - Clone the SigNoz repository and cd into signoz directory,
 ```

@@ -338,7 +321,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
 **5.1.1 To install the HotROD sample app:**
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
 | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```

@@ -361,7 +344,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
 **5.1.4 To delete the HotROD sample app:**
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
 | HOTROD_NAMESPACE=sample-application bash
 ```

122 Makefile

@@ -8,7 +8,6 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
 BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
 BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
-DEV_BUILD ?= "" # set to any non-empty value to enable dev build
 # Internal variables or constants.
 FRONTEND_DIRECTORY ?= frontend
@@ -16,15 +15,15 @@ QUERY_SERVICE_DIRECTORY ?= pkg/query-service
 EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
 STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
 SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
-GOOS ?= $(shell go env GOOS)
-GOARCH ?= $(shell go env GOARCH)
-GOPATH ?= $(shell go env GOPATH)
+LOCAL_GOOS ?= $(shell go env GOOS)
+LOCAL_GOARCH ?= $(shell go env GOARCH)
 REPONAME ?= signoz
-DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
+DOCKER_TAG ?= latest
 FRONTEND_DOCKER_IMAGE ?= frontend
 QUERY_SERVICE_DOCKER_IMAGE ?= query-service
+DEV_BUILD ?= ""
@@ -38,84 +37,44 @@ LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildV
 DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
 all: build-push-frontend build-push-query-service
-# Steps to build static files of frontend
-build-frontend-static:
-    @echo "------------------"
-    @echo "--> Building frontend static files"
-    @echo "------------------"
-    @cd $(FRONTEND_DIRECTORY) && \
-    rm -rf build && \
-    CI=1 yarn install && \
-    yarn build && \
-    ls -l build
 # Steps to build and push docker image of frontend
 .PHONY: build-frontend-amd64 build-push-frontend
 # Step to build docker image of frontend in amd64 (used in build pipeline)
-build-frontend-amd64: build-frontend-static
+build-frontend-amd64:
     @echo "------------------"
     @echo "--> Building frontend docker image for amd64"
     @echo "------------------"
     @cd $(FRONTEND_DIRECTORY) && \
-    docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
+    docker build --file Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
     --build-arg TARGETPLATFORM="linux/amd64" .
 # Step to build and push docker image of frontend(used in push pipeline)
-build-push-frontend: build-frontend-static
+build-push-frontend:
     @echo "------------------"
     @echo "--> Building and pushing frontend docker image"
     @echo "------------------"
     @cd $(FRONTEND_DIRECTORY) && \
-    docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
+    docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 \
     --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
-# Steps to build static binary of query service
-.PHONY: build-query-service-static
-build-query-service-static:
-    @echo "------------------"
-    @echo "--> Building query-service static binary"
-    @echo "------------------"
-    @if [ $(DEV_BUILD) != "" ]; then \
-        cd $(QUERY_SERVICE_DIRECTORY) && \
-        CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-        -ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
-    else \
-        cd $(QUERY_SERVICE_DIRECTORY) && \
-        CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-        -ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
-    fi
-.PHONY: build-query-service-static-amd64
-build-query-service-static-amd64:
-    make GOARCH=amd64 build-query-service-static
-.PHONY: build-query-service-static-arm64
-build-query-service-static-arm64:
-    make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
-# Steps to build static binary of query service for all platforms
-.PHONY: build-query-service-static-all
-build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
 # Steps to build and push docker image of query service
 .PHONY: build-query-service-amd64 build-push-query-service
 # Step to build docker image of query service in amd64 (used in build pipeline)
-build-query-service-amd64: build-query-service-static-amd64
+build-query-service-amd64:
     @echo "------------------"
     @echo "--> Building query-service docker image for amd64"
     @echo "------------------"
     @docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
-    --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-    --build-arg TARGETPLATFORM="linux/amd64" .
+    --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
+    --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
 # Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
-build-push-query-service: build-query-service-static-all
+build-push-query-service:
     @echo "------------------"
     @echo "--> Building and pushing query-service docker image"
     @echo "------------------"
-    @docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
-    --push --platform linux/arm64,linux/amd64 \
+    @docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane --no-cache \
+    --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
     --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
 # Step to build EE docker image of query service in amd64 (used in build pipeline)
@@ -123,14 +82,24 @@ build-ee-query-service-amd64:
     @echo "------------------"
     @echo "--> Building query-service docker image for amd64"
     @echo "------------------"
-    make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64
+    @if [ $(DEV_BUILD) != "" ]; then \
+        docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
+        --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
+        --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
+    else \
+        docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
+        --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
+        --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
+    fi
 # Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
 build-push-ee-query-service:
     @echo "------------------"
     @echo "--> Building and pushing query-service docker image"
     @echo "------------------"
-    make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service
+    @docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
+    --progress plane --no-cache --push --platform linux/arm64,linux/amd64 \
+    --build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
 dev-setup:
     mkdir -p /var/lib/signoz
@@ -141,7 +110,7 @@ dev-setup:
     @echo "------------------"
 run-local:
-    @docker-compose -f \
+    @LOCAL_GOOS=$(LOCAL_GOOS) LOCAL_GOARCH=$(LOCAL_GOARCH) docker-compose -f \
     $(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
     up --build -d
@@ -150,41 +119,16 @@ down-local:
     $(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
     down -v
-pull-signoz:
-    @docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
-run-signoz:
+run-x86:
     @docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
-run-testing:
-    @docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.testing.yaml up --build -d
-down-signoz:
+down-x86:
     @docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
 clear-standalone-data:
     @docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
-    sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
+    sh -c "cd /pwd && rm -rf alertmanager/* clickhous*/* signoz/* zookeeper-*/*"
 clear-swarm-data:
     @docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
-    sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
+    sh -c "cd /pwd && rm -rf alertmanager/* clickhous*/* signoz/* zookeeper-*/*"
-clear-standalone-ch:
-    @docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
-    sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
-clear-swarm-ch:
-    @docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
-    sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
-test:
-    go test ./pkg/query-service/app/metrics/...
-    go test ./pkg/query-service/cache/...
-    go test ./pkg/query-service/app/...
-    go test ./pkg/query-service/app/querier/...
-    go test ./pkg/query-service/converter/...
-    go test ./pkg/query-service/formatter/...
-    go test ./pkg/query-service/tests/integration/...
-    go test ./pkg/query-service/rules/...
-    go test ./pkg/query-service/collectorsimulator/...
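The `LD_FLAGS` threaded through this Makefile use Go's `-X` linker flag to stamp build metadata into package-level string variables at link time; a minimal sketch of the mechanism (the variable paths are illustrative, not the actual query-service ones):

```bash
# Minimal sketch of -X stamping; the variable path is illustrative.
BUILD_HASH="$(git rev-parse --short HEAD)"
BUILD_TIME="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
LD_FLAGS="-X main.buildHash=${BUILD_HASH} -X main.buildTime=${BUILD_TIME}"
go build -ldflags "${LD_FLAGS}" -o bin/query-service .
```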

@@ -1,94 +1,62 @@
 <p align="center">
   <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
   <p align="center">Überwache deine Anwendungen und behebe Probleme in deinen bereitgestellten Anwendungen. SigNoz ist eine Open Source Alternative zu DataDog, New Relic, etc.</p>
 </p>
 <p align="center">
-  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
+  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
   <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
   <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
     <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
 </p>
 <h3 align="center">
   <a href="https://signoz.io/docs"><b>Dokumentation</b></a> &bull;
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> &bull;
   <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
   <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
   <a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
-  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
+  <a href="https://twitter.com/SigNozHQ"><b>Twitter</b></a>
 </h3>
 ##
-SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. Mit SigNoz können Sie Folgendes tun:
-👉 Visualisieren Sie Metriken, Traces und Logs in einer einzigen Oberfläche.
-👉 Sie können Metriken wie die p99-Latenz, Fehlerquoten für Ihre Dienste, externe API-Aufrufe und individuelle Endpunkte anzeigen.
-👉 Sie können die Ursache des Problems ermitteln, indem Sie zu den genauen Traces gehen, die das Problem verursachen, und detaillierte Flammenbilder einzelner Anfragetraces anzeigen.
-👉 Führen Sie Aggregationen auf Trace-Daten durch, um geschäftsrelevante Metriken zu erhalten.
-👉 Filtern und Abfragen von Logs, Erstellen von Dashboards und Benachrichtigungen basierend auf Attributen in den Logs.
-👉 Automatische Aufzeichnung von Ausnahmen in Python, Java, Ruby und Javascript.
-👉 Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
-##
-### Anwendung Metriken
-![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)
-### Verteiltes Tracing
-<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
-<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
-### Log Verwaltung
-<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
-### Infrastruktur Überwachung
-<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
-### Exceptions Monitoring
-![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-4188d024-3ac9-4799-be95-f5ea9c45436f.png)
-### Alarme
-<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
+SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. SigNoz benutzt verteilte Einzelschritt-Fehlersuchen, um Einblick in deinen Software-Stack zu bekommen.
+👉 Du kannst Werte wie die P99-Latenz und die Fehler Häufigkeit von deinen Services, externen API Aufrufen und einzelnen Endpunkten sehen.
+👉 Du kannst die Ursache des Problems finden, indem du zu dem Einzelschritt gehst, der das Problem verursacht und dir detaillierte Flamegraphs von einzelnen Abfragefehlersuchen anzeigen lassen.
+👉 Erstelle Aggregate auf Basis von Fehlersuche Daten, um geschäftsrelevante Metriken zu erhalten.
+![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
 <br /><br />
+<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
 ## Werde Teil unserer Slack Community
 Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋
 <br /><br />
+<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
 ## Funktionen:
-- Einheitliche Benutzeroberfläche für Metriken, Traces und Logs. Keine Notwendigkeit, zwischen Prometheus und Jaeger zu wechseln, um Probleme zu debuggen oder ein separates Log-Tool wie Elastic neben Ihrer Metriken- und Traces-Stack zu verwenden.
-- Überblick über Anwendungsmetriken wie RPS, Latenzzeiten des 50tes/90tes/99tes Perzentils und Fehlerquoten.
-- Langsamste Endpunkte in Ihrer Anwendung.
-- Zeigen Sie genaue Anfragetraces an, um Probleme in nachgelagerten Diensten, langsamen Datenbankabfragen oder Aufrufen von Drittanbieterdiensten wie Zahlungsgateways zu identifizieren.
-- Filtern Sie Traces nach Dienstname, Operation, Latenz, Fehler, Tags/Annotationen.
-- Führen Sie Aggregationen auf Trace-Daten (Ereignisse/Spans) durch, um geschäftsrelevante Metriken zu erhalten. Beispielsweise können Sie die Fehlerquote und die 99tes Perzentillatenz für `customer_type: gold` oder `deployment_version: v2` oder `external_call: paypal` erhalten.
-- Native Unterstützung für OpenTelemetry-Logs, erweiterten Log-Abfrage-Builder und automatische Log-Sammlung aus dem Kubernetes-Cluster.
-- Blitzschnelle Log-Analytik ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
-- End-to-End-Sichtbarkeit der Infrastrukturleistung, Aufnahme von Metriken aus allen Arten von Host-Umgebungen.
-- Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
+- Übersichtsmetriken deiner Anwendung wie RPS, 50tes/90tes/99tes Quantil Latenzen und Fehler Häufigkeiten.
+- Übersicht der langsamsten Endpunkte deiner Anwendung.
+- Sieh dir die genaue Einzelschritt-Fehlersuche deiner Abfrage an, um Fehler in nachgelagerten Diensten, langsamen Datenbank Abfragen und Aufrufen von Drittanbieter Diensten wie Zahlungsportalen, etc. zu finden.
+- Filtere Einzelschritt-Fehlersuchen nach Dienstname, Latenz, Fehler, Stichworten/ Anmerkungen.
+- Führe Aggregate auf Basis von Einzelschritt-Fehlersuche Daten (Ereignisse/Abstände) aus, um geschäftsrelevante Metriken zu erhalten. Du kannst dir z. B. die Fehlerrate und 99tes Quantil Latenz von `customer_type: gold`, `deployment_version: v2` oder `external_call: paypal` ausgeben lassen.
+- Einheitliche Benutzeroberfläche für Metriken und Einzelschritt-Fehlersuchen. Du musst nicht zwischen Prometheus und Jaeger hin und her wechseln, um Fehler zu beheben.
 <br /><br />
+<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
 ## Wieso SigNoz?
 Als Entwickler fanden wir es anstrengend, uns für jede kleine Funktion, die wir haben wollten, auf Closed Source SaaS Anbieter verlassen zu müssen. Closed Source Anbieter überraschen ihre Kunden zum Monatsende oft mit hohen Rechnungen, die keine Transparenz bzgl. der Kostenaufteilung bieten.
@@ -97,43 +65,45 @@ Wir wollten eine selbst gehostete, Open Source Variante von Lösungen wie DataDo
 Open Source gibt dir außerdem die totale Kontrolle über deine Konfiguration, Stichprobenentnahme und Betriebszeit. Du kannst des Weiteren neue Module auf Basis von SigNoz bauen, die erweiterte, geschäftsspezifische Funktionen anbieten.
-### Languages supported:
-Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als Bibliothek, mit der Sie Ihre Anwendungen instrumentieren können. Daher wird jedes von OpenTelemetry unterstützte Framework und jede Sprache auch von SignNoz unterstützt. Einige der wichtigsten unterstützten Sprachen sind:
+### Unterstützte Programmiersprachen:
+Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als die Software Library, die du nutzen kannst um deine Anwendungen auszuführen. Jedes Framework und jede Sprache die von OpenTelemetry unterstützt wird, wird auch von SigNoz unterstützt. Einige der unterstützten, größeren Programmiersprachen sind:
 - Java
 - Python
 - NodeJS
 - Go
-- PHP
-- .NET
-- Ruby
-- Elixir
-- Rust
 Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/
 <br /><br />
+<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
 ## Erste Schritte mit SigNoz
 ### Bereitstellung mit Docker
-Bitte folge den [hier](https://signoz.io/docs/install/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.
-Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/install/troubleshooting/) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.
+Bitte folge den [hier](https://signoz.io/docs/deployment/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.
+Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/deployment/troubleshooting) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.
 <p>&nbsp </p>
-### Deploy in Kubernetes using Helm
+### Bereitstellung mit Kubernetes und Helm
 Bitte folge den [hier](https://signoz.io/docs/deployment/helm_chart) aufgelisteten Schritten, um deine Anwendung mit Helm Charts bereitzustellen.
 <br /><br />
-## Vergleiche mit bekannten Tools
-### SigNoz vs Prometheus
+<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
+## Vergleiche mit anderen Lösungen
+### SigNoz vs. Prometheus
 Prometheus ist gut, falls du dich nur für Metriken interessierst. Wenn du eine nahtlose Integration von Metriken und Einzelschritt-Fehlersuchen haben möchtest, ist die Kombination aus Prometheus und Jaeger nicht das Richtige für dich.
@@ -141,76 +111,49 @@ Unser Ziel ist es, eine integrierte Benutzeroberfläche aus Metriken und Einzels
 <p>&nbsp </p>
-### SigNoz vs Jaeger
+### SigNoz vs. Jaeger
 Jaeger kümmert sich nur um verteilte Einzelschritt-Fehlersuche. SigNoz erstellt sowohl Metriken als auch Einzelschritt-Fehlersuche, daneben haben wir auch Protokoll Verwaltung auf unserem Plan.
 Außerdem hat SigNoz noch mehr spezielle Funktionen im Vergleich zu Jaeger:
-- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an.
-- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag `customer_type=premium`, was hingegen mit SigNoz leicht umsetzbar ist.
-<p>&nbsp </p>
-### SigNoz vs Elastic
-- Die Verwaltung von SigNoz-Protokollen basiert auf 'ClickHouse', einem spaltenbasierten OLAP-Datenspeicher, der aggregierte Protokollanalyseabfragen wesentlich effizienter macht.
-- 50 % geringerer Ressourcenbedarf im Vergleich zu Elastic während der Aufnahme.
-Wir haben Benchmarks veröffentlicht, die Elastic mit SignNoz vergleichen. Schauen Sie es sich [hier](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
-<p>&nbsp </p>
-### SigNoz vs Loki
-- SigNoz unterstützt Aggregationen von Daten mit hoher Kardinalität über ein großes Volumen, Loki hingegen nicht.
-- SigNoz unterstützt Indizes über Daten mit hoher Kardinalität und hat keine Beschränkungen hinsichtlich der Anzahl der Indizes, während Loki maximale Streams erreicht, wenn ein paar Indizes hinzugefügt werden.
-- Das Durchsuchen großer Datenmengen ist in Loki im Vergleich zu SigNoz schwierig und langsam.
-Wir haben Benchmarks veröffentlicht, die Loki mit SigNoz vergleichen. Schauen Sie es sich [hier](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
+- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an
+- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag - customer_type='premium', was hingegen mit SigNoz leicht umsetzbar ist.
 <br /><br />
+<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
 ## Zum Projekt beitragen
-Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md), durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
-Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem #contributing Kanal in unserer [slack community](https://signoz.io/slack)
-### Unsere Projektbetreuer
-#### Backend
-- [Ankit Nayan](https://github.com/ankitnayan)
-- [Nityananda Gohain](https://github.com/nityanandagohain)
-- [Srikanth Chekuri](https://github.com/srikanthccv)
-- [Vishal Sharma](https://github.com/makeavish)
-#### Frontend
-- [Palash Gupta](https://github.com/palashgdev)
-- [Yunus M](https://github.com/YounixM)
-- [Rajat Dabade](https://github.com/Rajat-Dabade)
-#### DevOps
-- [Prashant Shahi](https://github.com/prashant-shahi)
+Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md) durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
+Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://signoz.io/slack).
 <br /><br />
+<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
 ## Dokumentation
 Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.
 <br /><br />
-## Gemeinschaft
-Werde Teil der [slack community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
+<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
+## Community
+Werde Teil der [Slack Community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
 Falls du irgendwelche Ideen, Fragen oder Feedback hast, kannst du sie gerne über unsere [Github Discussions](https://github.com/SigNoz/signoz/discussions) mit uns teilen.
-Wie immer, Dank an unsere großartigen Mitwirkenden!
+Wie immer, danke an unsere großartigen Unterstützer!
 <a href="https://github.com/signoz/signoz/graphs/contributors">
   <img src="https://contrib.rocks/image?repo=signoz/signoz" />
 </a>

View File

@@ -5,7 +5,7 @@
</p>

<p align="center">
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
  <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>

@@ -23,7 +23,7 @@

##

SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can:

👉 Visualise Metrics, Traces and Logs in a single pane of glass
@@ -35,41 +35,19 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
👉 Filter and query logs, build dashboards and alerts based on attributes in logs

👉 Record exceptions automatically in Python, Java, Ruby, and Javascript

![screenzy-1670570187181](https://user-images.githubusercontent.com/504541/206646629-829fdafe-70e2-4503-a9c4-1301b7918586.png)
<br />
![screenzy-1670570193901](https://user-images.githubusercontent.com/504541/206646676-a676fdeb-331c-4847-aea9-d1cabf7c47e1.png)
<br />
![screenzy-1670570199026](https://user-images.githubusercontent.com/504541/206646754-28c5534f-0377-428c-9c6e-5c7c0d9dd22d.png)
<br />
![screenzy-1670569888865](https://user-images.githubusercontent.com/504541/206645819-1e865a56-71b4-4fde-80cc-fbdb137a4da5.png)
👉 Easy to set alerts with DIY query builder
### Application Metrics
![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)
### Distributed Tracing
<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
### Logs Management
<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
### Infrastructure Monitoring
<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
### Exceptions Monitoring
![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-4188d024-3ac9-4799-be95-f5ea9c45436f.png)
### Alerts
<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Join our Slack community
@@ -77,6 +55,7 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

## Features:

@@ -86,13 +65,10 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋

- See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
- Filter traces by service name, operation, latency, error, tags/annotations.
- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s cluster
- Lightning quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
- End-to-End visibility into infrastructure performance, ingest metrics from all kinds of host environments
- Easy to set alerts with DIY query builder
<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

## Why SigNoz?

@@ -108,7 +84,7 @@ We support [OpenTelemetry](https://opentelemetry.io) as the library which you ca

- Java
- Python
- Node.js
- Go
- PHP
- .NET
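
As a hedged illustration of what instrumenting an app and pointing it at SigNoz can look like (Python auto-instrumentation here; the OTLP ports are the collector defaults used in the compose files later in this document):

```sh
# Minimal sketch: export OTLP to a locally running SigNoz collector.
# Assumes the opentelemetry-distro Python packages are installed; other
# languages follow the same endpoint/resource-attribute pattern.
export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317"
export OTEL_RESOURCE_ATTRIBUTES="service.name=my-service"
opentelemetry-instrument python app.py
```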
@@ -121,14 +97,15 @@ You can find the complete list of languages here - https://opentelemetry.io/docs
<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

## Getting Started

### Deploy using Docker
Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using Docker; a rough sketch of those steps follows below.

The [troubleshooting instructions](https://signoz.io/docs/install/troubleshooting/) may be helpful if you face any issues.

<p>&nbsp;</p>
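
For orientation, the Docker path boils down to roughly the following; this is a sketch assuming the repo layout and compose file referenced elsewhere in this document, and the linked docs remain authoritative:

```sh
# Hedged sketch of the Docker install; see the linked docs for the real steps.
git clone https://github.com/SigNoz/signoz.git
cd signoz/deploy
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```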
@@ -139,6 +116,7 @@ Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_cha
<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

## Comparisons to Familiar Tools
@@ -166,8 +144,6 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:
- SigNoz log management is based on ClickHouse, a columnar OLAP datastore, which makes aggregate log analytics queries much more efficient
- 50% lower resource requirement compared to Elastic during ingestion
We have published benchmarks comparing Elastic with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
<p>&nbsp;</p>

### SigNoz vs Loki

@@ -176,10 +152,9 @@ We have published benchmarks comparing Elastic with SigNoz. Check it out [here](

- SigNoz supports indexes over high cardinality data and has no limitations on the number of indexes, while Loki reaches max streams with a few indexes added to it.
- Searching over a huge volume of data is difficult and slow in Loki compared to SigNoz
We have published benchmarks comparing Loki with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

## Contributing
@@ -199,16 +174,14 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)
#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)
- [Dhawal Sanghvi](https://github.com/dhawal1248)
<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

## Documentation

@@ -216,6 +189,7 @@ You can find docs at https://signoz.io/docs/. If you need any clarification or f

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Community

View File

@@ -84,9 +84,9 @@ Você pode encontrar a lista completa de linguagens aqui - https://opentelemetry
### Deploy using Docker

Follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using Docker.

This [troubleshooting guide](https://signoz.io/docs/install/troubleshooting/) may be useful if you run into any issues.

<p>&nbsp;</p>

View File

@@ -1,227 +1,170 @@
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />

<p align="center">Monitor your applications and troubleshoot problems in your deployed applications. An open-source alternative to DataDog and NewRelic.</p>
</p>

<p align="center">
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
  <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>

<h3 align="center">
  <a href="https://signoz.io/docs"><b>Documentation</b></a>
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe in Chinese</b></a>
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>ReadMe in German</b></a>
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe in Portuguese</b></a>
  <a href="https://signoz.io/slack"><b>Slack Community</b></a>
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
##

SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can:

👉 Visualise Metrics, Traces and Logs in a single pane of glass

👉 See p99 latency and error rates for your services, external API calls, and individual endpoints

👉 Find the root cause of problems by going to the exact traces that caused them and inspecting per-request flamegraphs

👉 Run aggregates on trace data to get business-relevant metrics

👉 Filter and query logs, and build dashboards and alerts based on log attributes

👉 Record exceptions automatically in Python, Java, Ruby, and Javascript

👉 Easy to set alerts with DIY query builder

### Application Metrics

![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)

### Distributed Tracing

<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">

<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">

### Logs Management

<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">

### Infrastructure Monitoring

<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">

### Exceptions Monitoring

![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-4188d024-3ac9-4799-be95-f5ea9c45436f.png)

### Alerts

<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
<br /><br />

## Join our Slack community

Come say hi to us on [Slack](https://signoz.io/slack) 👋

<br /><br />

## Features:

- Unified UI for metrics, traces and logs. No need to switch from Prometheus to Jaeger to debug issues, or use a logs tool like Elastic separate from your metrics and traces
- Application overview metrics like RPS, 50th/90th/99th percentile latencies, and error rates out of the box
- Slowest endpoints in your application
- See exact request traces to figure out issues in downstream services, slow DB queries, and calls to 3rd party services like payment gateways
- Filter traces by service name, operation, latency, error, tags/annotations
- Run aggregates on trace data (events/spans) to get business-relevant metrics, e.g. the error rate and p99 latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s clusters
- Lightning quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
- End-to-end visibility into infrastructure performance; ingest metrics from all kinds of host environments
- Easy to set alerts with DIY query builder
<br /><br />

## Why SigNoz?

As developers, we found it annoying to rely on closed-source SaaS vendors for every small feature we wanted. Closed-source vendors often surprise you with a huge month-end bill without any transparency.

We wanted to make a self-hosted, open-source version of tools like DataDog and NewRelic for companies that have privacy and security concerns about sending customer data to third-party services.

Being open source also gives you complete control of your configuration, sampling, and uptime. You can also build modules on top of SigNoz to extend business-specific capabilities.

### Supported languages:

We support [OpenTelemetry](https://opentelemetry.io) as the library which you can use to instrument your applications. So any framework and language supported by OpenTelemetry is also supported by SigNoz. Some of the main supported languages:

- Java
- Python
- NodeJS
- Go
- PHP
- .NET
- Ruby
- Elixir
- Rust

You can find the complete list of supported languages here - https://opentelemetry.io/docs/
<br /><br />

## Getting Started

### Deploy using Docker

Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using Docker.

The [troubleshooting instructions](https://signoz.io/docs/install/troubleshooting/) may be helpful if you face any issues.

<p>&nbsp;</p>

### Deploy in Kubernetes using Helm

Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts (a rough sketch follows below).

<br /><br />
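As a rough sketch of the Helm flow (the chart repo URL, release name, and namespace below are assumptions; follow the linked docs for the exact steps):

```sh
# Hypothetical Helm install; names and chart repo URL are assumptions.
helm repo add signoz https://charts.signoz.io
helm repo update
helm install my-release signoz/signoz --namespace platform --create-namespace
```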
## Comparisons to Familiar Tools

### SigNoz vs Prometheus

Prometheus is good if you only want to do metrics. But if you want a seamless experience between metrics and traces, the current experience of stitching together Prometheus and Jaeger is not great.

Our goal is to provide an integrated UI between metrics and traces, similar to what SaaS vendors like DataDog provide, with the ability to filter and aggregate traces, something which Jaeger currently lacks.

<p>&nbsp;</p>

### SigNoz vs Jaeger

Jaeger only does distributed tracing; SigNoz supports metrics, traces, and logs, all the pillars of observability.

Moreover, SigNoz has a few more advanced features compared to Jaeger:

- The Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't get aggregates on filtered traces, e.g. the p99 latency of requests tagged `customer_type='premium'`, which can easily be done with SigNoz

<p>&nbsp;</p>

### SigNoz vs Elastic

- SigNoz log management is based on ClickHouse, a columnar OLAP datastore, which makes aggregate log analytics queries much more efficient
- 50% lower resource requirement compared to Elastic during ingestion

We have published benchmarks comparing Elastic with SigNoz. Check them out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)

<p>&nbsp;</p>

### SigNoz vs Loki

- SigNoz supports aggregations over high-cardinality data at large volumes; Loki does not
- SigNoz supports indexes over high-cardinality data with no limit on the number of indexes, while Loki reaches max streams once a few indexes are added
- Searching over a huge volume of data is difficult and slow in Loki compared to SigNoz

We have published benchmarks comparing Loki with SigNoz. Check them out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
<br /><br />

## Contributing

We ❤️ contributions, big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) first before you start contributing to SigNoz.

Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community](https://signoz.io/slack).

### Project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)

#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)

<br /><br />

## Documentation

You can find docs at https://signoz.io/docs/. If you need any clarification or find something missing, feel free to raise a GitHub issue with the label `documentation` or reach out to us on the community Slack channel.

<br /><br />

## Community

Join the [Slack community](https://signoz.io/slack) to learn more about distributed tracing, observability, and SigNoz, and to connect with other users and contributors.

If you have any ideas, questions, or feedback, please share them in our [GitHub Discussions](https://github.com/SigNoz/signoz/discussions)

As always, thanks to our amazing contributors!

<a href="https://github.com/signoz/signoz/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>

View File

@@ -27,6 +27,12 @@ For x86 chip (amd):
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```
For Mac with Apple chip (arm):
```sh
docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
```
Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see
the data generated from hotrod in the SigNoz UI.
@@ -58,7 +64,7 @@ from the HotROD application, you should see the data generated from hotrod in Si
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
```
To generate load: To generate load:
@@ -66,7 +72,7 @@ To generate load:
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
  'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
```

To stop load:

View File

@@ -7,21 +7,9 @@
</default>
<s3>
  <type>s3</type>
  <!-- For S3 cold storage,
       if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
       if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
       For GCS cold storage,
       endpoint should be https://storage.googleapis.com/<bucket-name>/data/
  -->
  <endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
  <access_key_id>ACCESS-KEY-ID</access_key_id>
  <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
  <!-- In case of S3, uncomment the below configuration in case you want to read
       AWS credentials from the Environment variables if they exist. -->
  <!-- <use_environment_credentials>true</use_environment_credentials> -->
  <!-- In case of GCS, uncomment the below configuration, since GCS does
       not support batch deletion and result in error messages in logs. -->
  <!-- <support_batch_delete>false</support_batch_delete> -->
</s3>
</disks>
<policies>
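
Once a cold-storage disk such as the `s3` entry above is configured, one quick way to confirm that ClickHouse picked it up is to list its disks; this assumes `clickhouse-client` can reach the server:

```sh
# The s3 disk should appear alongside the default local disk if the
# storage configuration above was loaded correctly.
clickhouse-client --query "SELECT name, type, path FROM system.disks"
```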

View File

@@ -1,7 +1,7 @@
version: "3.9" version: "3.9"
x-clickhouse-defaults: &clickhouse-defaults x-clickhouse-defaults: &clickhouse-defaults
image: clickhouse/clickhouse-server:24.1.2-alpine image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true tty: true
deploy: deploy:
restart_policy: restart_policy:
@@ -16,14 +16,7 @@ x-clickhouse-defaults: &clickhouse-defaults
max-file: "3" max-file: "3"
healthcheck: healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'" # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
interval: 30s interval: 30s
timeout: 5s timeout: 5s
retries: 3 retries: 3
@@ -33,17 +26,15 @@ x-clickhouse-defaults: &clickhouse-defaults
soft: 262144 soft: 262144
hard: 262144 hard: 262144
x-db-depend: &db-depend x-clickhouse-depend: &clickhouse-depend
depends_on: depends_on:
- clickhouse - clickhouse
- otel-collector-migrator
# - clickhouse-2 # - clickhouse-2
# - clickhouse-3 # - clickhouse-3
services: services:
zookeeper-1: zookeeper-1:
image: bitnami/zookeeper:3.7.1 image: bitnami/zookeeper:3.7.0
hostname: zookeeper-1 hostname: zookeeper-1
user: root user: root
ports: ports:
@@ -133,7 +124,7 @@ services:
# - ./data/clickhouse-3/:/var/lib/clickhouse/ # - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager: alertmanager:
image: signoz/alertmanager:0.23.5 image: signoz/alertmanager:0.23.0-0.2
volumes: volumes:
- ./data/alertmanager:/data - ./data/alertmanager:/data
command: command:
@@ -146,12 +137,8 @@ services:
condition: on-failure condition: on-failure
query-service: query-service:
image: signoz/query-service:0.46.0 image: signoz/query-service:0.13.0
command: command: ["-config=/root/config/prometheus.yml"]
[
"-config=/root/config/prometheus.yml",
# "--prefer-delta=true"
]
# ports: # ports:
# - "6060:6060" # pprof port # - "6060:6060" # pprof port
# - "8080:8080" # query-service port # - "8080:8080" # query-service port
@@ -160,7 +147,7 @@ services:
- ../dashboards:/root/config/dashboards - ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/ - ./data/signoz/:/var/lib/signoz/
environment: environment:
- ClickHouseUrl=tcp://clickhouse:9000 - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/ - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards - DASHBOARDS_PATH=/root/config/dashboards
@@ -169,24 +156,17 @@ services:
- TELEMETRY_ENABLED=true - TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm - DEPLOYMENT_TYPE=docker-swarm
healthcheck: healthcheck:
test: test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
interval: 30s interval: 30s
timeout: 5s timeout: 5s
retries: 3 retries: 3
deploy: deploy:
restart_policy: restart_policy:
condition: on-failure condition: on-failure
<<: *db-depend <<: *clickhouse-depend
frontend: frontend:
image: signoz/frontend:0.46.0 image: signoz/frontend:0.13.0
deploy: deploy:
restart_policy: restart_policy:
condition: on-failure condition: on-failure
@@ -199,26 +179,19 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector: otel-collector:
image: signoz/signoz-otel-collector:0.88.24 image: signoz/signoz-otel-collector:0.66.1
command: command: ["--config=/etc/otel-collector-config.yaml"]
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
user: root # required for reading docker container logs user: root # required for reading docker container logs
volumes: volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro - /var/lib/docker/containers:/var/lib/docker/containers:ro
environment: environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}} - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
- DOCKER_MULTI_NODE_CLUSTER=false - DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports: ports:
# - "1777:1777" # pprof extension # - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver - "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver - "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics # - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent # - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port # - "9411:9411" # Zipkin port
@@ -231,40 +204,26 @@ services:
mode: global mode: global
restart_policy: restart_policy:
condition: on-failure condition: on-failure
depends_on: <<: *clickhouse-depend
- clickhouse
- otel-collector-migrator
- query-service
otel-collector-migrator: otel-collector-metrics:
image: signoz/signoz-schema-migrator:0.88.24 image: signoz/signoz-otel-collector:0.66.1
deploy: command: ["--config=/etc/otel-collector-metrics-config.yaml"]
restart_policy:
condition: on-failure
delay: 5s
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
- clickhouse
# - clickhouse-2
# - clickhouse-3
logspout:
image: "gliderlabs/logspout:v3.2.14"
volumes: volumes:
- /etc/hostname:/etc/host_hostname:ro - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
- /var/run/docker.sock:/var/run/docker.sock # ports:
command: syslog+tcp://otel-collector:2255 # - "1777:1777" # pprof extension
depends_on: # - "8888:8888" # OtelCollector internal metrics
- otel-collector # - "13133:13133" # Health check extension
# - "55679:55679" # zPages extension
deploy: deploy:
mode: global
restart_policy: restart_policy:
condition: on-failure condition: on-failure
<<: *clickhouse-depend
hotrod: hotrod:
image: jaegertracing/example-hotrod:1.30 image: jaegertracing/example-hotrod:1.30
command: [ "all" ] command: ["all"]
environment: environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
logging: logging:
@@ -273,7 +232,7 @@ services:
max-file: "3" max-file: "3"
load-hotrod: load-hotrod:
image: "signoz/locust:1.2.3" image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
hostname: load-hotrod hostname: load-hotrod
environment: environment:
ATTACKED_HOST: http://hotrod:8080 ATTACKED_HOST: http://hotrod:8080

View File

@@ -1,23 +1,35 @@
receivers: receivers:
tcplog/docker: filelog/dockercontainers:
listen_address: "0.0.0.0:2255" include: [ "/var/lib/docker/containers/*/*.log" ]
start_at: end
include_file_path: true
include_file_name: false
operators: operators:
- type: regex_parser - type: json_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?' id: parser-docker
timestamp: output: extract_metadata_from_filepath
parse_from: attributes.timestamp timestamp:
layout: '%Y-%m-%dT%H:%M:%S.%LZ' parse_from: attributes.time
- type: move layout: '%Y-%m-%dT%H:%M:%S.%LZ'
from: attributes["body"] - type: regex_parser
to: body id: extract_metadata_from_filepath
- type: remove regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
field: attributes.timestamp parse_from: attributes["log.file.path"]
# please remove names from below if you want to collect logs from them output: parse_body
- type: filter - type: move
id: signoz_logs_filter id: parse_body
expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"' from: attributes.log
to: body
output: time
- type: remove
id: time
field: attributes.time
opencensus: opencensus:
endpoint: 0.0.0.0:55678 endpoint: 0.0.0.0:55678
otlp/spanmetrics:
protocols:
grpc:
endpoint: localhost:12345
otlp: otlp:
protocols: protocols:
grpc: grpc:
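
The newer side of this hunk collects Docker logs via a `tcplog/docker` receiver on port 2255, fed by logspout (the older side tailed container log files directly). A hedged way to exercise the regex parser by hand, assuming the port is reachable from your shell:

```sh
# Hand-crafted syslog-style line shaped to match the regex_parser above;
# the container id and name values are made up.
printf '<14>1 2024-01-01T00:00:00.000Z abc123 my-container 0 - - hello from test\n' \
  | nc -w1 localhost 2255
```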
@@ -52,9 +64,7 @@ receivers:
- job_name: otel-collector - job_name: otel-collector
static_configs: static_configs:
- targets: - targets:
- localhost:8888 - localhost:8888
labels:
job_name: otel-collector
processors: processors:
batch: batch:
@@ -63,10 +73,10 @@ processors:
timeout: 10s timeout: 10s
resourcedetection: resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure. detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
timeout: 2s timeout: 2s
signozspanmetrics/cumulative: signozspanmetrics/prometheus:
metrics_exporter: clickhousemetricswrite metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000 dimensions_cache_size: 100000
dimensions: dimensions:
@@ -74,19 +84,6 @@ processors:
default: default default: default
- name: deployment.environment - name: deployment.environment
default: default default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
# memory_limiter: # memory_limiter:
# # 80% of maximum memory up to 2G # # 80% of maximum memory up to 2G
# limit_mib: 1500 # limit_mib: 1500
@@ -102,47 +99,31 @@ processors:
# num_workers: 4 # num_workers: 4
# queue_size: 100 # queue_size: 100
# retry_on_failure: true # retry_on_failure: true
signozspanmetrics/delta:
metrics_exporter: clickhousemetricswrite
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
enable_exp_histogram: true
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
exporters: exporters:
clickhousetraces: clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
clickhousemetricswrite: clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/signoz_metrics endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion: resource_to_telemetry_conversion:
enabled: true enabled: true
clickhousemetricswrite/prometheus: prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics endpoint: 0.0.0.0:8889
# logging: {} # logging: {}
clickhouselogsexporter: clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs dsn: tcp://clickhouse:9000/
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 10s timeout: 5s
sending_queue:
queue_size: 100
retry_on_failure:
enabled: true
initial_interval: 5s
max_interval: 30s
max_elapsed_time: 300s
extensions: extensions:
health_check: health_check:
endpoint: 0.0.0.0:13133 endpoint: 0.0.0.0:13133
@@ -159,21 +140,20 @@ service:
pipelines: pipelines:
traces: traces:
receivers: [jaeger, otlp] receivers: [jaeger, otlp]
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch] processors: [signozspanmetrics/prometheus, batch]
exporters: [clickhousetraces] exporters: [clickhousetraces]
metrics: metrics:
receivers: [otlp] receivers: [otlp]
processors: [batch] processors: [batch]
exporters: [clickhousemetricswrite] exporters: [clickhousemetricswrite]
metrics/generic: metrics/generic:
receivers: [hostmetrics] receivers: [hostmetrics, prometheus]
processors: [resourcedetection, batch] processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite] exporters: [clickhousemetricswrite]
metrics/prometheus: metrics/spanmetrics:
receivers: [prometheus] receivers: [otlp/spanmetrics]
processors: [batch] exporters: [prometheus]
exporters: [clickhousemetricswrite/prometheus]
logs: logs:
receivers: [otlp, tcplog/docker] receivers: [otlp, filelog/dockercontainers]
processors: [batch] processors: [batch]
exporters: [clickhouselogsexporter] exporters: [clickhouselogsexporter]
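
With the pipelines above in place, a minimal connectivity check for the OTLP/HTTP receiver looks like this (port 4318 as exposed in the compose files; the empty payload is only a smoke test, not real telemetry):

```sh
# Expect an HTTP 200 with an empty JSON body if the OTLP/HTTP receiver is up.
curl -sS -X POST http://localhost:4318/v1/traces \
  -H 'Content-Type: application/json' \
  -d '{"resourceSpans":[]}'
```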

View File

@@ -0,0 +1,62 @@
receivers:
prometheus:
config:
scrape_configs:
# otel-collector-metrics internal metrics
- job_name: otel-collector-metrics
scrape_interval: 60s
static_configs:
- targets:
- localhost:8888
# SigNoz span metrics
- job_name: signozspanmetrics-collector
scrape_interval: 60s
dns_sd_configs:
- names:
- tasks.otel-collector
type: A
port: 8889
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
# # 25% of limit up to 2G
# spike_limit_mib: 512
# check_interval: 5s
#
# # 50% of the maximum memory
# limit_percentage: 50
# # 20% of max memory usage spike expected
# spike_limit_percentage: 20
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
exporters:
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
extensions:
health_check:
endpoint: 0.0.0.0:13133
zpages:
endpoint: 0.0.0.0:55679
pprof:
endpoint: 0.0.0.0:1777
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages, pprof]
pipelines:
metrics:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite]
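
The `dns_sd_configs` entry above relies on Docker Swarm's `tasks.<service>` DNS name to discover every collector replica. A rough way to inspect what it resolves to, assuming the overlay network is attachable and named `signoz-net` (an assumption; substitute your stack's network):

```sh
# One A record per running otel-collector task should come back.
docker run --rm --network signoz-net alpine nslookup tasks.otel-collector
```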

View File

@@ -1 +0,0 @@
server_endpoint: ws://query-service:4320/v1/opamp

View File

@@ -22,4 +22,4 @@ rule_files:
scrape_configs: [] scrape_configs: []
remote_read: remote_read:
- url: tcp://clickhouse:9000/signoz_metrics - url: tcp://clickhouse:9000/?database=signoz_metrics

View File

@@ -24,16 +24,8 @@ server {
    try_files $uri $uri/ /index.html;
}

location ~ ^/api/(v1|v3)/logs/(tail|livetail) {
    proxy_pass http://query-service:8080;
    proxy_http_version 1.1;

    # connection will be closed if no data is read for 600s between successive read operations
    proxy_read_timeout 600s;

    # dont buffer the data send it directly to client.
    proxy_buffering off;
    proxy_cache off;
}

location /api {
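
The unbuffered proxying above is what lets live log tailing stream to the browser; a hedged way to observe it from outside, assuming the frontend is on port 3301 and omitting any auth headers:

```sh
# -N disables curl's own buffering so streamed log lines show up immediately.
curl -N "http://localhost:3301/api/v1/logs/tail"
```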

View File

@@ -905,8 +905,7 @@
<dictionaries_config>*_dictionary.xml</dictionaries_config>

<!-- Configuration of user defined executable functions -->
<user_defined_executable_functions_config>*function.xml</user_defined_executable_functions_config>
<user_scripts_path>/var/lib/clickhouse/user_scripts/</user_scripts_path>

<!-- Uncomment if you want data to be compressed 30-100% better.
     Don't do that if you just started using ClickHouse.

View File

@@ -7,21 +7,9 @@
</default>
<s3>
  <type>s3</type>
  <!-- For S3 cold storage,
       if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
       if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
       For GCS cold storage,
       endpoint should be https://storage.googleapis.com/<bucket-name>/data/
  -->
  <endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
  <access_key_id>ACCESS-KEY-ID</access_key_id>
  <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
  <!-- In case of S3, uncomment the below configuration in case you want to read
       AWS credentials from the Environment variables if they exist. -->
  <!-- <use_environment_credentials>true</use_environment_credentials> -->
  <!-- In case of GCS, uncomment the below configuration, since GCS does
       not support batch deletion and result in error messages in logs. -->
  <!-- <support_batch_delete>false</support_batch_delete> -->
</s3>
</disks>
<policies>

View File

@@ -1,21 +0,0 @@
<functions>
<function>
<type>executable</type>
<name>histogramQuantile</name>
<return_type>Float64</return_type>
<argument>
<type>Array(Float64)</type>
<name>buckets</name>
</argument>
<argument>
<type>Array(Float64)</type>
<name>counts</name>
</argument>
<argument>
<type>Float64</type>
<name>quantile</name>
</argument>
<format>CSV</format>
<command>./histogramQuantile</command>
</function>
</functions>
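
This removed file is what registers the `histogramQuantile` executable UDF on the newer side of the compare (together with the binary under `user_scripts_path`). Assuming it is loaded, usage looks roughly like:

```sh
# buckets and counts must line up element for element; the values are made up.
clickhouse-client --query \
  "SELECT histogramQuantile([0.1, 0.5, 1.0, 2.0], [10., 40., 30., 20.], 0.99)"
```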

View File

@@ -1,26 +1,9 @@
version: "2.4" version: "2.4"
services: services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
clickhouse: clickhouse:
image: clickhouse/clickhouse-server:24.1.2-alpine image: clickhouse/clickhouse-server:22.8.8-alpine
container_name: signoz-clickhouse container_name: clickhouse
# ports: # ports:
# - "9000:9000" # - "9000:9000"
# - "8123:8123" # - "8123:8123"
@@ -28,11 +11,8 @@ services:
volumes: volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/ - ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
restart: on-failure restart: on-failure
logging: logging:
options: options:
@@ -40,21 +20,14 @@ services:
max-file: "3" max-file: "3"
healthcheck: healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'" # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
interval: 30s interval: 30s
timeout: 5s timeout: 5s
retries: 3 retries: 3
alertmanager: alertmanager:
container_name: signoz-alertmanager container_name: alertmanager
image: signoz/alertmanager:0.23.5 image: signoz/alertmanager:0.23.0-0.2
volumes: volumes:
- ./data/alertmanager:/data - ./data/alertmanager:/data
depends_on: depends_on:
@@ -65,40 +38,20 @@ services:
- --queryService.url=http://query-service:8085 - --queryService.url=http://query-service:8085
- --storage.path=/data - --storage.path=/data
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector: otel-collector:
container_name: signoz-otel-collector container_name: otel-collector
image: signoz/signoz-otel-collector:0.88.24 image: signoz/signoz-otel-collector:0.66.1
command: command: ["--config=/etc/otel-collector-config.yaml"]
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
# user: root # required for reading docker container logs # user: root # required for reading docker container logs
volumes: volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment: environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
ports: ports:
# - "1777:1777" # pprof extension # - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver - "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver - "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics # - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent # - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port # - "9411:9411" # Zipkin port
@@ -111,21 +64,22 @@ services:
depends_on: depends_on:
clickhouse: clickhouse:
condition: service_healthy condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout: otel-collector-metrics:
image: "gliderlabs/logspout:v3.2.14" container_name: otel-collector-metrics
container_name: signoz-logspout image: signoz/signoz-otel-collector:0.66.1
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes: volumes:
- /etc/hostname:/etc/host_hostname:ro - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
- /var/run/docker.sock:/var/run/docker.sock # ports:
command: syslog+tcp://otel-collector:2255 # - "1777:1777" # pprof extension
depends_on: # - "8888:8888" # OtelCollector internal metrics
- otel-collector # - "13133:13133" # Health check extension
# - "55679:55679" # zPages extension
restart: on-failure restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
hotrod: hotrod:
image: jaegertracing/example-hotrod:1.30 image: jaegertracing/example-hotrod:1.30
@@ -134,12 +88,12 @@ services:
options: options:
max-size: 50m max-size: 50m
max-file: "3" max-file: "3"
command: [ "all" ] command: ["all"]
environment: environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod: load-hotrod:
image: "signoz/locust:1.2.3" image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod container_name: load-hotrod
hostname: load-hotrod hostname: load-hotrod
environment: environment:

View File

@@ -4,12 +4,12 @@ services:
query-service: query-service:
hostname: query-service hostname: query-service
build: build:
context: "../../../" context: "../../../pkg/query-service"
dockerfile: "./pkg/query-service/Dockerfile" dockerfile: "./Dockerfile"
args: args:
LDFLAGS: "" LDFLAGS: ""
TARGETPLATFORM: "${GOOS}/${GOARCH}" TARGETPLATFORM: "${LOCAL_GOOS}/${LOCAL_GOARCH}"
container_name: signoz-query-service container_name: query-service
environment: environment:
- ClickHouseUrl=tcp://clickhouse:9000 - ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/ - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
@@ -22,24 +22,13 @@ services:
- ./prometheus.yml:/root/config/prometheus.yml - ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards - ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/ - ./data/signoz/:/var/lib/signoz/
command: command: ["-config=/root/config/prometheus.yml"]
[
"-config=/root/config/prometheus.yml",
# "--prefer-delta=true"
]
ports: ports:
- "6060:6060" - "6060:6060"
- "8080:8080" - "8080:8080"
restart: on-failure restart: on-failure
healthcheck: healthcheck:
test: test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
interval: 30s interval: 30s
timeout: 5s timeout: 5s
retries: 3 retries: 3
@@ -52,9 +41,9 @@ services:
context: "../../../frontend" context: "../../../frontend"
dockerfile: "./Dockerfile" dockerfile: "./Dockerfile"
args: args:
TARGETOS: "${GOOS}" TARGETOS: "${LOCAL_GOOS}"
TARGETPLATFORM: "${GOARCH}" TARGETPLATFORM: "${LOCAL_GOARCH}"
container_name: signoz-frontend container_name: frontend
environment: environment:
- FRONTEND_API_ENDPOINT=http://query-service:8080 - FRONTEND_API_ENDPOINT=http://query-service:8080
restart: on-failure restart: on-failure

View File

@@ -1,307 +0,0 @@
version: "2.4"
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
# adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    hostname: clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
      - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-2
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-3
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  alertmanager:
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
    container_name: signoz-alertmanager
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.46.0}
    container_name: signoz-query-service
    command:
      [
        "-config=/root/config/prometheus.yml",
        "-gateway-url=https://api.staging.signoz.cloud"
        # "--prefer-delta=true"
      ]
    # ports:
    #   - "6060:6060"     # pprof port
    #   - "8080:8080"     # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    <<: *db-depend

  frontend:
    image: signoz/frontend:${DOCKER_TAG:-0.46.0}
    container_name: signoz-frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.24}
    container_name: signoz-otel-collector
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - DOCKER_MULTI_NODE_CLUSTER=false
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: [ "all" ]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
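The otel-collector service above exposes OTLP on 4317 (gRPC) and 4318 (HTTP). As a quick end-to-end smoke test of this stack, a minimal sketch using the OpenTelemetry Go SDK (assuming the default localhost port mapping; this snippet is not part of the compare) could emit a single span:

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Plain-text gRPC connection, matching the unencrypted OTLP receiver above.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatalf("failed to create OTLP exporter: %v", err)
	}

	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	otel.SetTracerProvider(tp)

	// One short span; it should become queryable in the SigNoz UI once flushed.
	_, span := tp.Tracer("smoke-test").Start(ctx, "hello-signoz")
	time.Sleep(10 * time.Millisecond)
	span.End()

	// Shutdown flushes the batch span processor before exit.
	if err := tp.Shutdown(ctx); err != nil {
		log.Printf("shutdown error: %v", err)
	}
}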


@@ -2,8 +2,7 @@ version: "2.4"
 x-clickhouse-defaults: &clickhouse-defaults
   restart: on-failure
-  # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
-  image: clickhouse/clickhouse-server:24.1.2-alpine
+  image: clickhouse/clickhouse-server:22.8.8-alpine
   tty: true
   depends_on:
     - zookeeper-1
@@ -15,14 +14,7 @@ x-clickhouse-defaults: &clickhouse-defaults
       max-file: "3"
   healthcheck:
     # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-    test:
-      [
-        "CMD",
-        "wget",
-        "--spider",
-        "-q",
-        "0.0.0.0:8123/ping"
-      ]
+    test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
     interval: 30s
     timeout: 5s
     retries: 3
@@ -32,22 +24,20 @@ x-clickhouse-defaults: &clickhouse-defaults
     soft: 262144
     hard: 262144

-x-db-depend: &db-depend
+x-clickhouse-depend: &clickhouse-depend
   depends_on:
     clickhouse:
       condition: service_healthy
-    otel-collector-migrator:
-      condition: service_completed_successfully
     # clickhouse-2:
     #   condition: service_healthy
     # clickhouse-3:
     #   condition: service_healthy

 services:
   zookeeper-1:
-    image: bitnami/zookeeper:3.7.1
-    container_name: signoz-zookeeper-1
+    image: bitnami/zookeeper:3.7.0
+    container_name: zookeeper-1
     hostname: zookeeper-1
     user: root
     ports:
@@ -64,7 +54,7 @@ services:
   # zookeeper-2:
   #   image: bitnami/zookeeper:3.7.0
-  #   container_name: signoz-zookeeper-2
+  #   container_name: zookeeper-2
   #   hostname: zookeeper-2
   #   user: root
   #   ports:
@@ -81,7 +71,7 @@ services:
   # zookeeper-3:
   #   image: bitnami/zookeeper:3.7.0
-  #   container_name: signoz-zookeeper-3
+  #   container_name: zookeeper-3
   #   hostname: zookeeper-3
   #   user: root
   #   ports:
@@ -98,7 +88,7 @@ services:
   clickhouse:
     <<: *clickhouse-defaults
-    container_name: signoz-clickhouse
+    container_name: clickhouse
     hostname: clickhouse
     ports:
       - "9000:9000"
@@ -107,15 +97,13 @@ services:
     volumes:
       - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
       - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
-      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
       - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
       # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
-      - ./user_scripts:/var/lib/clickhouse/user_scripts/
   # clickhouse-2:
   #   <<: *clickhouse-defaults
-  #   container_name: signoz-clickhouse-2
+  #   container_name: clickhouse-2
   #   hostname: clickhouse-2
   #   ports:
   #     - "9001:9000"
@@ -124,16 +112,13 @@ services:
   #   volumes:
   #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
   #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
-  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
   #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
   #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
   #     - ./data/clickhouse-2/:/var/lib/clickhouse/
-  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/
   # clickhouse-3:
   #   <<: *clickhouse-defaults
-  #   container_name: signoz-clickhouse-3
+  #   container_name: clickhouse-3
   #   hostname: clickhouse-3
   #   ports:
   #     - "9002:9000"
@@ -142,15 +127,12 @@ services:
   #   volumes:
   #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
   #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
-  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
   #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
   #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
   #     - ./data/clickhouse-3/:/var/lib/clickhouse/
-  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/
   alertmanager:
-    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
-    container_name: signoz-alertmanager
+    image: signoz/alertmanager:0.23.0-0.2
     volumes:
       - ./data/alertmanager:/data
     depends_on:
@@ -161,16 +143,12 @@ services:
       - --queryService.url=http://query-service:8085
       - --storage.path=/data
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.46.0}
-    container_name: signoz-query-service
-    command:
-      [
-        "-config=/root/config/prometheus.yml"
-        # "--prefer-delta=true"
-      ]
+    image: signoz/query-service:0.13.0
+    container_name: query-service
+    command: ["-config=/root/config/prometheus.yml"]
     # ports:
     #   - "6060:6060"     # pprof port
     #   - "8080:8080"     # query-service port
@@ -179,7 +157,7 @@ services:
       - ../dashboards:/root/config/dashboards
       - ./data/signoz/:/var/lib/signoz/
     environment:
-      - ClickHouseUrl=tcp://clickhouse:9000
+      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
       - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
@@ -189,22 +167,15 @@ services:
       - DEPLOYMENT_TYPE=docker-standalone-amd
     restart: on-failure
     healthcheck:
-      test:
-        [
-          "CMD",
-          "wget",
-          "--spider",
-          "-q",
-          "localhost:8080/api/v1/health"
-        ]
+      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
       interval: 30s
       timeout: 5s
       retries: 3
-    <<: *db-depend
+    <<: *clickhouse-depend
   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.46.0}
-    container_name: signoz-frontend
+    image: signoz/frontend:0.13.0
+    container_name: frontend
     restart: on-failure
     depends_on:
       - alertmanager
@@ -214,43 +185,20 @@ services:
     volumes:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
-  otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
-    container_name: otel-migrator
-    command:
-      - "--dsn=tcp://clickhouse:9000"
-    depends_on:
-      clickhouse:
-        condition: service_healthy
-      # clickhouse-2:
-      #   condition: service_healthy
-      # clickhouse-3:
-      #   condition: service_healthy
   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.24}
-    container_name: signoz-otel-collector
-    command:
-      [
-        "--config=/etc/otel-collector-config.yaml",
-        "--manager-config=/etc/manager-config.yaml",
-        "--copy-path=/var/tmp/collector-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
+    image: signoz/signoz-otel-collector:0.66.1
+    command: ["--config=/etc/otel-collector-config.yaml"]
     user: root # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
-      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
       - DOCKER_MULTI_NODE_CLUSTER=false
-      - LOW_CARDINAL_EXCEPTION_GROUPING=false
     ports:
       # - "1777:1777" # pprof extension
       - "4317:4317" # OTLP gRPC receiver
       - "4318:4318" # OTLP HTTP receiver
       # - "8888:8888" # OtelCollector internal metrics
       # - "8889:8889" # signoz spanmetrics exposed by the agent
       # - "9411:9411" # Zipkin port
@@ -260,38 +208,34 @@ services:
       # - "55678:55678" # OpenCensus receiver
       # - "55679:55679" # zPages extension
     restart: on-failure
-    depends_on:
-      clickhouse:
-        condition: service_healthy
-      otel-collector-migrator:
-        condition: service_completed_successfully
-      query-service:
-        condition: service_healthy
-  logspout:
-    image: "gliderlabs/logspout:v3.2.14"
-    container_name: signoz-logspout
+    <<: *clickhouse-depend
+  otel-collector-metrics:
+    image: signoz/signoz-otel-collector:0.66.1
+    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
     volumes:
-      - /etc/hostname:/etc/host_hostname:ro
-      - /var/run/docker.sock:/var/run/docker.sock
-    command: syslog+tcp://otel-collector:2255
-    depends_on:
-      - otel-collector
+      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
+    # ports:
+    #   - "1777:1777" # pprof extension
+    #   - "8888:8888" # OtelCollector internal metrics
+    #   - "13133:13133" # Health check extension
+    #   - "55679:55679" # zPages extension
     restart: on-failure
+    <<: *clickhouse-depend
   hotrod:
     image: jaegertracing/example-hotrod:1.30
     container_name: hotrod
     logging:
       options:
         max-size: 50m
         max-file: "3"
-    command: [ "all" ]
+    command: ["all"]
     environment:
       - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
   load-hotrod:
-    image: "signoz/locust:1.2.3"
+    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
     container_name: load-hotrod
     hostname: load-hotrod
     environment:


@@ -1,64 +0,0 @@
<clickhouse>
    <logger>
        <!-- Possible levels [1]:
        - none (turns off logging)
        - fatal
        - critical
        - error
        - warning
        - notice
        - information
        - debug
        - trace
        [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
        -->
        <level>information</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
        <!-- Rotation policy
        See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
        -->
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
    </logger>

    <listen_host>0.0.0.0</listen_host>
    <max_connections>4096</max_connections>

    <keeper_server>
        <tcp_port>9181</tcp_port>

        <!-- Must be unique among all keeper serves -->
        <server_id>1</server_id>

        <log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>
        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <min_session_timeout_ms>10000</min_session_timeout_ms>
            <session_timeout_ms>100000</session_timeout_ms>
            <raft_logs_level>information</raft_logs_level>
            <compress_logs>false</compress_logs>
            <!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h -->
        </coordination_settings>

        <!-- enable sanity hostname checks for cluster configuration (e.g. if localhost is used with remote endpoints) -->
        <hostname_checks_enabled>true</hostname_checks_enabled>
        <raft_configuration>
            <server>
                <id>1</id>
                <!-- Internal port and hostname -->
                <hostname>clickhouses-keeper-1</hostname>
                <port>9234</port>
            </server>
            <!-- Add more servers here -->
        </raft_configuration>
    </keeper_server>
</clickhouse>


@@ -1,23 +1,35 @@
 receivers:
-  tcplog/docker:
-    listen_address: "0.0.0.0:2255"
+  filelog/dockercontainers:
+    include: [ "/var/lib/docker/containers/*/*.log" ]
+    start_at: end
+    include_file_path: true
+    include_file_name: false
     operators:
-      - type: regex_parser
-        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
-        timestamp:
-          parse_from: attributes.timestamp
-          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
-      - type: move
-        from: attributes["body"]
-        to: body
-      - type: remove
-        field: attributes.timestamp
-      # please remove names from below if you want to collect logs from them
-      - type: filter
-        id: signoz_logs_filter
-        expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
+      - type: json_parser
+        id: parser-docker
+        output: extract_metadata_from_filepath
+        timestamp:
+          parse_from: attributes.time
+          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
+      - type: regex_parser
+        id: extract_metadata_from_filepath
+        regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
+        parse_from: attributes["log.file.path"]
+        output: parse_body
+      - type: move
+        id: parse_body
+        from: attributes.log
+        to: body
+        output: time
+      - type: remove
+        id: time
+        field: attributes.time
   opencensus:
     endpoint: 0.0.0.0:55678
+  otlp/spanmetrics:
+    protocols:
+      grpc:
+        endpoint: localhost:12345
   otlp:
     protocols:
       grpc:
@@ -52,19 +64,15 @@ receivers:
       - job_name: otel-collector
         static_configs:
           - targets:
             - localhost:8888
-            labels:
-              job_name: otel-collector
 processors:
   batch:
     send_batch_size: 10000
     send_batch_max_size: 11000
     timeout: 10s
-  signozspanmetrics/cumulative:
-    metrics_exporter: clickhousemetricswrite
-    metrics_flush_interval: 60s
+  signozspanmetrics/prometheus:
+    metrics_exporter: prometheus
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
     dimensions:
@@ -72,19 +80,6 @@ processors:
         default: default
       - name: deployment.environment
         default: default
-      # This is added to ensure the uniqueness of the timeseries
-      # Otherwise, identical timeseries produced by multiple replicas of
-      # collectors result in incorrect APM metrics
-      - name: signoz.collector.id
-      - name: service.version
-      - name: browser.platform
-      - name: browser.mobile
-      - name: k8s.cluster.name
-      - name: k8s.node.name
-      - name: k8s.namespace.name
-      - name: host.name
-      - name: host.type
-      - name: container.name
   # memory_limiter:
   #   # 80% of maximum memory up to 2G
   #   limit_mib: 1500
@@ -102,33 +97,8 @@ processors:
   #   retry_on_failure: true
   resourcedetection:
     # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
-    detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
+    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
     timeout: 2s
-  signozspanmetrics/delta:
-    metrics_exporter: clickhousemetricswrite
-    metrics_flush_interval: 60s
-    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
-    dimensions_cache_size: 100000
-    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
-    enable_exp_histogram: true
-    dimensions:
-      - name: service.namespace
-        default: default
-      - name: deployment.environment
-        default: default
-      # This is added to ensure the uniqueness of the timeseries
-      # Otherwise, identical timeseries produced by multiple replicas of
-      # collectors result in incorrect APM metrics
-      - name: signoz.collector.id
-      - name: service.version
-      - name: browser.platform
-      - name: browser.mobile
-      - name: k8s.cluster.name
-      - name: k8s.node.name
-      - name: k8s.namespace.name
-      - name: host.name
-      - name: host.type
-      - name: container.name
 extensions:
   health_check:
@@ -140,21 +110,29 @@ extensions:
 exporters:
   clickhousetraces:
-    datasource: tcp://clickhouse:9000/signoz_traces
+    datasource: tcp://clickhouse:9000/?database=signoz_traces
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
-    low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
   clickhousemetricswrite:
-    endpoint: tcp://clickhouse:9000/signoz_metrics
+    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
     resource_to_telemetry_conversion:
       enabled: true
-  clickhousemetricswrite/prometheus:
-    endpoint: tcp://clickhouse:9000/signoz_metrics
-  clickhouselogsexporter:
-    dsn: tcp://clickhouse:9000/signoz_logs
-    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
-    timeout: 10s
+  prometheus:
+    endpoint: 0.0.0.0:8889
   # logging: {}
+  clickhouselogsexporter:
+    dsn: tcp://clickhouse:9000/
+    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
+    timeout: 5s
+    sending_queue:
+      queue_size: 100
+    retry_on_failure:
+      enabled: true
+      initial_interval: 5s
+      max_interval: 30s
+      max_elapsed_time: 300s
 service:
   telemetry:
     metrics:
@@ -166,21 +144,20 @@ service:
   pipelines:
     traces:
       receivers: [jaeger, otlp]
-      processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
+      processors: [signozspanmetrics/prometheus, batch]
       exporters: [clickhousetraces]
     metrics:
       receivers: [otlp]
       processors: [batch]
       exporters: [clickhousemetricswrite]
     metrics/generic:
-      receivers: [hostmetrics]
+      receivers: [hostmetrics, prometheus]
       processors: [resourcedetection, batch]
       exporters: [clickhousemetricswrite]
-    metrics/prometheus:
-      receivers: [prometheus]
-      processors: [batch]
-      exporters: [clickhousemetricswrite/prometheus]
+    metrics/spanmetrics:
+      receivers: [otlp/spanmetrics]
+      exporters: [prometheus]
     logs:
-      receivers: [otlp, tcplog/docker]
+      receivers: [otlp, filelog/dockercontainers]
       processors: [batch]
       exporters: [clickhouselogsexporter]
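On the left side of this diff, the removed tcplog/docker receiver depends on the regex_parser above to split logspout's syslog frames into a timestamp, container metadata, and the log body. A standalone sketch for sanity-checking that pattern with Go's regexp package (the sample line below is hypothetical):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the tcplog/docker regex_parser operator above.
	re := regexp.MustCompile(`^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?`)

	// Hypothetical logspout-style frame: priority, version, timestamp,
	// container id, container name, pid, then the message body.
	line := `<14>1 2024-01-01T10:00:00.123Z 4f5a6b7c8d9e hotrod 4242 - - 200 GET /dispatch`

	m := re.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	// Print only the named capture groups the operator maps to attributes.
	for i, name := range re.SubexpNames() {
		if name != "" {
			fmt.Printf("%s=%q\n", name, m[i])
		}
	}
}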

View File

@@ -0,0 +1,67 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  prometheus:
    config:
      scrape_configs:
        # otel-collector-metrics internal metrics
        - job_name: otel-collector-metrics
          scrape_interval: 60s
          static_configs:
            - targets:
              - localhost:8888
        # SigNoz span metrics
        - job_name: signozspanmetrics-collector
          scrape_interval: 60s
          static_configs:
            - targets:
              - otel-collector:8889

processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

service:
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]


@@ -1 +0,0 @@
server_endpoint: ws://query-service:4320/v1/opamp


@@ -22,4 +22,4 @@ rule_files:
 scrape_configs: []

 remote_read:
-  - url: tcp://clickhouse:9000/signoz_metrics
+  - url: tcp://clickhouse:9000/?database=signoz_metrics


@@ -1,237 +0,0 @@
package main

import (
	"bufio"
	"fmt"
	"math"
	"os"
	"sort"
	"strconv"
	"strings"
)

// NOTE: executable must be built with target OS and architecture set to linux/amd64
// env GOOS=linux GOARCH=amd64 go build -o histogramQuantile histogramQuantile.go

// The following code is adapted from the following source:
// https://github.com/prometheus/prometheus/blob/main/promql/quantile.go

type bucket struct {
	upperBound float64
	count      float64
}

// buckets implements sort.Interface.
type buckets []bucket

func (b buckets) Len() int           { return len(b) }
func (b buckets) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }

// bucketQuantile calculates the quantile 'q' based on the given buckets. The
// buckets will be sorted by upperBound by this function (i.e. no sorting
// needed before calling this function). The quantile value is interpolated
// assuming a linear distribution within a bucket. However, if the quantile
// falls into the highest bucket, the upper bound of the 2nd highest bucket is
// returned. A natural lower bound of 0 is assumed if the upper bound of the
// lowest bucket is greater 0. In that case, interpolation in the lowest bucket
// happens linearly between 0 and the upper bound of the lowest bucket.
// However, if the lowest bucket has an upper bound less or equal 0, this upper
// bound is returned if the quantile falls into the lowest bucket.
//
// There are a number of special cases (once we have a way to report errors
// happening during evaluations of AST functions, we should report those
// explicitly):
//
// If 'buckets' has 0 observations, NaN is returned.
//
// If 'buckets' has fewer than 2 elements, NaN is returned.
//
// If the highest bucket is not +Inf, NaN is returned.
//
// If q==NaN, NaN is returned.
//
// If q<0, -Inf is returned.
//
// If q>1, +Inf is returned.
func bucketQuantile(q float64, buckets buckets) float64 {
	if math.IsNaN(q) {
		return math.NaN()
	}
	if q < 0 {
		return math.Inf(-1)
	}
	if q > 1 {
		return math.Inf(+1)
	}
	sort.Sort(buckets)
	if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
		return math.NaN()
	}

	buckets = coalesceBuckets(buckets)
	ensureMonotonic(buckets)

	if len(buckets) < 2 {
		return math.NaN()
	}
	observations := buckets[len(buckets)-1].count
	if observations == 0 {
		return math.NaN()
	}
	rank := q * observations
	b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })

	if b == len(buckets)-1 {
		return buckets[len(buckets)-2].upperBound
	}
	if b == 0 && buckets[0].upperBound <= 0 {
		return buckets[0].upperBound
	}
	var (
		bucketStart float64
		bucketEnd   = buckets[b].upperBound
		count       = buckets[b].count
	)
	if b > 0 {
		bucketStart = buckets[b-1].upperBound
		count -= buckets[b-1].count
		rank -= buckets[b-1].count
	}
	return bucketStart + (bucketEnd-bucketStart)*(rank/count)
}

// coalesceBuckets merges buckets with the same upper bound.
//
// The input buckets must be sorted.
func coalesceBuckets(buckets buckets) buckets {
	last := buckets[0]
	i := 0
	for _, b := range buckets[1:] {
		if b.upperBound == last.upperBound {
			last.count += b.count
		} else {
			buckets[i] = last
			last = b
			i++
		}
	}
	buckets[i] = last
	return buckets[:i+1]
}

// The assumption that bucket counts increase monotonically with increasing
// upperBound may be violated during:
//
//   * Recording rule evaluation of histogram_quantile, especially when rate()
//     has been applied to the underlying bucket timeseries.
//   * Evaluation of histogram_quantile computed over federated bucket
//     timeseries, especially when rate() has been applied.
//
// This is because scraped data is not made available to rule evaluation or
// federation atomically, so some buckets are computed with data from the
// most recent scrapes, but the other buckets are missing data from the most
// recent scrape.
//
// Monotonicity is usually guaranteed because if a bucket with upper bound
// u1 has count c1, then any bucket with a higher upper bound u > u1 must
// have counted all c1 observations and perhaps more, so that c >= c1.
//
// Randomly interspersed partial sampling breaks that guarantee, and rate()
// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
// monotonicity is broken. It is exacerbated by rate() because under normal
// operation, cumulative counting of buckets will cause the bucket counts to
// diverge such that small differences from missing samples are not a problem.
// rate() removes this divergence.)
//
// bucketQuantile depends on that monotonicity to do a binary search for the
// bucket with the φ-quantile count, so breaking the monotonicity
// guarantee causes bucketQuantile() to return undefined (nonsense) results.
//
// As a somewhat hacky solution until ingestion is atomic per scrape, we
// calculate the "envelope" of the histogram buckets, essentially removing
// any decreases in the count between successive buckets.
func ensureMonotonic(buckets buckets) {
	max := buckets[0].count
	for i := 1; i < len(buckets); i++ {
		switch {
		case buckets[i].count > max:
			max = buckets[i].count
		case buckets[i].count < max:
			buckets[i].count = max
		}
	}
}

// End of copied code.

func readLines() []string {
	r := bufio.NewReader(os.Stdin)
	bytes := []byte{}
	lines := []string{}
	for {
		line, isPrefix, err := r.ReadLine()
		if err != nil {
			break
		}
		bytes = append(bytes, line...)
		if !isPrefix {
			str := strings.TrimSpace(string(bytes))
			if len(str) > 0 {
				lines = append(lines, str)
				bytes = []byte{}
			}
		}
	}
	if len(bytes) > 0 {
		lines = append(lines, string(bytes))
	}
	return lines
}

func main() {
	lines := readLines()
	for _, text := range lines {
		// Example input
		// "[1, 2, 4, 8, 16]", "[1, 5, 8, 10, 14]", 0.9"
		// bounds - counts - quantile
		parts := strings.Split(text, "\",")

		var bucketNumbers []float64
		// Strip the ends with square brackets
		text = parts[0][2 : len(parts[0])-1]
		// Parse the bucket bounds
		for _, num := range strings.Split(text, ",") {
			num = strings.TrimSpace(num)
			number, err := strconv.ParseFloat(num, 64)
			if err == nil {
				bucketNumbers = append(bucketNumbers, number)
			}
		}

		var bucketCounts []float64
		// Strip the ends with square brackets
		text = parts[1][2 : len(parts[1])-1]
		// Parse the bucket counts
		for _, num := range strings.Split(text, ",") {
			num = strings.TrimSpace(num)
			number, err := strconv.ParseFloat(num, 64)
			if err == nil {
				bucketCounts = append(bucketCounts, number)
			}
		}

		// Parse the quantile
		q, err := strconv.ParseFloat(parts[2], 64)
		var b buckets
		if err == nil {
			for i := 0; i < len(bucketNumbers); i++ {
				b = append(b, bucket{upperBound: bucketNumbers[i], count: bucketCounts[i]})
			}
		}
		fmt.Println(bucketQuantile(q, b))
	}
}
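As its doc comment says, bucketQuantile returns NaN unless the highest bucket bound is +Inf. A hypothetical companion function (same package main as the file above, which already imports fmt and math) showing a worked call with cumulative bucket counts:

// exampleQuantile demonstrates bucketQuantile on a well-formed histogram:
// 10 observations <= 0.1, 50 <= 0.5, 90 <= 1, and 100 in total (+Inf bucket).
func exampleQuantile() {
	b := buckets{
		{upperBound: 0.1, count: 10},
		{upperBound: 0.5, count: 50},
		{upperBound: 1, count: 90},
		{upperBound: math.Inf(+1), count: 100},
	}
	// rank = 0.5 * 100 = 50 lands exactly at the top of the second bucket,
	// so linear interpolation returns its upper bound.
	fmt.Println(bucketQuantile(0.5, b)) // prints 0.5
}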


@@ -24,16 +24,8 @@ server {
         try_files $uri $uri/ /index.html;
     }

-    location ~ ^/api/(v1|v3)/logs/(tail|livetail){
-        proxy_pass http://query-service:8080;
-        proxy_http_version 1.1;
-
-        # connection will be closed if no data is read for 600s between successive read operations
-        proxy_read_timeout 600s;
-
-        # dont buffer the data send it directly to client.
-        proxy_buffering off;
-        proxy_cache off;
+    location /api/alertmanager {
+        proxy_pass http://alertmanager:9093/api/v2;
     }

     location /api {


@@ -36,9 +36,9 @@ is_mac() {
     [[ $OSTYPE == darwin* ]]
 }

-is_arm64(){
-    [[ `uname -m` == 'arm64' || `uname -m` == 'aarch64' ]]
-}
+# is_arm64(){
+#     [[ `uname -m` == 'arm64' ]]
+# }

 check_os() {
     if is_mac; then
@@ -48,20 +48,10 @@ check_os() {
         return
     fi

-    if is_arm64; then
-        arch="arm64"
-        arch_official="aarch64"
-    else
-        arch="amd64"
-        arch_official="x86_64"
-    fi
-
-    platform=$(uname -s | tr '[:upper:]' '[:lower:]')
-
     os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"

     case "$os_name" in
-        Ubuntu*|Pop!_OS)
+        Ubuntu*)
             desired_os=1
             os="ubuntu"
             package_manager="apt-get"
@@ -91,11 +81,6 @@ check_os() {
             os="centos"
             package_manager="yum"
             ;;
-        Rocky*)
-            desired_os=1
-            os="centos"
-            package_manager="yum"
-            ;;
         SLES*)
             desired_os=1
             os="sles"
@@ -135,7 +120,7 @@ check_ports_occupied() {
         echo "+++++++++++ ERROR ++++++++++++++++++++++"
         echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
-        echo "You can run SigNoz on another port following this guide https://signoz.io/docs/install/troubleshooting/"
+        echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
         echo "++++++++++++++++++++++++++++++++++++++++"
         echo ""
         exit 1
@@ -153,7 +138,7 @@ install_docker() {
     $apt_cmd install software-properties-common gnupg-agent
     curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
     $sudo_cmd add-apt-repository \
-        "deb [arch=$arch] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
+        "deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
     $apt_cmd update
     echo "Installing docker"
     $apt_cmd install docker-ce docker-ce-cli containerd.io
@@ -188,20 +173,12 @@ install_docker() {
 }

-compose_version () {
-    local compose_version
-    compose_version="$(curl -s https://api.github.com/repos/docker/compose/releases/latest | grep 'tag_name' | cut -d\" -f4)"
-    echo "${compose_version:-v2.18.1}"
-}
-
 install_docker_compose() {
     if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
         if [[ ! -f /usr/bin/docker-compose ]];then
             echo "++++++++++++++++++++++++"
             echo "Installing docker-compose"
-            compose_url="https://github.com/docker/compose/releases/download/$(compose_version)/docker-compose-$platform-$arch_official"
-            echo "Downloading docker-compose from $compose_url"
-            $sudo_cmd curl -L "$compose_url" -o /usr/local/bin/docker-compose
+            $sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
             $sudo_cmd chmod +x /usr/local/bin/docker-compose
             $sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
             echo "docker-compose installed!"
@@ -246,7 +223,7 @@ wait_for_containers_start() {
     # The while loop is important because for-loops don't work for dynamic values
     while [[ $timeout -gt 0 ]]; do
-        status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
+        status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
         if [[ status_code -eq 200 ]]; then
             break
         else
@@ -267,7 +244,7 @@ bye() {  # Prints a friendly good bye message and exits the script.
     echo ""
     echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
-    echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
+    # echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
     echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
     echo "++++++++++++++++++++++++++++++++++++++++"
@@ -295,7 +272,7 @@ request_sudo() {
     echo -e "\n\n🙇 We will need sudo access to complete the installation."
     if (( $EUID != 0 )); then
         sudo_cmd="sudo"
-        echo -e "Please enter your sudo password, if prompted."
+        echo -e "Please enter your sudo password, if prompt."
         # $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
         # if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
         #     echo "Need sudo privileges to proceed with the installation."
@@ -518,7 +495,7 @@ if [[ $status_code -ne 200 ]]; then
     echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
-    echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
+    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
     echo "or reach us on SigNoz for support https://signoz.io/slack"
     echo "++++++++++++++++++++++++++++++++++++++++"
@@ -534,15 +511,13 @@ else
     echo ""
     echo -e "🟢 Your frontend is running on http://localhost:3301"
     echo ""
-    echo " By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
-    echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
     echo " To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
     echo ""
     echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
     echo ""
-    echo "👉 Need help in Getting Started?"
+    echo "👉 Need help Getting Started?"
     echo -e "Join us on Slack https://signoz.io/slack"
     echo ""
     echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"


@@ -1,14 +0,0 @@
{
  "name": "e2e",
  "version": "1.0.0",
  "main": "index.js",
  "license": "MIT",
  "devDependencies": {
    "@playwright/test": "^1.22.0",
    "@types/node": "^20.9.2"
  },
  "scripts": {},
  "dependencies": {
    "dotenv": "8.2.0"
  }
}


@@ -1,46 +0,0 @@
import { defineConfig, devices } from "@playwright/test";
import dotenv from "dotenv";

dotenv.config();

export default defineConfig({
  testDir: "./tests",
  fullyParallel: true,
  forbidOnly: !!process.env.CI,
  name: "Signoz E2E",
  retries: process.env.CI ? 2 : 0,
  reporter: process.env.CI ? "github" : "list",
  preserveOutput: "always",
  updateSnapshots: "all",
  quiet: false,
  testMatch: ["**/*.spec.ts"],
  use: {
    trace: "on-first-retry",
    baseURL:
      process.env.PLAYWRIGHT_TEST_BASE_URL || "https://stagingapp.signoz.io/",
  },
  projects: [
    { name: "setup", testMatch: /.*\.setup\.ts/ },
    {
      name: "chromium",
      use: {
        ...devices["Desktop Chrome"],
        // Use prepared auth state.
        storageState: ".auth/user.json",
      },
      dependencies: ["setup"],
    },
  ],
});


@@ -1,37 +0,0 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import dotenv from "dotenv";

dotenv.config();

const authFile = ".auth/user.json";

test("E2E Login Test", async ({ page }) => {
  await Promise.all([page.goto("/"), page.waitForRequest("**/version")]);

  const signup = "Monitor your applications. Find what is causing issues.";

  const el = await page.locator(`text=${signup}`);

  expect(el).toBeVisible();

  await page
    .locator("id=loginEmail")
    .type(
      process.env.PLAYWRIGHT_USERNAME ? process.env.PLAYWRIGHT_USERNAME : ""
    );

  await page.getByText("Next").click();

  await page
    .locator('input[id="currentPassword"]')
    .fill(
      process.env.PLAYWRIGHT_PASSWORD ? process.env.PLAYWRIGHT_PASSWORD : ""
    );

  await page.locator('button[data-attr="signup"]').click();

  await expect(page).toHaveURL(ROUTES.APPLICATION);

  await page.context().storageState({ path: authFile });
});


@@ -1,10 +0,0 @@
export const SERVICE_TABLE_HEADERS = {
  APPLICATION: "Applicaton",
  P99LATENCY: "P99 latency (in ms)",
  ERROR_RATE: "Error Rate (% of total)",
  OPS_PER_SECOND: "Operations Per Second",
};

export const DATA_TEST_IDS = {
  NEW_DASHBOARD_BTN: "create-new-dashboard",
};


@@ -1,40 +0,0 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import { DATA_TEST_IDS, SERVICE_TABLE_HEADERS } from "./contants";

test("Basic Navigation Check across different resources", async ({ page }) => {
  // route to services page and check if the page renders fine with BE contract
  await Promise.all([
    page.goto(ROUTES.APPLICATION),
    page.waitForRequest("**/v1/services"),
  ]);

  const p99Latency = page.locator(
    `th:has-text("${SERVICE_TABLE_HEADERS.P99LATENCY}")`
  );

  await expect(p99Latency).toBeVisible();

  // route to the new trace explorer page and check if the page renders fine
  await page.goto(ROUTES.TRACES_EXPLORER);

  await page.waitForLoadState("networkidle");

  const listViewTable = await page
    .locator('div[role="presentation"]')
    .isVisible();

  expect(listViewTable).toBeTruthy();

  // route to the dashboards page and check if the page renders fine
  await Promise.all([
    page.goto(ROUTES.ALL_DASHBOARD),
    page.waitForRequest("**/v1/dashboards"),
  ]);

  const newDashboardBtn = await page
    .locator(`data-testid=${DATA_TEST_IDS.NEW_DASHBOARD_BTN}`)
    .isVisible();

  expect(newDashboardBtn).toBeTruthy();
});


@@ -1,46 +0,0 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1


"@playwright/test@^1.22.0":
  version "1.40.0"
  resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.40.0.tgz#d06c506977dd7863aa16e07f2136351ecc1be6ed"
  integrity sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==
  dependencies:
    playwright "1.40.0"

"@types/node@^20.9.2":
  version "20.9.2"
  resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.2.tgz#002815c8e87fe0c9369121c78b52e800fadc0ac6"
  integrity sha512-WHZXKFCEyIUJzAwh3NyyTHYSR35SevJ6mZ1nWwJafKtiQbqRTIKSRcw3Ma3acqgsent3RRDqeVwpHntMk+9irg==
  dependencies:
    undici-types "~5.26.4"

dotenv@8.2.0:
  version "8.2.0"
  resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a"
  integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw==

fsevents@2.3.2:
  version "2.3.2"
  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
  integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==

playwright-core@1.40.0:
  version "1.40.0"
  resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.40.0.tgz#82f61e5504cb3097803b6f8bbd98190dd34bdf14"
  integrity sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==

playwright@1.40.0:
  version "1.40.0"
  resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.40.0.tgz#2a1824b9fe5c4fe52ed53db9ea68003543a99df0"
  integrity sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==
  dependencies:
    playwright-core "1.40.0"
  optionalDependencies:
    fsevents "2.3.2"

undici-types@~5.26.4:
  version "5.26.5"
  resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
  integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==


@@ -1,31 +1,48 @@
+FROM golang:1.17-buster AS builder
+
+# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
+ARG LD_FLAGS
+ARG TARGETPLATFORM
+
+ENV CGO_ENABLED=1
+ENV GOPATH=/go
+
+RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
+    export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2)
+
+# Prepare and enter src directory
+WORKDIR /go/src/github.com/signoz/signoz
+
+# Add the sources and proceed with build
+ADD . .
+RUN cd ee/query-service \
+    && go build -tags timetzdata -a -o ./bin/query-service \
+    -ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" \
+    && chmod +x ./bin/query-service
+
 # use a minimal alpine image
-FROM alpine:3.18.6
+FROM alpine:3.7

 # Add Maintainer Info
 LABEL maintainer="signoz"

-# define arguments that can be passed during build time
-ARG TARGETOS TARGETARCH
-
 # add ca-certificates in case you need them
 RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*

 # set working directory
 WORKDIR /root

-# copy the query-service binary
-COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service
+# copy the binary from builder
+COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-service .

 # copy prometheus YAML config
 COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
-COPY pkg/query-service/templates /root/templates
-
-# Make query-service executable for non-root users
-RUN chmod 755 /root /root/query-service

 # run the binary
 ENTRYPOINT ["./query-service"]
-CMD ["-config", "/root/config/prometheus.yml"]
+CMD ["-config", "../config/prometheus.yml"]
+# CMD ["./query-service -config /root/config/prometheus.yml"]

 EXPOSE 8080


@@ -2,44 +2,23 @@ package api

 import (
 	"net/http"
-	"net/http/httputil"
-	"time"

 	"github.com/gorilla/mux"
 	"go.signoz.io/signoz/ee/query-service/dao"
-	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
 	"go.signoz.io/signoz/ee/query-service/interfaces"
 	"go.signoz.io/signoz/ee/query-service/license"
-	"go.signoz.io/signoz/ee/query-service/usage"
 	baseapp "go.signoz.io/signoz/pkg/query-service/app"
-	"go.signoz.io/signoz/pkg/query-service/app/integrations"
-	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
-	"go.signoz.io/signoz/pkg/query-service/cache"
 	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 	rules "go.signoz.io/signoz/pkg/query-service/rules"
 	"go.signoz.io/signoz/pkg/query-service/version"
 )

 type APIHandlerOptions struct {
-	DataConnector                 interfaces.DataConnector
-	SkipConfig                    *basemodel.SkipConfig
-	PreferDelta                   bool
-	PreferSpanMetrics             bool
-	MaxIdleConns                  int
-	MaxOpenConns                  int
-	DialTimeout                   time.Duration
-	AppDao                        dao.ModelDao
-	RulesManager                  *rules.Manager
-	UsageManager                  *usage.Manager
-	FeatureFlags                  baseint.FeatureLookup
-	LicenseManager                *license.Manager
-	IntegrationsController        *integrations.Controller
-	LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
-	Cache                         cache.Cache
-	Gateway                       *httputil.ReverseProxy
-	// Querier Influx Interval
-	FluxInterval time.Duration
+	DataConnector  interfaces.DataConnector
+	AppDao         dao.ModelDao
+	RulesManager   *rules.Manager
+	FeatureFlags   baseint.FeatureLookup
+	LicenseManager *license.Manager
 }

 type APIHandler struct {
@@ -51,21 +30,10 @@ type APIHandler struct {
 func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {

 	baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
-		Reader:                        opts.DataConnector,
-		SkipConfig:                    opts.SkipConfig,
-		PerferDelta:                   opts.PreferDelta,
-		PreferSpanMetrics:             opts.PreferSpanMetrics,
-		MaxIdleConns:                  opts.MaxIdleConns,
-		MaxOpenConns:                  opts.MaxOpenConns,
-		DialTimeout:                   opts.DialTimeout,
-		AppDao:                        opts.AppDao,
-		RuleManager:                   opts.RulesManager,
-		FeatureFlags:                  opts.FeatureFlags,
-		IntegrationsController:        opts.IntegrationsController,
-		LogsParsingPipelineController: opts.LogsParsingPipelineController,
-		Cache:                         opts.Cache,
-		FluxInterval:                  opts.FluxInterval,
-	})
+		Reader:       opts.DataConnector,
+		AppDao:       opts.AppDao,
+		RuleManager:  opts.RulesManager,
+		FeatureFlags: opts.FeatureFlags})

 	if err != nil {
 		return nil, err
@@ -90,107 +58,74 @@ func (ah *APIHandler) LM() *license.Manager {
 	return ah.opts.LicenseManager
 }

-func (ah *APIHandler) UM() *usage.Manager {
-	return ah.opts.UsageManager
-}
-
 func (ah *APIHandler) AppDao() dao.ModelDao {
 	return ah.opts.AppDao
 }

-func (ah *APIHandler) Gateway() *httputil.ReverseProxy {
-	return ah.opts.Gateway
-}
-
 func (ah *APIHandler) CheckFeature(f string) bool {
 	err := ah.FF().CheckFeature(f)
 	return err == nil
 }

 // RegisterRoutes registers routes for this handler on the given router
-func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
+func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
 	// note: add ee override methods first

 	// routes available only in ee version
 	router.HandleFunc("/api/v1/licenses",
-		am.AdminAccess(ah.listLicenses)).
+		baseapp.AdminAccess(ah.listLicenses)).
 		Methods(http.MethodGet)

 	router.HandleFunc("/api/v1/licenses",
-		am.AdminAccess(ah.applyLicense)).
+		baseapp.AdminAccess(ah.applyLicense)).
 		Methods(http.MethodPost)

 	router.HandleFunc("/api/v1/featureFlags",
-		am.OpenAccess(ah.getFeatureFlags)).
+		baseapp.OpenAccess(ah.getFeatureFlags)).
 		Methods(http.MethodGet)

 	router.HandleFunc("/api/v1/loginPrecheck",
-		am.OpenAccess(ah.precheckLogin)).
+		baseapp.OpenAccess(ah.precheckLogin)).
 		Methods(http.MethodGet)

 	// paid plans specific routes
 	router.HandleFunc("/api/v1/complete/saml",
-		am.OpenAccess(ah.receiveSAML)).
+		baseapp.OpenAccess(ah.receiveSAML)).
 		Methods(http.MethodPost)

 	router.HandleFunc("/api/v1/complete/google",
-		am.OpenAccess(ah.receiveGoogleAuth)).
+		baseapp.OpenAccess(ah.receiveGoogleAuth)).
 		Methods(http.MethodGet)

 	router.HandleFunc("/api/v1/orgs/{orgId}/domains",
-		am.AdminAccess(ah.listDomainsByOrg)).
+		baseapp.AdminAccess(ah.listDomainsByOrg)).
 		Methods(http.MethodGet)

 	router.HandleFunc("/api/v1/domains",
-		am.AdminAccess(ah.postDomain)).
+		baseapp.AdminAccess(ah.postDomain)).
 		Methods(http.MethodPost)

 	router.HandleFunc("/api/v1/domains/{id}",
-		am.AdminAccess(ah.putDomain)).
+		baseapp.AdminAccess(ah.putDomain)).
 		Methods(http.MethodPut)

 	router.HandleFunc("/api/v1/domains/{id}",
-		am.AdminAccess(ah.deleteDomain)).
+		baseapp.AdminAccess(ah.deleteDomain)).
 		Methods(http.MethodDelete)

 	// base overrides
-	router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
-	router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
-	router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
-
-	// PAT APIs
-	router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.createPAT)).Methods(http.MethodPost)
-	router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.getPATs)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.updatePAT)).Methods(http.MethodPut)
-	router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.revokePAT)).Methods(http.MethodDelete)
-	router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.checkout)).Methods(http.MethodPost)
-	router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.portalSession)).Methods(http.MethodPost)
-	router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
-	router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
-	router.HandleFunc("/api/v2/licenses",
-		am.ViewAccess(ah.listLicensesV2)).
-		Methods(http.MethodGet)
-
-	// Gateway
-	router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))
-
-	ah.APIHandler.RegisterRoutes(router, am)
+	router.HandleFunc("/api/v1/version", baseapp.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/invite/{token}", baseapp.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/register", baseapp.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/login", baseapp.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/traces/{traceId}", baseapp.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v2/metrics/query_range", baseapp.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)

+	ah.APIHandler.RegisterRoutes(router)
 }

 func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
 	version := version.GetVersion()
-	versionResponse := basemodel.GetVersionResponse{
-		Version:        version,
-		EE:             "Y",
-		SetupCompleted: ah.SetupCompleted,
-	}
-
-	ah.WriteJSON(w, r, versionResponse)
+	ah.WriteJSON(w, r, map[string]string{"version": version, "ee": "Y"})
 }


@@ -5,22 +5,21 @@ import (
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/constants" "go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseauth "go.signoz.io/signoz/pkg/query-service/auth" baseauth "go.signoz.io/signoz/pkg/query-service/auth"
basemodel "go.signoz.io/signoz/pkg/query-service/model" basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
) )
func parseRequest(r *http.Request, req interface{}) error { func parseRequest(r *http.Request, req interface{}) error {
defer r.Body.Close() defer r.Body.Close()
requestBody, err := io.ReadAll(r.Body) requestBody, err := ioutil.ReadAll(r.Body)
if err != nil { if err != nil {
return err return err
} }
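The hunk cuts parseRequest off after the read; a self-contained sketch of the whole helper, assuming the elided remainder simply unmarshals the bytes into req, which matches how the handlers below use it:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// parseRequest reads and JSON-decodes a request body into req.
func parseRequest(r *http.Request, req interface{}) error {
	defer r.Body.Close()
	body, err := io.ReadAll(r.Body)
	if err != nil {
		return err
	}
	return json.Unmarshal(body, req)
}

func main() {
	r, _ := http.NewRequest("POST", "/login", strings.NewReader(`{"email":"a@b.c"}`))
	var req struct {
		Email string `json:"email"`
	}
	fmt.Println(parseRequest(r, &req), req.Email) // <nil> a@b.c
}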
@@ -35,14 +34,14 @@ func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
req := basemodel.LoginRequest{} req := basemodel.LoginRequest{}
err := parseRequest(r, &req) err := parseRequest(r, &req)
if err != nil { if err != nil {
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(err), nil)
return return
} }
ctx := context.Background() ctx := context.Background()
if req.Email != "" && ah.CheckFeature(model.SSO) { if req.Email != "" && ah.CheckFeature(model.SSO) {
var apierr *basemodel.ApiError var apierr basemodel.BaseApiError
_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email) _, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
if apierr != nil && !apierr.IsNil() { if apierr != nil && !apierr.IsNil() {
RespondError(w, apierr, nil) RespondError(w, apierr, nil)
@@ -50,7 +49,7 @@ func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
} }
// if all looks good, call auth // if all looks good, call auth
resp, err := baseauth.Login(ctx, &req) resp, err := auth.Login(ctx, &req)
if ah.HandleError(w, err, http.StatusUnauthorized) { if ah.HandleError(w, err, http.StatusUnauthorized) {
return return
} }
@@ -71,56 +70,49 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
var req *baseauth.RegisterRequest var req *baseauth.RegisterRequest
defer r.Body.Close() defer r.Body.Close()
requestBody, err := io.ReadAll(r.Body) requestBody, err := ioutil.ReadAll(r.Body)
if err != nil { if err != nil {
zap.L().Error("received no input in api", zap.Error(err)) zap.S().Errorf("received no input in api\n", err)
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(err), nil)
return return
} }
err = json.Unmarshal(requestBody, &req) err = json.Unmarshal(requestBody, &req)
if err != nil { if err != nil {
zap.L().Error("received invalid user registration request", zap.Error(err)) zap.S().Errorf("received invalid user registration request", zap.Error(err))
RespondError(w, basemodel.BadRequest(fmt.Errorf("failed to register user")), nil) RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
return return
} }
// get invite object // get invite object
invite, err := baseauth.ValidateInvite(ctx, req) invite, err := baseauth.ValidateInvite(ctx, req)
if err != nil { if err != nil || invite == nil {
zap.L().Error("failed to validate invite token", zap.Error(err)) zap.S().Errorf("failed to validate invite token", err)
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
return
}
if invite == nil {
zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
RespondError(w, basemodel.BadRequest(basemodel.ErrSignupFailed{}), nil)
return
} }
// get auth domain from email domain // get auth domain from email domain
domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email) domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
if apierr != nil { if apierr != nil {
zap.L().Error("failed to get domain from email", zap.Error(apierr)) zap.S().Errorf("failed to get domain from email", apierr)
RespondError(w, basemodel.InternalError(basemodel.ErrSignupFailed{}), nil) RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
} }
precheckResp := &basemodel.PrecheckResponse{ precheckResp := &model.PrecheckResponse{
SSO: false, SSO: false,
IsUser: false, IsUser: false,
} }
if domain != nil && domain.SsoEnabled { if domain != nil && domain.SsoEnabled {
// sso is enabled, create user and respond precheck data // so is enabled, create user and respond precheck data
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true) user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
if apierr != nil { if apierr != nil {
RespondError(w, apierr, nil) RespondError(w, apierr, nil)
return return
} }
var precheckError *basemodel.ApiError var precheckError basemodel.BaseApiError
precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl) precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
if precheckError != nil { if precheckError != nil {
@@ -129,8 +121,8 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
} else { } else {
// no-sso, validate password // no-sso, validate password
if err := baseauth.ValidatePassword(req.Password); err != nil { if err := auth.ValidatePassword(req.Password); err != nil {
RespondError(w, basemodel.InternalError(fmt.Errorf("password is not in a valid format")), nil) RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
return return
} }
@@ -155,7 +147,7 @@ func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
inviteObject, err := baseauth.GetInvite(context.Background(), token) inviteObject, err := baseauth.GetInvite(context.Background(), token)
if err != nil { if err != nil {
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(err), nil)
return return
} }
@@ -198,30 +190,30 @@ func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string)
} }
// receiveGoogleAuth completes google OAuth response and forwards a request // receiveGoogleAuth completes google OAuth response and forwards a request
// to front-end to sign user in // to front-end to sign user in
func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
redirectUri := constants.GetDefaultSiteURL() redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background() ctx := context.Background()
if !ah.CheckFeature(model.SSO) { if !ah.CheckFeature(model.SSO) {
zap.L().Error("[receiveGoogleAuth] sso requested but feature unavailable in org domain") zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently) http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return return
} }
q := r.URL.Query() q := r.URL.Query()
if errType := q.Get("error"); errType != "" { if errType := q.Get("error"); errType != "" {
zap.L().Error("[receiveGoogleAuth] failed to login with google auth", zap.String("error", errType), zap.String("error_description", q.Get("error_description"))) zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently) http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
return return
} }
relayState := q.Get("state") relayState := q.Get("state")
zap.L().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState)) zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
parsedState, err := url.Parse(relayState) parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" { if err != nil || relayState == "" {
zap.L().Error("[receiveGoogleAuth] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r)) zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
@@ -229,33 +221,28 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
// upgrade redirect url from the relay state for better accuracy // upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login") redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// fetch domain by parsing relay state. // fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState) domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil { if err != nil {
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
// now that we have domain, use domain to fetch sso settings. // now that we have domain, use domain to fetch sso settings.
// prepare google callback handler using parsedState - // prepare google callback handler using parsedState -
// which contains redirect URL (front-end endpoint) // which contains redirect URL (front-end endpoint)
callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState) callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
if err != nil {
zap.L().Error("[receiveGoogleAuth] failed to prepare google oauth provider", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
identity, err := callbackHandler.HandleCallback(r) identity, err := callbackHandler.HandleCallback(r)
if err != nil { if err != nil {
zap.L().Error("[receiveGoogleAuth] failed to process HandleCallback ", zap.String("domain", domain.String()), zap.Error(err)) zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email) nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
if err != nil { if err != nil {
zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err)) zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
@@ -263,21 +250,24 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
http.Redirect(w, r, nextPage, http.StatusSeeOther) http.Redirect(w, r, nextPage, http.StatusSeeOther)
} }
// receiveSAML completes a SAML request and gets user logged in // receiveSAML completes a SAML request and gets user logged in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// this is the source url that initiated the login request // this is the source url that initiated the login request
redirectUri := constants.GetDefaultSiteURL() redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background() ctx := context.Background()
if !ah.CheckFeature(model.SSO) { if !ah.CheckFeature(model.SSO) {
zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain") zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently) http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return return
} }
err := r.ParseForm() err := r.ParseForm()
if err != nil { if err != nil {
zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r)) zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
@@ -285,11 +275,11 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// the relay state is sent when a login request is submitted to // the relay state is sent when a login request is submitted to
// Idp. // Idp.
relayState := r.FormValue("RelayState") relayState := r.FormValue("RelayState")
zap.L().Debug("[receiveML] relay state", zap.String("relayState", relayState)) zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
parsedState, err := url.Parse(relayState) parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" { if err != nil || relayState == "" {
zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r)) zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
@@ -297,46 +287,46 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// upgrade redirect url from the relay state for better accuracy // upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login") redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// fetch domain by parsing relay state. // fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState) domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil { if err != nil {
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
sp, err := domain.PrepareSamlRequest(parsedState) sp, err := domain.PrepareSamlRequest(parsedState)
if err != nil { if err != nil {
zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err)) zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse")) assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
if err != nil { if err != nil {
zap.L().Error("[receiveSAML] failed to retrieve assertion info from saml response", zap.String("domain", domain.String()), zap.Error(err)) zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
if assertionInfo.WarningInfo.InvalidTime { if assertionInfo.WarningInfo.InvalidTime {
zap.L().Error("[receiveSAML] expired saml response", zap.String("domain", domain.String()), zap.Error(err)) zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
email := assertionInfo.NameID email := assertionInfo.NameID
if email == "" { if email == "" {
zap.L().Error("[receiveSAML] invalid email in the SSO response", zap.String("domain", domain.String())) zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email) nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
if err != nil { if err != nil {
zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err)) zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri) handleSsoError(w, r, redirectUri)
return return
} }
http.Redirect(w, r, nextPage, http.StatusSeeOther) http.Redirect(w, r, nextPage, http.StatusSeeOther)
} }
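Both SSO callbacks derive the front-end redirect from the RelayState URL the same way: parse it, reject empty or malformed values, then rebuild scheme://host/login. That derivation in isolation (the relay-state value here is an assumed example):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	relayState := "https://app.example.com/some/path?x=1" // assumed example value
	parsed, err := url.Parse(relayState)
	if err != nil || relayState == "" {
		fmt.Println("invalid relay state, falling back to the default site URL")
		return
	}
	// Rebuild the login redirect as receiveGoogleAuth and receiveSAML do.
	redirectUri := fmt.Sprintf("%s://%s%s", parsed.Scheme, parsed.Host, "/login")
	fmt.Println(redirectUri) // https://app.example.com/login
}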

View File

@@ -1,52 +0,0 @@
package api
import (
"net/http"
"github.com/gorilla/mux"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/model"
)
func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
ah.lockUnlockDashboard(w, r, true)
}
func (ah *APIHandler) unlockDashboard(w http.ResponseWriter, r *http.Request) {
ah.lockUnlockDashboard(w, r, false)
}
func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request, lock bool) {
// Locking can only be done by the owner of the dashboard
// or an admin
// - Fetch the dashboard
// - Check if the user is the owner or an admin
// - If yes, lock/unlock the dashboard
// - If no, return 403
// Get the dashboard UUID from the request
uuid := mux.Vars(r)["uuid"]
dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
return
}
user := common.GetUserFromContext(r.Context())
if !auth.IsAdmin(user) && (dashboard.CreateBy != nil && *dashboard.CreateBy != user.Email) {
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: err}, "You are not authorized to lock/unlock this dashboard")
return
}
// Lock/Unlock the dashboard
err = dashboards.LockUnlockDashboard(r.Context(), uuid, lock)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
return
}
ah.Respond(w, "Dashboard updated successfully")
}
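The authorization rule in lockUnlockDashboard reduces to a small predicate: admins may always lock, otherwise the caller must match the dashboard's creator. A sketch with simplified types in place of the real model structs:

package main

import "fmt"

type user struct {
	Email   string
	IsAdmin bool
}

// canLock mirrors the owner-or-admin check above; note that when the
// creator is unknown (nil), the handler allows the action.
func canLock(u user, createdBy *string) bool {
	if u.IsAdmin {
		return true
	}
	return createdBy == nil || *createdBy == u.Email
}

func main() {
	owner := "alice@example.com"
	fmt.Println(canLock(user{Email: "alice@example.com"}, &owner)) // true
	fmt.Println(canLock(user{Email: "bob@example.com"}, &owner))   // false
}

One quirk worth noticing in the original: the forbidden branch passes err into the ApiError, and err is necessarily nil once GetDashboard has succeeded.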

View File

@@ -9,7 +9,6 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
) )
func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
@@ -28,12 +27,12 @@ func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
req := model.OrgDomain{} req := model.OrgDomain{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(err), nil)
return return
} }
if err := req.ValidNew(); err != nil { if err := req.ValidNew(); err != nil {
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(err), nil)
return return
} }
@@ -51,18 +50,18 @@ func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
domainIdStr := mux.Vars(r)["id"] domainIdStr := mux.Vars(r)["id"]
domainId, err := uuid.Parse(domainIdStr) domainId, err := uuid.Parse(domainIdStr)
if err != nil { if err != nil {
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(err), nil)
return return
} }
req := model.OrgDomain{Id: domainId} req := model.OrgDomain{Id: domainId}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(err), nil)
return return
} }
req.Id = domainId req.Id = domainId
if err := req.Valid(nil); err != nil { if err := req.Valid(nil); err != nil {
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(err), nil)
} }
if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil { if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
@@ -78,7 +77,7 @@ func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
domainId, err := uuid.Parse(domainIdStr) domainId, err := uuid.Parse(domainIdStr)
if err != nil { if err != nil {
RespondError(w, basemodel.BadRequest(fmt.Errorf("invalid domain id")), nil) RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
return return
} }

View File

@@ -2,23 +2,9 @@ package api
import ( import (
"net/http" "net/http"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
) )
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
featureSet, err := ah.FF().GetFeatureFlags() featureSet := ah.FF().GetFeatureFlags()
if err != nil {
ah.HandleError(w, err, http.StatusInternalServerError)
return
}
if ah.opts.PreferSpanMetrics {
for idx := range featureSet {
feature := &featureSet[idx]
if feature.Name == basemodel.UseSpanMetrics {
featureSet[idx].Active = true
}
}
}
ah.Respond(w, featureSet) ah.Respond(w, featureSet)
} }
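The removed override walks the feature set and flips one flag in place. The pattern only works when ranging by index; a stand-alone sketch:

package main

import "fmt"

type feature struct {
	Name   string
	Active bool
}

func main() {
	flags := []feature{{"ALERT_CHANNELS", true}, {"USE_SPAN_METRICS", false}}
	// Flip one flag in place by index, as the PreferSpanMetrics override does;
	// ranging by value would mutate a copy and leave the slice unchanged.
	for i := range flags {
		if flags[i].Name == "USE_SPAN_METRICS" {
			flags[i].Active = true
		}
	}
	fmt.Println(flags) // [{ALERT_CHANNELS true} {USE_SPAN_METRICS true}]
}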

View File

@@ -1,34 +0,0 @@
package api
import (
"net/http"
"strings"
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
)
func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
ctx := req.Context()
if !strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+gateway.AllowedPrefix) {
rw.WriteHeader(http.StatusNotFound)
return
}
license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
if err != nil {
RespondError(rw, err, nil)
return
}
//Create headers
var licenseKey string
if license != nil {
licenseKey = license.Key
}
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
req.Header.Set("X-Consumer-Username", "lid:00000000-0000-0000-0000-000000000000")
req.Header.Set("X-Consumer-Groups", "ns:default")
ah.Gateway().ServeHTTP(rw, req)
}
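ServeGatewayHTTP injects identity headers before handing the request to the gateway proxy. A minimal sketch of the same shape, with a plain reverse proxy standing in for ah.Gateway() (upstream address and key are placeholders):

package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	upstream, _ := url.Parse("http://gateway.internal:8080") // assumed upstream
	proxy := httputil.NewSingleHostReverseProxy(upstream)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Set auth/identity headers before forwarding, as ServeGatewayHTTP does.
		r.Header.Set("X-Signoz-Cloud-Api-Key", "license-key-placeholder")
		r.Header.Set("X-Consumer-Groups", "ns:default")
		proxy.ServeHTTP(w, r)
	})
	_ = http.ListenAndServe(":8081", nil)
}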

View File

@@ -4,62 +4,10 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"net/http"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model" "net/http"
"go.uber.org/zap"
) )
type DayWiseBreakdown struct {
Type string `json:"type"`
Breakdown []DayWiseData `json:"breakdown"`
}
type DayWiseData struct {
Timestamp int64 `json:"timestamp"`
Count float64 `json:"count"`
Size float64 `json:"size"`
UnitPrice float64 `json:"unitPrice"`
Quantity float64 `json:"quantity"`
Total float64 `json:"total"`
}
type tierBreakdown struct {
UnitPrice float64 `json:"unitPrice"`
Quantity float64 `json:"quantity"`
TierStart int64 `json:"tierStart"`
TierEnd int64 `json:"tierEnd"`
TierCost float64 `json:"tierCost"`
}
type usageResponse struct {
Type string `json:"type"`
Unit string `json:"unit"`
Tiers []tierBreakdown `json:"tiers"`
DayWiseBreakdown DayWiseBreakdown `json:"dayWiseBreakdown"`
}
type details struct {
Total float64 `json:"total"`
Breakdown []usageResponse `json:"breakdown"`
BaseFee float64 `json:"baseFee"`
BillTotal float64 `json:"billTotal"`
}
type billingDetails struct {
Status string `json:"status"`
Data struct {
BillingPeriodStart int64 `json:"billingPeriodStart"`
BillingPeriodEnd int64 `json:"billingPeriodEnd"`
Details details `json:"details"`
Discount float64 `json:"discount"`
SubscriptionStatus string `json:"subscriptionStatus"`
} `json:"data"`
}
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background()) licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil { if apiError != nil {
@@ -69,18 +17,20 @@ func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
} }
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
var l model.License var l model.License
if err := json.NewDecoder(r.Body).Decode(&l); err != nil { if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
RespondError(w, basemodel.BadRequest(err), nil) RespondError(w, model.BadRequest(err), nil)
return return
} }
if l.Key == "" { if l.Key == "" {
RespondError(w, basemodel.BadRequest(fmt.Errorf("license key is required")), nil) RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return return
} }
license, apiError := ah.LM().Activate(r.Context(), l.Key)
license, apiError := ah.LM().Activate(ctx, l.Key)
if apiError != nil { if apiError != nil {
RespondError(w, apiError, nil) RespondError(w, apiError, nil)
return return
@@ -88,186 +38,3 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, license) ah.Respond(w, license)
} }
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
type checkoutResponse struct {
Status string `json:"status"`
Data struct {
RedirectURL string `json:"redirectURL"`
} `json:"data"`
}
hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
if err != nil {
RespondError(w, basemodel.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req)
if err != nil {
RespondError(w, basemodel.InternalError(err), nil)
return
}
// decode response body
var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, basemodel.InternalError(err), nil)
return
}
ah.Respond(w, resp.Data)
}
func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
licenseKey := r.URL.Query().Get("licenseKey")
if licenseKey == "" {
RespondError(w, basemodel.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
billingURL := fmt.Sprintf("%s/usage?licenseKey=%s", constants.LicenseSignozIo, licenseKey)
hClient := &http.Client{}
req, err := http.NewRequest("GET", billingURL, nil)
if err != nil {
RespondError(w, basemodel.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
billingResp, err := hClient.Do(req)
if err != nil {
RespondError(w, basemodel.InternalError(err), nil)
return
}
// decode response body
var billingResponse billingDetails
if err := json.NewDecoder(billingResp.Body).Decode(&billingResponse); err != nil {
RespondError(w, basemodel.InternalError(err), nil)
return
}
// TODO(srikanthccv):Fetch the current day usage and add it to the response
ah.Respond(w, billingResponse.Data)
}
func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
RespondError(w, apiError, nil)
}
resp := model.Licenses{
TrialStart: -1,
TrialEnd: -1,
OnTrial: false,
WorkSpaceBlock: false,
TrialConvertedToSubscription: false,
GracePeriodEnd: -1,
Licenses: licenses,
}
var currentActiveLicenseKey string
for _, license := range licenses {
if license.IsCurrent {
currentActiveLicenseKey = license.Key
}
}
// For the case when no license is applied i.e community edition
// There will be no trial details or license details
if currentActiveLicenseKey == "" {
ah.Respond(w, resp)
return
}
// Fetch trial details
hClient := &http.Client{}
url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
zap.L().Error("Error while creating request for trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid blocking the UI
ah.Respond(w, resp)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
trialResp, err := hClient.Do(req)
if err != nil {
zap.L().Error("Error while fetching trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
defer trialResp.Body.Close()
trialRespBody, err := io.ReadAll(trialResp.Body)
if err != nil || trialResp.StatusCode != http.StatusOK {
zap.L().Error("Error while fetching trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
// decode response body
var trialRespData model.SubscriptionServerResp
if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
zap.L().Error("Error while decoding trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
resp.TrialStart = trialRespData.Data.TrialStart
resp.TrialEnd = trialRespData.Data.TrialEnd
resp.OnTrial = trialRespData.Data.OnTrial
resp.WorkSpaceBlock = trialRespData.Data.WorkSpaceBlock
resp.TrialConvertedToSubscription = trialRespData.Data.TrialConvertedToSubscription
resp.GracePeriodEnd = trialRespData.Data.GracePeriodEnd
ah.Respond(w, resp)
}
func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
type checkoutResponse struct {
Status string `json:"status"`
Data struct {
RedirectURL string `json:"redirectURL"`
} `json:"data"`
}
hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
if err != nil {
RespondError(w, basemodel.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req)
if err != nil {
RespondError(w, basemodel.InternalError(err), nil)
return
}
// decode response body
var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, basemodel.InternalError(err), nil)
return
}
ah.Respond(w, resp.Data)
}
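checkout, getBilling and portalSession all repeat the same call shape against the license server: build a request, add the X-SigNoz-SecretKey header, decode the JSON body. A sketch of that pattern factored into one helper (URL and key are placeholders); unlike the handlers above, the sketch also closes the response body and checks the status code:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// callLicenseServer factors out the request pattern shared by the
// checkout, billing and portal handlers.
func callLicenseServer(method, url, secret string, out interface{}) error {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return err
	}
	req.Header.Add("X-SigNoz-SecretKey", secret)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("license server returned %d", resp.StatusCode)
	}
	return json.NewDecoder(resp.Body).Decode(out)
}

func main() {
	var out struct {
		Status string `json:"status"`
	}
	err := callLicenseServer(http.MethodGet,
		"https://license.example.com/usage?licenseKey=KEY", "secret", &out)
	fmt.Println(out.Status, err)
}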

View File

@@ -0,0 +1,236 @@
package api
import (
"bytes"
"fmt"
"net/http"
"sync"
"text/template"
"time"
"go.signoz.io/signoz/pkg/query-service/app/metrics"
"go.signoz.io/signoz/pkg/query-service/app/parser"
"go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
querytemplate "go.signoz.io/signoz/pkg/query-service/utils/queryTemplate"
"go.uber.org/zap"
)
func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.CustomMetricsFunction) {
zap.S().Info("CustomMetricsFunction feature is not enabled in this plan")
ah.APIHandler.QueryRangeMetricsV2(w, r)
return
}
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
if apiErrorObj != nil {
zap.S().Errorf(apiErrorObj.Err.Error())
RespondError(w, apiErrorObj, nil)
return
}
// prometheus instant query needs same timestamp
if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.PROM {
metricsQueryRangeParams.Start = metricsQueryRangeParams.End
}
// round up the end to nearest multiple
if metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER {
end := (metricsQueryRangeParams.End) / 1000
step := metricsQueryRangeParams.Step
metricsQueryRangeParams.End = (end / step * step) * 1000
}
type channelResult struct {
Series []*basemodel.Series
TableName string
Err error
Name string
Query string
}
execClickHouseQueries := func(queries map[string]string) ([]*basemodel.Series, []string, error, map[string]string) {
var seriesList []*basemodel.Series
var tableName []string
ch := make(chan channelResult, len(queries))
var wg sync.WaitGroup
for name, query := range queries {
wg.Add(1)
go func(name, query string) {
defer wg.Done()
seriesList, tableName, err := ah.opts.DataConnector.GetMetricResultEE(r.Context(), query)
for _, series := range seriesList {
series.QueryName = name
}
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
return
}
ch <- channelResult{Series: seriesList, TableName: tableName}
}(name, query)
}
wg.Wait()
close(ch)
var errs []error
errQuriesByName := make(map[string]string)
// read values from the channel
for r := range ch {
if r.Err != nil {
errs = append(errs, r.Err)
errQuriesByName[r.Name] = r.Query
continue
}
seriesList = append(seriesList, r.Series...)
tableName = append(tableName, r.TableName)
}
if len(errs) != 0 {
return nil, nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
}
return seriesList, tableName, nil, nil
}
execPromQueries := func(metricsQueryRangeParams *basemodel.QueryRangeParamsV2) ([]*basemodel.Series, error, map[string]string) {
var seriesList []*basemodel.Series
ch := make(chan channelResult, len(metricsQueryRangeParams.CompositeMetricQuery.PromQueries))
var wg sync.WaitGroup
for name, query := range metricsQueryRangeParams.CompositeMetricQuery.PromQueries {
if query.Disabled {
continue
}
wg.Add(1)
go func(name string, query *basemodel.PromQuery) {
var seriesList []*basemodel.Series
defer wg.Done()
tmpl := template.New("promql-query")
tmpl, tmplErr := tmpl.Parse(query.Query)
if tmplErr != nil {
ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
return
}
var queryBuf bytes.Buffer
tmplErr = tmpl.Execute(&queryBuf, metricsQueryRangeParams.Variables)
if tmplErr != nil {
ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
return
}
query.Query = queryBuf.String()
queryModel := basemodel.QueryRangeParams{
Start: time.UnixMilli(metricsQueryRangeParams.Start),
End: time.UnixMilli(metricsQueryRangeParams.End),
Step: time.Duration(metricsQueryRangeParams.Step * int64(time.Second)),
Query: query.Query,
}
promResult, _, err := ah.opts.DataConnector.GetQueryRangeResult(r.Context(), &queryModel)
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query.Query}
return
}
matrix, _ := promResult.Matrix()
for _, v := range matrix {
var s basemodel.Series
s.QueryName = name
s.Labels = v.Metric.Copy().Map()
for _, p := range v.Points {
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.V})
}
seriesList = append(seriesList, &s)
}
ch <- channelResult{Series: seriesList}
}(name, query)
}
wg.Wait()
close(ch)
var errs []error
errQuriesByName := make(map[string]string)
// read values from the channel
for r := range ch {
if r.Err != nil {
errs = append(errs, r.Err)
errQuriesByName[r.Name] = r.Query
continue
}
seriesList = append(seriesList, r.Series...)
}
if len(errs) != 0 {
return nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
}
return seriesList, nil, nil
}
var seriesList []*basemodel.Series
var tableName []string
var err error
var errQuriesByName map[string]string
switch metricsQueryRangeParams.CompositeMetricQuery.QueryType {
case basemodel.QUERY_BUILDER:
runQueries := metrics.PrepareBuilderMetricQueries(metricsQueryRangeParams, constants.SIGNOZ_TIMESERIES_TABLENAME)
if runQueries.Err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: runQueries.Err}, nil)
return
}
seriesList, tableName, err, errQuriesByName = execClickHouseQueries(runQueries.Queries)
case basemodel.CLICKHOUSE:
queries := make(map[string]string)
for name, chQuery := range metricsQueryRangeParams.CompositeMetricQuery.ClickHouseQueries {
if chQuery.Disabled {
continue
}
tmpl := template.New("clickhouse-query")
tmpl, err := tmpl.Parse(chQuery.Query)
if err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
return
}
var query bytes.Buffer
// replace go template variables
querytemplate.AssignReservedVars(metricsQueryRangeParams)
err = tmpl.Execute(&query, metricsQueryRangeParams.Variables)
if err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
return
}
queries[name] = query.String()
}
seriesList, tableName, err, errQuriesByName = execClickHouseQueries(queries)
case basemodel.PROM:
seriesList, err, errQuriesByName = execPromQueries(metricsQueryRangeParams)
default:
err = fmt.Errorf("invalid query type")
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, errQuriesByName)
return
}
if err != nil {
apiErrObj := &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
return
}
if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
len(seriesList) > 1 &&
(metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER ||
metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.CLICKHOUSE) {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: fmt.Errorf("invalid: query resulted in more than one series for value type")}, nil)
return
}
type ResponseFormat struct {
ResultType string `json:"resultType"`
Result []*basemodel.Series `json:"result"`
TableName []string `json:"tableName"`
}
resp := ResponseFormat{ResultType: "matrix", Result: seriesList, TableName: tableName}
ah.Respond(w, resp)
}
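queryRangeMetricsV2 executes every sub-query concurrently and collects results over a channel sized to the number of queries. Stripped of the ClickHouse and PromQL specifics, the fan-out/fan-in skeleton is:

package main

import (
	"fmt"
	"sync"
)

type result struct {
	Name string
	Val  int
	Err  error
}

func main() {
	queries := map[string]int{"A": 1, "B": 2, "C": 3}
	ch := make(chan result, len(queries)) // buffered: senders never block
	var wg sync.WaitGroup
	for name, q := range queries {
		wg.Add(1)
		go func(name string, q int) {
			defer wg.Done()
			ch <- result{Name: name, Val: q * q} // stand-in for one query call
		}(name, q)
	}
	wg.Wait()
	close(ch) // safe: every send happened before Wait returned
	for r := range ch {
		fmt.Println(r.Name, r.Val, r.Err)
	}
}

Buffering the channel to len(queries) is what makes the Wait-then-close sequence safe, and it is the same choice the handler makes.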

View File

@@ -1,165 +0,0 @@
package api
import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func generatePATToken() string {
// Generate a 32-byte random token.
token := make([]byte, 32)
rand.Read(token)
// Encode the token in base64.
encodedToken := base64.StdEncoding.EncodeToString(token)
return encodedToken
}
func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
req := model.CreatePATRequestBody{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, basemodel.BadRequest(err), nil)
return
}
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &basemodel.ApiError{
Typ: basemodel.ErrorUnauthorized,
Err: err,
}, nil)
return
}
pat := model.PAT{
Name: req.Name,
Role: req.Role,
ExpiresAt: req.ExpiresInDays,
}
err = validatePATRequest(pat)
if err != nil {
RespondError(w, basemodel.BadRequest(err), nil)
return
}
// All the PATs are associated with the user creating the PAT.
pat.UserID = user.Id
pat.CreatedAt = time.Now().Unix()
pat.UpdatedAt = time.Now().Unix()
pat.LastUsed = 0
pat.Token = generatePATToken()
if pat.ExpiresAt != 0 {
// convert expiresAt to unix timestamp from days
pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60)
}
zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
var apierr *basemodel.ApiError
if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, &pat)
}
func validatePATRequest(req model.PAT) error {
if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
return fmt.Errorf("valid role is required")
}
if req.ExpiresAt < 0 {
return fmt.Errorf("valid expiresAt is required")
}
if req.Name == "" {
return fmt.Errorf("valid name is required")
}
return nil
}
func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
req := model.PAT{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, basemodel.BadRequest(err), nil)
return
}
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &basemodel.ApiError{
Typ: basemodel.ErrorUnauthorized,
Err: err,
}, nil)
return
}
err = validatePATRequest(req)
if err != nil {
RespondError(w, basemodel.BadRequest(err), nil)
return
}
req.UpdatedByUserID = user.Id
id := mux.Vars(r)["id"]
req.UpdatedAt = time.Now().Unix()
zap.L().Info("Got Update PAT request", zap.Any("pat", req))
var apierr *basemodel.ApiError
if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, map[string]string{"data": "pat updated successfully"})
}
func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &basemodel.ApiError{
Typ: basemodel.ErrorUnauthorized,
Err: err,
}, nil)
return
}
zap.L().Info("Get PATs for user", zap.String("user_id", user.Id))
pats, apierr := ah.AppDao().ListPATs(ctx)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, pats)
}
func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
id := mux.Vars(r)["id"]
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &basemodel.ApiError{
Typ: basemodel.ErrorUnauthorized,
Err: err,
}, nil)
return
}
zap.L().Info("Revoke PAT with id", zap.String("id", id))
if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, map[string]string{"data": "pat revoked successfully"})
}
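Two details of the PAT flow above are worth isolating: the token is 32 random bytes base64-encoded, and ExpiresAt arrives as a day count that is converted to an absolute unix timestamp. A sketch of both, with the one deviation that it surfaces the crypto/rand error instead of discarding it:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"time"
)

// generateToken mirrors generatePATToken, but returns the rand.Read error.
func generateToken() (string, error) {
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(b), nil
}

func main() {
	tok, err := generateToken()
	if err != nil {
		panic(err)
	}
	// Same conversion as createPAT: days from now -> absolute unix seconds.
	expiresInDays := int64(30)
	expiresAt := time.Now().Unix() + expiresInDays*24*60*60
	fmt.Println(tok, expiresAt)
}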

View File

@@ -7,6 +7,6 @@ import (
basemodel "go.signoz.io/signoz/pkg/query-service/model" basemodel "go.signoz.io/signoz/pkg/query-service/model"
) )
func RespondError(w http.ResponseWriter, apiErr *basemodel.ApiError, data interface{}) { func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {
baseapp.RespondError(w, apiErr) baseapp.RespondError(w, apiErr, data)
} }

View File

@@ -2,8 +2,11 @@ package api
import ( import (
"net/http" "net/http"
"strconv"
"go.signoz.io/signoz/ee/query-service/app/db" "go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseapp "go.signoz.io/signoz/pkg/query-service/app" baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model" basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap" "go.uber.org/zap"
@@ -12,17 +15,21 @@ import (
func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.SmartTraceDetail) { if !ah.CheckFeature(basemodel.SmartTraceDetail) {
zap.L().Info("SmartTraceDetail feature is not enabled in this plan") zap.S().Info("SmartTraceDetail feature is not enabled in this plan")
ah.APIHandler.SearchTraces(w, r) ah.APIHandler.SearchTraces(w, r)
return return
} }
searchTracesParams, err := baseapp.ParseSearchTracesParams(r) traceId, spanId, levelUpInt, levelDownInt, err := baseapp.ParseSearchTracesParams(r)
if err != nil { if err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, "Error reading params") RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
return return
} }
spanLimit, err := strconv.Atoi(constants.SpanLimitStr)
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm) if err != nil {
zap.S().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable: ", err)
return
}
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm)
if ah.HandleError(w, err, http.StatusBadRequest) { if ah.HandleError(w, err, http.StatusBadRequest) {
return return
} }
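searchTraces shows the EE feature-gating idiom: check the plan's feature flag and fall back to the community handler when it is off. The shape in isolation, with a map standing in for the real feature lookup:

package main

import (
	"fmt"
	"net/http"
)

type handler struct {
	features map[string]bool // stand-in for the real FeatureLookup
}

// serveTraces falls back to the base implementation when the paid
// feature is off, as searchTraces does with SmartTraceDetail.
func (h handler) serveTraces(w http.ResponseWriter, r *http.Request) {
	if !h.features["SMART_TRACE_DETAIL"] {
		h.baseSearchTraces(w, r) // community behaviour
		return
	}
	fmt.Fprintln(w, "smart trace detail") // EE behaviour
}

func (h handler) baseSearchTraces(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, "plain trace detail")
}

func main() {
	http.HandleFunc("/traces", handler{features: map[string]bool{}}.serveTraces)
	_ = http.ListenAndServe(":8080", nil)
}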

View File

@@ -22,14 +22,14 @@ import (
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) { func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
defer utils.Elapsed("GetMetricResult")() defer utils.Elapsed("GetMetricResult")()
zap.L().Info("Executing metric result query: ", zap.String("query", query)) zap.S().Infof("Executing metric result query: %s", query)
var hash string var hash string
// If getSubTreeSpans function is used in the clickhouse query // If getSubTreeSpans function is used in the clickhouse query
if strings.Contains(query, "getSubTreeSpans(") { if strings.Index(query, "getSubTreeSpans(") != -1 {
var err error var err error
query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash) query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
if err == fmt.Errorf("no spans found for the given query") { if err == fmt.Errorf("No spans found for the given query") {
return nil, "", nil return nil, "", nil
} }
if err != nil { if err != nil {
@@ -38,8 +38,9 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
} }
rows, err := r.conn.Query(ctx, query) rows, err := r.conn.Query(ctx, query)
zap.S().Debug(query)
if err != nil { if err != nil {
zap.L().Error("Error in processing query", zap.Error(err)) zap.S().Debug("Error in processing query: ", err)
return nil, "", fmt.Errorf("error in processing query") return nil, "", fmt.Errorf("error in processing query")
} }
@@ -116,7 +117,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()) groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
} }
default: default:
zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName)) zap.S().Errorf("invalid var found in metric builder query result", v, colName)
} }
} }
sort.Strings(groupBy) sort.Strings(groupBy)
@@ -139,7 +140,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
} }
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash) // err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
// if err != nil { // if err != nil {
// zap.L().Error("Error in dropping temporary table: ", err) // zap.S().Error("Error in dropping temporary table: ", err)
// return nil, err // return nil, err
// } // }
if hash == "" { if hash == "" {
@@ -151,7 +152,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) { func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
zap.L().Debug("Executing getSubTreeSpans function") zap.S().Debugf("Executing getSubTreeSpans function")
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;` // str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
@@ -161,29 +162,29 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash) err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
if err != nil { if err != nil {
zap.L().Error("Error in dropping temporary table", zap.Error(err)) zap.S().Error("Error in dropping temporary table: ", err)
return query, hash, err return query, hash, err
} }
// Create temporary table to store the getSubTreeSpans() results // Create temporary table to store the getSubTreeSpans() results
zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash)) zap.S().Debugf("Creating temporary table getSubTreeSpans%s", hash)
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)") err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
if err != nil { if err != nil {
zap.L().Error("Error in creating temporary table", zap.Error(err)) zap.S().Error("Error in creating temporary table: ", err)
return query, hash, err return query, hash, err
} }
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
getSpansSubQuery := subtreeInput getSpansSubQuery := subtreeInput
// Execute the subTree query // Execute the subTree query
zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery)) zap.S().Debugf("Executing subTree query: %s", getSpansSubQuery)
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery) err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
// zap.L().Info(getSpansSubQuery) // zap.S().Info(getSpansSubQuery)
if err != nil { if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err)) zap.S().Debug("Error in processing sql query: ", err)
return query, hash, fmt.Errorf("error in processing sql query") return query, hash, fmt.Errorf("Error in processing sql query")
} }
var searchScanResponses []basemodel.SearchSpanDBResponseItem var searchScanResponses []basemodel.SearchSpanDBResponseItem
@@ -193,18 +194,18 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable) modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
if len(getSpansSubQueryDBResponses) == 0 { if len(getSpansSubQueryDBResponses) == 0 {
return query, hash, fmt.Errorf("no spans found for the given query") return query, hash, fmt.Errorf("No spans found for the given query")
} }
zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery)) zap.S().Debugf("Executing query to fetch all the spans from the same TraceID: %s", modelQuery)
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID) err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
if err != nil { if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err)) zap.S().Debug("Error in processing sql query: ", err)
return query, hash, fmt.Errorf("error in processing sql query") return query, hash, fmt.Errorf("Error in processing sql query")
} }
// Process model to fetch the spans // Process model to fetch the spans
zap.L().Debug("Processing model to fetch the spans") zap.S().Debugf("Processing model to fetch the spans")
searchSpanResponses := []basemodel.SearchSpanResponseItem{} searchSpanResponses := []basemodel.SearchSpanResponseItem{}
for _, item := range searchScanResponses { for _, item := range searchScanResponses {
var jsonItem basemodel.SearchSpanResponseItem var jsonItem basemodel.SearchSpanResponseItem
@@ -217,17 +218,17 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
} }
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash // Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
// Use map to store pointer to the spans to avoid duplicates and save memory // Use map to store pointer to the spans to avoid duplicates and save memory
zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash)) zap.S().Debugf("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans%s", hash)
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses) treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
if err != nil { if err != nil {
zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err)) zap.S().Error("Error in getSubTreeAlgorithm function: ", err)
return query, hash, err return query, hash, err
} }
zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash)) zap.S().Debugf("Preparing batch to store subtree spans in temporary table getSubTreeSpans%s", hash)
statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash)) statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
if err != nil { if err != nil {
zap.L().Error("Error in preparing batch statement", zap.Error(err)) zap.S().Error("Error in preparing batch statement: ", err)
return query, hash, err return query, hash, err
} }
for _, span := range treeSearchResponse { for _, span := range treeSearchResponse {
@@ -250,20 +251,19 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
span.Events, span.Events,
) )
if err != nil { if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err)) zap.S().Debug("Error in processing sql query: ", err)
return query, hash, err return query, hash, err
} }
} }
zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash)) zap.S().Debugf("Inserting the subtree spans in temporary table getSubTreeSpans%s", hash)
err = statement.Send() err = statement.Send()
if err != nil { if err != nil {
zap.L().Error("Error in sending statement", zap.Error(err)) zap.S().Error("Error in sending statement: ", err)
return query, hash, err return query, hash, err
} }
return query, hash, nil return query, hash, nil
} }
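The subtree spans are written back through the clickhouse-go batch API: prepare the INSERT once, Append per row, Send once. A minimal sketch of that flow (server address, table and columns are illustrative, and a reachable ClickHouse is assumed):

package main

import (
	"context"

	"github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	ctx := context.Background()
	conn, err := clickhouse.Open(&clickhouse.Options{Addr: []string{"localhost:9000"}})
	if err != nil {
		panic(err)
	}
	// Prepare once, append per row, send once: the same flow used for the
	// temporary getSubTreeSpans<hash> table above.
	batch, err := conn.PrepareBatch(ctx, "INSERT INTO demo_spans")
	if err != nil {
		panic(err)
	}
	if err := batch.Append("span-1", uint64(1500)); err != nil {
		panic(err)
	}
	if err := batch.Send(); err != nil {
		panic(err)
	}
}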
//lint:ignore SA4009 return hash is fed to the query
func processQuery(query string, hash string) (string, string, string) { func processQuery(query string, hash string) (string, string, string) {
re3 := regexp.MustCompile(`getSubTreeSpans`) re3 := regexp.MustCompile(`getSubTreeSpans`)
@@ -323,7 +323,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
spans = append(spans, span) spans = append(spans, span)
} }
zap.L().Debug("Building Tree") zap.S().Debug("Building Tree")
roots, err := buildSpanTrees(&spans) roots, err := buildSpanTrees(&spans)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -333,7 +333,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
// For each root, get the subtree spans // For each root, get the subtree spans
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses { for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
targetSpan := &model.SpanForTraceDetails{} targetSpan := &model.SpanForTraceDetails{}
// zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses))) // zap.S().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
// Search target span object in the tree // Search target span object in the tree
for _, root := range roots { for _, root := range roots {
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID) targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
@@ -341,7 +341,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
break break
} }
if err != nil { if err != nil {
zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err)) zap.S().Error("Error during BreadthFirstSearch(): ", err)
return nil, err return nil, err
} }
} }
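breadthFirstSearch, used by both the smart-trace algorithm and the subtree builder, is a plain BFS over the span tree with a visited set. A simplified, self-contained version:

package main

import "fmt"

type span struct {
	SpanID   string
	Children []*span
}

// bfs finds a span by id, matching the shape of breadthFirstSearch
// above (simplified types, no error return).
func bfs(root *span, targetID string) *span {
	queue := []*span{root}
	visited := map[string]bool{}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if cur.SpanID == targetID {
			return cur
		}
		visited[cur.SpanID] = true
		for _, c := range cur.Children {
			if !visited[c.SpanID] {
				queue = append(queue, c)
			}
		}
	}
	return nil
}

func main() {
	leaf := &span{SpanID: "c"}
	root := &span{SpanID: "a", Children: []*span{{SpanID: "b", Children: []*span{leaf}}}}
	fmt.Println(bfs(root, "c") == leaf) // true
}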

View File

@@ -1,8 +1,6 @@
package db
import (
-"time"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/jmoiron/sqlx"
@@ -17,16 +15,8 @@ type ClickhouseReader struct {
*basechr.ClickHouseReader
}
-func NewDataConnector(
-localDB *sqlx.DB,
-promConfigPath string,
-lm interfaces.FeatureLookup,
-maxIdleConns int,
-maxOpenConns int,
-dialTimeout time.Duration,
-cluster string,
-) *ClickhouseReader {
-ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
+func NewDataConnector(localDB *sqlx.DB, promConfigPath string, lm interfaces.FeatureLookup) *ClickhouseReader {
+ch := basechr.NewReader(localDB, promConfigPath, lm)
return &ClickhouseReader{
conn: ch.GetConn(),
appdb: localDB,
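
The EE ClickhouseReader in this hunk is a thin wrapper: it embeds the base reader so all of its methods are promoted, while keeping direct handles to the connection and app DB. A stripped-down sketch of that embedding pattern; BaseReader and the field types are stand-ins, not the real basechr types:

package db

// BaseReader stands in for basechr.ClickHouseReader.
type BaseReader struct{ conn interface{} }

func (b *BaseReader) GetConn() interface{} { return b.conn }

// ClickhouseReader embeds BaseReader, so BaseReader's methods are promoted
// onto it; conn and appdb are kept as direct fields for convenience.
type ClickhouseReader struct {
	conn  interface{}
	appdb interface{}
	*BaseReader
}

// NewDataConnector mirrors the constructor shape in the hunk.
func NewDataConnector(base *BaseReader, appdb interface{}) *ClickhouseReader {
	return &ClickhouseReader{
		conn:       base.GetConn(),
		appdb:      appdb,
		BaseReader: base,
	}
}
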


@@ -13,11 +13,6 @@ import (
func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
var spans []*model.SpanForTraceDetails
-// if targetSpanId is null or not present then randomly select a span as targetSpanId
-if (targetSpanId == "" || targetSpanId == "null") && len(payload) > 0 {
-targetSpanId = payload[0].SpanID
-}
// Build a slice of spans from the payload
for _, spanItem := range payload {
var parentID string
@@ -54,14 +49,14 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
break
}
if err != nil {
-zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
+zap.S().Error("Error during BreadthFirstSearch(): ", err)
return nil, err
}
}
// If the target span is not found, return span not found error
if targetSpan == nil {
-return nil, errors.New("span not found")
+return nil, errors.New("Span not found")
}
// Build the final result
@@ -118,9 +113,8 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
}
searchSpansResult := []basemodel.SearchSpansResult{{
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
Events: make([][]interface{}, len(resultSpansSet)),
-IsSubTree: true,
},
}
@@ -192,7 +186,7 @@ func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTra
// If the parent span is not found, add current span to list of roots
if parent == nil {
-// zap.L().Debug("Parent Span not found parent_id: ", span.ParentID)
+// zap.S().Debug("Parent Span not found parent_id: ", span.ParentID)
roots = append(roots, span)
span.ParentID = ""
continue
@@ -219,7 +213,7 @@ func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*
}
for _, child := range current.Children {
-if ok := visited[child.SpanID]; !ok {
+if ok, _ := visited[child.SpanID]; !ok {
queue = append(queue, child)
}
}
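
The breadthFirstSearch change above is purely about the map-lookup idiom; both forms read the visited flag for a span ID, and a missing key yields the zero value false. A runnable sketch of the same BFS shape over a span tree (SpanNode is a stand-in for model.SpanForTraceDetails):

package main

import (
	"errors"
	"fmt"
)

// SpanNode is a stand-in for model.SpanForTraceDetails.
type SpanNode struct {
	SpanID   string
	Children []*SpanNode
}

// breadthFirstSearch walks the tree level by level until it finds targetId.
func breadthFirstSearch(root *SpanNode, targetId string) (*SpanNode, error) {
	queue := []*SpanNode{root}
	visited := map[string]bool{}
	for len(queue) > 0 {
		current := queue[0]
		queue = queue[1:]
		visited[current.SpanID] = true
		if current.SpanID == targetId {
			return current, nil
		}
		for _, child := range current.Children {
			// Equivalent to the diff's `if ok := visited[child.SpanID]; !ok`:
			// unseen children are enqueued exactly once.
			if !visited[child.SpanID] {
				queue = append(queue, child)
			}
		}
	}
	return nil, errors.New("span not found")
}

func main() {
	root := &SpanNode{SpanID: "a", Children: []*SpanNode{{SpanID: "b"}, {SpanID: "c"}}}
	if n, err := breadthFirstSearch(root, "c"); err == nil {
		fmt.Println("found:", n.SpanID)
	}
}
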


@@ -1,17 +1,12 @@
package app
import (
-"bytes"
"context"
-"encoding/json"
"fmt"
-"io"
"net"
"net/http"
-"net/http/httputil"
_ "net/http/pprof" // http profiler
"os"
-"regexp"
"time"
"github.com/gorilla/handlers"
@@ -22,31 +17,16 @@ import (
"github.com/soheilhy/cmux"
"go.signoz.io/signoz/ee/query-service/app/api"
"go.signoz.io/signoz/ee/query-service/app/db"
-"go.signoz.io/signoz/ee/query-service/auth"
-"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/dao"
-"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces"
-baseauth "go.signoz.io/signoz/pkg/query-service/auth"
-v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
-"go.signoz.io/signoz/pkg/query-service/agentConf"
-baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
-baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
-"go.signoz.io/signoz/pkg/query-service/app/integrations"
-"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
-"go.signoz.io/signoz/pkg/query-service/app/opamp"
-opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
-"go.signoz.io/signoz/pkg/query-service/cache"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
-basemodel "go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/telemetry"
@@ -54,31 +34,21 @@ import (
"go.uber.org/zap"
)
-const AppDbEngine = "sqlite"
type ServerOptions struct {
PromConfigPath string
-SkipTopLvlOpsPath string
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
-PreferDelta bool
-PreferSpanMetrics bool
-MaxIdleConns int
-MaxOpenConns int
-DialTimeout time.Duration
-CacheConfigPath string
-FluxInterval string
-Cluster string
-GatewayUrl string
}
// Server runs HTTP api service
type Server struct {
serverOptions *ServerOptions
-conn net.Listener
ruleManager *rules.Manager
-separatePorts bool
// public http router
httpConn net.Listener
@@ -88,10 +58,8 @@ type Server struct {
privateConn net.Listener
privateHTTP *http.Server
-// Usage manager
-usageManager *usage.Manager
-opampServer *opamp.Server
+// feature flags
+featureLookup baseint.FeatureLookup
unavailableChannel chan healthcheck.Status
}
@@ -109,8 +77,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
-baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)
localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
if err != nil {
@@ -119,33 +85,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
localDB.SetMaxOpenConns(10)
-gatewayFeature := basemodel.Feature{
-Name: "GATEWAY",
-Active: false,
-Usage: 0,
-UsageLimit: -1,
-Route: "",
-}
-//Activate this feature if the url is not empty
-var gatewayProxy *httputil.ReverseProxy
-if serverOptions.GatewayUrl == "" {
-gatewayFeature.Active = false
-gatewayProxy, err = gateway.NewNoopProxy()
-if err != nil {
-return nil, err
-}
-} else {
-zap.L().Info("Enabling gateway feature flag ...")
-gatewayFeature.Active = true
-gatewayProxy, err = gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
-if err != nil {
-return nil, err
-}
-}
// initiate license manager
-lm, err := licensepkg.StartManager("sqlite", localDB, gatewayFeature)
+lm, err := licensepkg.StartManager("sqlite", localDB)
if err != nil {
return nil, err
}
@@ -157,28 +98,12 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
var reader interfaces.DataConnector
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
-zap.L().Info("Using ClickHouse as datastore ...")
+zap.S().Info("Using ClickHouse as datastore ...")
-qb := db.NewDataConnector(
-localDB,
-serverOptions.PromConfigPath,
-lm,
-serverOptions.MaxIdleConns,
-serverOptions.MaxOpenConns,
-serverOptions.DialTimeout,
-serverOptions.Cluster,
-)
+qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath, lm)
go qb.Start(readerReady)
reader = qb
} else {
-return nil, fmt.Errorf("storage type: %s is not supported in query service", storage)
+return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
}
-skipConfig := &basemodel.SkipConfig{}
-if serverOptions.SkipTopLvlOpsPath != "" {
-// read skip config
-skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
-if err != nil {
-return nil, err
-}
-}
<-readerReady
@@ -187,46 +112,14 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.RuleRepoURL,
localDB,
reader,
-serverOptions.DisableRules,
-lm)
+serverOptions.DisableRules)
if err != nil {
return nil, err
}
-// initiate opamp
-_, err = opAmpModel.InitDB(localDB)
-if err != nil {
-return nil, err
-}
-integrationsController, err := integrations.NewController(localDB)
-if err != nil {
-return nil, fmt.Errorf(
-"couldn't create integrations controller: %w", err,
-)
-}
-// ingestion pipelines manager
-logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
-localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
-)
-if err != nil {
-return nil, err
-}
-// initiate agent config handler
-agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
-DB: localDB,
-DBEngine: AppDbEngine,
-AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
-})
-if err != nil {
-return nil, err
-}
// start the usagemanager
-usageManager, err := usage.New("sqlite", modelDao, lm.GetRepo(), reader.GetConn())
+usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn())
if err != nil {
return nil, err
}
@@ -236,41 +129,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
telemetry.GetInstance().SetReader(reader)
-telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
-var c cache.Cache
-if serverOptions.CacheConfigPath != "" {
-cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
-if err != nil {
-return nil, err
-}
-c = cache.NewCache(cacheOpts)
-}
-fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
-if err != nil {
-return nil, err
-}
apiOpts := api.APIHandlerOptions{
DataConnector: reader,
-SkipConfig: skipConfig,
-PreferDelta: serverOptions.PreferDelta,
-PreferSpanMetrics: serverOptions.PreferSpanMetrics,
-MaxIdleConns: serverOptions.MaxIdleConns,
-MaxOpenConns: serverOptions.MaxOpenConns,
-DialTimeout: serverOptions.DialTimeout,
-AppDao: modelDao,
-RulesManager: rm,
-UsageManager: usageManager,
-FeatureFlags: lm,
-LicenseManager: lm,
-IntegrationsController: integrationsController,
-LogsParsingPipelineController: logParsingPipelineController,
-Cache: c,
-FluxInterval: fluxInterval,
-Gateway: gatewayProxy,
+AppDao: modelDao,
+RulesManager: rm,
+FeatureFlags: lm,
+LicenseManager: lm,
}
apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -284,7 +149,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
ruleManager: rm,
serverOptions: serverOptions,
unavailableChannel: make(chan healthcheck.Status),
-usageManager: usageManager,
}
httpServer, err := s.createPublicServer(apiHandler)
@@ -302,18 +166,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
s.privateHTTP = privateServer
-s.opampServer = opamp.InitializeServer(
-&opAmpModel.AllAgents, agentConfMgr,
-)
return s, nil
}
func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
-r := baseapp.NewRouter()
-r.Use(baseapp.LogCommentEnricher)
+r := mux.NewRouter()
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddlewarePrivate)
@@ -325,7 +184,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
// ip here for alert manager
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
-AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY"},
+AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
})
handler := c.Handler(r)
@@ -338,24 +197,15 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
-r := baseapp.NewRouter()
-// add auth middleware
-getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
-return auth.GetUserFromRequest(r, apiHandler)
-}
-am := baseapp.NewAuthMiddleware(getUserFromRequest)
-r.Use(baseapp.LogCommentEnricher)
+r := mux.NewRouter()
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)
-apiHandler.RegisterRoutes(r, am)
-apiHandler.RegisterLogsRoutes(r, am)
-apiHandler.RegisterIntegrationRoutes(r, am)
-apiHandler.RegisterQueryRangeV3Routes(r, am)
-apiHandler.RegisterQueryRangeV4Routes(r, am)
+apiHandler.RegisterRoutes(r)
+apiHandler.RegisterMetricsRoutes(r)
+apiHandler.RegisterLogsRoutes(r)
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
@@ -379,7 +229,7 @@ func loggingMiddleware(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
-zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path))
+zap.S().Info(path, "\ttimeTaken: ", time.Now().Sub(startTime))
})
}
@@ -391,7 +241,7 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
-zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
+zap.S().Info(path, "\tprivatePort: true", "\ttimeTaken: ", time.Now().Sub(startTime))
})
}
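
Both middlewares follow the standard net/http wrapping pattern: time the inner handler, then log path and duration. A stripped-down, runnable version (gorilla/mux route-template lookup omitted; plain r.URL.Path used instead, and the route is illustrative):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// loggingMiddleware wraps a handler and logs how long each request took.
func loggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		// time.Since(start) is the idiomatic form of time.Now().Sub(start).
		fmt.Printf("%s timeTaken: %s\n", r.URL.Path, time.Since(start))
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/api/v1/health", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.ListenAndServe(":8080", loggingMiddleware(mux))
}
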
@@ -416,137 +266,18 @@ func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}
func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFromV3 := "/api/v3/query_range"
pathToExtractBodyFromV4 := "/api/v4/query_range"
data := map[string]interface{}{}
var postData *v3.QueryRangeParamsV3
if (r.Method == "POST") && ((path == pathToExtractBodyFromV3) || (path == pathToExtractBodyFromV4)) {
if r.Body != nil {
bodyBytes, err := io.ReadAll(r.Body)
if err != nil {
return nil, false
}
r.Body.Close() // must close
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &postData)
} else {
return nil, false
}
} else {
return nil, false
}
referrer := r.Header.Get("Referer")
dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the referrer", zap.Error(err))
}
alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the alert: ", zap.Error(err))
}
logsExplorerMatched, err := regexp.MatchString(`/logs/logs-explorer(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the logs explorer: ", zap.Error(err))
}
traceExplorerMatched, err := regexp.MatchString(`/traces-explorer(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
}
signozMetricsUsed := false
signozLogsUsed := false
signozTracesUsed := false
if postData != nil {
if postData.CompositeQuery != nil {
data["queryType"] = postData.CompositeQuery.QueryType
data["panelType"] = postData.CompositeQuery.PanelType
signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
}
}
if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
if signozMetricsUsed {
telemetry.GetInstance().AddActiveMetricsUser()
}
if signozLogsUsed {
telemetry.GetInstance().AddActiveLogsUser()
}
if signozTracesUsed {
telemetry.GetInstance().AddActiveTracesUser()
}
data["metricsUsed"] = signozMetricsUsed
data["logsUsed"] = signozLogsUsed
data["tracesUsed"] = signozTracesUsed
userEmail, err := baseauth.GetEmailFromJwt(r.Context())
if err == nil {
// switch case to set data["screen"] based on the referrer
switch {
case dashboardMatched:
data["screen"] = "panel"
case alertMatched:
data["screen"] = "alert"
case logsExplorerMatched:
data["screen"] = "logs-explorer"
case traceExplorerMatched:
data["screen"] = "traces-explorer"
default:
data["screen"] = "unknown"
return data, true
}
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail, true, false)
}
}
return data, true
}
func getActiveLogs(path string, r *http.Request) {
// if path == "/api/v1/dashboards/{uuid}" {
// telemetry.GetInstance().AddActiveMetricsUser()
// }
if path == "/api/v1/logs" {
hasFilters := len(r.URL.Query().Get("q"))
if hasFilters > 0 {
telemetry.GetInstance().AddActiveLogsUser()
}
}
}
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-ctx := baseauth.AttachJwtToContext(r.Context(), r)
-r = r.WithContext(ctx)
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
-queryRangeData, metadataExists := extractQueryRangeData(path, r)
-getActiveLogs(path, r)
lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
-if metadataExists {
-for key, value := range queryRangeData {
-data[key] = value
-}
-}
-if _, ok := telemetry.EnabledPaths()[path]; ok {
-userEmail, err := baseauth.GetEmailFromJwt(r.Context())
-if err == nil {
-telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail, true, false)
-}
+if _, ok := telemetry.IgnoredPaths()[path]; !ok {
+telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
}
})
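
The status code in data comes from a loggingResponseWriter that intercepts WriteHeader. A typical implementation of that wrapper looks like this; it is a sketch, not SigNoz's exact code:

package app

import "net/http"

// loggingResponseWriter captures the status code written by the inner handler.
type loggingResponseWriter struct {
	http.ResponseWriter
	statusCode int
}

func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
	// Default to 200: handlers that never call WriteHeader send it implicitly.
	return &loggingResponseWriter{ResponseWriter: w, statusCode: http.StatusOK}
}

func (lrw *loggingResponseWriter) WriteHeader(code int) {
	lrw.statusCode = code
	lrw.ResponseWriter.WriteHeader(code)
}
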
@@ -559,7 +290,7 @@ func setTimeoutMiddleware(next http.Handler) http.Handler {
// check if route is not excluded
url := r.URL.Path
if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
-ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout)
+ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout*time.Second)
defer cancel()
}
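
The one-line change above is about the type of the constant: on one side baseconst.ContextTimeout is already a time.Duration, on the other it is a bare number scaled by time.Second at the call site. A small sketch of both forms; the 60-second value is assumed for illustration:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Older style: a bare integer constant, scaled at the call site.
	const contextTimeoutSeconds = 60
	ctx1, cancel1 := context.WithTimeout(context.Background(), contextTimeoutSeconds*time.Second)
	defer cancel1()

	// Newer style: the constant is already a time.Duration, so no scaling.
	const contextTimeout = 60 * time.Second
	ctx2, cancel2 := context.WithTimeout(context.Background(), contextTimeout)
	defer cancel2()

	fmt.Println(ctx1.Err(), ctx2.Err()) // both nil until the deadline passes
}
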
@@ -582,7 +313,7 @@ func (s *Server) initListeners() error {
return err
}
-zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
+zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
// listen on private port to support internal services
privateHostPort := s.serverOptions.PrivateHostPort
@@ -595,7 +326,7 @@ func (s *Server) initListeners() error {
if err != nil {
return err
}
-zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
+zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
return nil
}
@@ -607,7 +338,7 @@ func (s *Server) Start() error {
if !s.serverOptions.DisableRules {
s.ruleManager.Start()
} else {
-zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
+zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
}
err := s.initListeners()
@@ -621,23 +352,23 @@ func (s *Server) Start() error {
} }
go func() { go func() {
zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort)) zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
switch err := s.httpServer.Serve(s.httpConn); err { switch err := s.httpServer.Serve(s.httpConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed: case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do // normal exit, nothing to do
default: default:
zap.L().Error("Could not start HTTP server", zap.Error(err)) zap.S().Error("Could not start HTTP server", zap.Error(err))
} }
s.unavailableChannel <- healthcheck.Unavailable s.unavailableChannel <- healthcheck.Unavailable
}() }()
go func() { go func() {
zap.L().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort)) zap.S().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
err = http.ListenAndServe(baseconst.DebugHttpPort, nil) err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
if err != nil { if err != nil {
zap.L().Error("Could not start pprof server", zap.Error(err)) zap.S().Error("Could not start pprof server", zap.Error(err))
} }
}() }()
@@ -645,56 +376,22 @@ func (s *Server) Start() error {
if port, err := utils.GetPort(s.privateConn.Addr()); err == nil { if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
privatePort = port privatePort = port
} }
fmt.Println("starting private http")
go func() { go func() {
zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort)) zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
switch err := s.privateHTTP.Serve(s.privateConn); err { switch err := s.privateHTTP.Serve(s.privateConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed: case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do // normal exit, nothing to do
zap.L().Info("private http server closed") zap.S().Info("private http server closed")
default: default:
zap.L().Error("Could not start private HTTP server", zap.Error(err)) zap.S().Error("Could not start private HTTP server", zap.Error(err))
} }
s.unavailableChannel <- healthcheck.Unavailable s.unavailableChannel <- healthcheck.Unavailable
}() }()
go func() {
zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
if err != nil {
zap.L().Error("opamp ws server failed to start", zap.Error(err))
s.unavailableChannel <- healthcheck.Unavailable
}
}()
return nil
}
func (s *Server) Stop() error {
if s.httpServer != nil {
if err := s.httpServer.Shutdown(context.Background()); err != nil {
return err
}
}
if s.privateHTTP != nil {
if err := s.privateHTTP.Shutdown(context.Background()); err != nil {
return err
}
}
s.opampServer.Stop()
if s.ruleManager != nil {
s.ruleManager.Stop()
}
// stop usage manager
s.usageManager.Stop()
return nil
}
@@ -704,8 +401,7 @@ func makeRulesManager(
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
-disableRules bool,
-fm baseint.FeatureLookup) (*rules.Manager, error) {
+disableRules bool) (*rules.Manager, error) {
// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -732,8 +428,6 @@
Context: context.Background(),
Logger: nil,
DisableRules: disableRules,
-FeatureFlags: fm,
-Reader: ch,
}
// create Manager
@@ -742,7 +436,7 @@
return nil, fmt.Errorf("rule manager error: %v", err)
}
-zap.L().Info("rules manager is ready")
+zap.S().Info("rules manager is ready")
return manager, nil
}


@@ -1,56 +0,0 @@
package auth
import (
"context"
"fmt"
"net/http"
"time"
"go.signoz.io/signoz/ee/query-service/app/api"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.uber.org/zap"
)
func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel.UserPayload, error) {
patToken := r.Header.Get("SIGNOZ-API-KEY")
if len(patToken) > 0 {
zap.L().Debug("Received a non-zero length PAT token")
ctx := context.Background()
dao := apiHandler.AppDao()
pat, err := dao.GetPAT(ctx, patToken)
if err == nil && pat != nil {
zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
return nil, fmt.Errorf("PAT has expired")
}
group, apiErr := dao.GetGroupByName(ctx, pat.Role)
if apiErr != nil {
zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
return nil, apiErr
}
user, err := dao.GetUser(ctx, pat.UserID)
if err != nil {
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
telemetry.GetInstance().SetPatTokenUser()
dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
user.User.GroupId = group.Id
user.User.Id = pat.Id
return &basemodel.UserPayload{
User: user.User,
Role: pat.Role,
}, nil
}
if err != nil {
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
}
return baseauth.GetUserFromRequest(r)
}
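
The deleted file above resolves a user from a SIGNOZ-API-KEY header before falling back to JWT auth. A condensed sketch of that lookup-and-expiry check; Token, store, and the error strings are illustrative stand-ins for the PAT model and DAO:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

// Token is a stand-in for the PAT model used above.
type Token struct {
	ExpiresAt int64
	UserID    string
}

var store = map[string]Token{} // token -> metadata; illustrative only

// userFromPAT mirrors the shape of the deleted helper: look up the header
// token, reject expired ones (ExpiresAt == 0 means "never expires"),
// and signal the caller to fall back to JWT auth when no header is present.
func userFromPAT(r *http.Request) (string, error) {
	pat := r.Header.Get("SIGNOZ-API-KEY")
	if pat == "" {
		return "", errors.New("no PAT; fall back to JWT auth")
	}
	tok, ok := store[pat]
	if !ok {
		return "", errors.New("unknown token")
	}
	if tok.ExpiresAt != 0 && tok.ExpiresAt < time.Now().Unix() {
		return "", errors.New("PAT has expired")
	}
	return tok.UserID, nil
}

func main() {
	r, _ := http.NewRequest("GET", "/api/v1/version", nil)
	r.Header.Set("SIGNOZ-API-KEY", "demo-token")
	_, err := userFromPAT(r)
	fmt.Println(err)
}
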


@@ -9,10 +9,8 @@ const (
)
var LicenseSignozIo = "https://license.signoz.io/api/v1"
-var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
-var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
-var SpanRenderLimitStr = GetOrDefaultEnv("SPAN_RENDER_LIMIT", "2500")
-var MaxSpansInTraceStr = GetOrDefaultEnv("MAX_SPANS_IN_TRACE", "250000")
+var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
func GetOrDefaultEnv(key string, fallback string) string {
v := os.Getenv(key)
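
The hunk cuts GetOrDefaultEnv off after its first line; it presumably completes as the usual env-with-fallback idiom, sketched here with the assumed tail:

package constants

import "os"

// GetOrDefaultEnv returns the value of key, or fallback when it is unset
// or empty. (The body past the first line is assumed, not shown in the hunk.)
func GetOrDefaultEnv(key string, fallback string) string {
	v := os.Getenv(key)
	if len(v) == 0 {
		return fallback
	}
	return v
}
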


@@ -3,7 +3,6 @@ package dao
import (
"context"
"net/url"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/model"
@@ -21,24 +20,16 @@ type ModelDao interface {
DB() *sqlx.DB
// auth methods
-CanUsePassword(ctx context.Context, email string) (bool, *basemodel.ApiError)
-PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr *basemodel.ApiError)
+PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
+CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
+PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
// org domain (auth domains) CRUD ops
-ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, *basemodel.ApiError)
-GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, *basemodel.ApiError)
-CreateDomain(ctx context.Context, d *model.OrgDomain) *basemodel.ApiError
-UpdateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError
-DeleteDomain(ctx context.Context, id uuid.UUID) *basemodel.ApiError
-GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, *basemodel.ApiError)
-CreatePAT(ctx context.Context, p model.PAT) (model.PAT, *basemodel.ApiError)
-UpdatePAT(ctx context.Context, p model.PAT, id string) *basemodel.ApiError
-GetPAT(ctx context.Context, pat string) (*model.PAT, *basemodel.ApiError)
-UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) *basemodel.ApiError
-GetPATByID(ctx context.Context, id string) (*model.PAT, *basemodel.ApiError)
-GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, *basemodel.ApiError)
-ListPATs(ctx context.Context) ([]model.PAT, *basemodel.ApiError)
-RevokePAT(ctx context.Context, id string, userID string) *basemodel.ApiError
+ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
+GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
+CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
+UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
+DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
+GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
}


@@ -5,97 +5,39 @@ import (
"fmt"
"net/url"
"strings"
-"time"
-"github.com/google/uuid"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
-baseauth "go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
-"go.signoz.io/signoz/pkg/query-service/utils"
+baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.uber.org/zap"
)
-func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, *basemodel.ApiError) {
// get auth domain from email domain
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
zap.L().Error("failed to get domain from email", zap.Error(apierr))
return nil, basemodel.InternalError(fmt.Errorf("failed to get domain from email"))
}
hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
if err != nil {
zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
return nil, basemodel.InternalError(fmt.Errorf("failed to generate password hash"))
}
group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
if apiErr != nil {
zap.L().Error("GetGroupByName failed", zap.Error(apiErr))
return nil, apiErr
}
user := &basemodel.User{
Id: uuid.NewString(),
Name: "",
Email: email,
Password: hash,
CreatedAt: time.Now().Unix(),
ProfilePictureURL: "", // Currently unused
GroupId: group.Id,
OrgId: domain.OrgId,
}
user, apiErr = m.CreateUser(ctx, user, false)
if apiErr != nil {
zap.L().Error("CreateUser failed", zap.Error(apiErr))
return nil, apiErr
}
return user, nil
}
// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
-func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr *basemodel.ApiError) {
+func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
-zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
-return "", basemodel.BadRequest(fmt.Errorf("invalid user email received from the auth provider"))
+zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error())
+return "", model.BadRequestStr("invalid user email received from the auth provider")
}
-user := &basemodel.User{}
-if userPayload == nil {
-newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
-user = newUser
-if apiErr != nil {
-zap.L().Error("failed to create user with email received from auth provider", zap.Error(apiErr))
-return "", apiErr
-}
-} else {
-user = &userPayload.User
-}
-tokenStore, err := baseauth.GenerateJWTForUser(user)
+tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
if err != nil {
-zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
-return "", basemodel.InternalError(fmt.Errorf("failed to generate token for the user"))
+zap.S().Errorf("failed to generate token for SSO login user", err)
+return "", model.InternalErrorStr("failed to generate token for the user")
}
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
-user.Id,
+userPayload.User.Id,
tokenStore.RefreshJwt), nil
}
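
Both versions of PrepareSsoRedirect assemble the redirect with fmt.Sprintf. A self-contained sketch of that last step; the values are illustrative, and the url.QueryEscape calls are an addition worth considering since JWTs can contain URL-significant characters:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	redirectUri := "https://app.example.com/login/sso"
	accessJwt, refreshJwt, userId := "access-token", "refresh-token", "user-42"

	// Same shape as the diff's fmt.Sprintf, with escaping added.
	target := fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
		redirectUri,
		url.QueryEscape(accessJwt),
		url.QueryEscape(userId),
		url.QueryEscape(refreshJwt))
	fmt.Println(target)
}
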
-func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, *basemodel.ApiError) {
+func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
return false, apierr
@@ -110,7 +52,7 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, *bas
}
if userPayload.Role != baseconst.AdminGroup {
-return false, basemodel.BadRequest(fmt.Errorf("auth method not supported"))
+return false, model.BadRequest(fmt.Errorf("auth method not supported"))
}
}
@@ -120,10 +62,10 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, *bas
// PrecheckLogin is called when the login or signup page is loaded
// to check sso login is to be prompted
-func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, *basemodel.ApiError) {
+func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError) {
// assume user is valid unless proven otherwise
-resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}
+resp := &model.PrecheckResponse{IsUser: true, CanSelfRegister: false}
// check if email is a valid user
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
@@ -134,7 +76,6 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
if userPayload == nil {
resp.IsUser = false
}
ssoAvailable := true
err := m.checkFeature(model.SSO)
if err != nil {
@@ -143,15 +84,13 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
// do nothing, just skip sso
ssoAvailable = false
default:
-zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
-return resp, &basemodel.ApiError{Err: err, Typ: basemodel.ErrorBadData}
+zap.S().Errorf("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
+return resp, model.BadRequest(err)
}
}
if ssoAvailable {
-resp.IsUser = true
// find domain from email
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
@@ -160,7 +99,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
if len(emailComponents) > 0 {
emailDomain = emailComponents[1]
}
-zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
+zap.S().Errorf("failed to get org domain from email", zap.String("emailDomain", emailDomain), apierr.ToError())
return resp, apierr
}
@@ -176,8 +115,8 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
escapedUrl, _ := url.QueryUnescape(sourceUrl)
siteUrl, err := url.Parse(escapedUrl)
if err != nil {
-zap.L().Error("failed to parse referer", zap.Error(err))
-return resp, basemodel.InternalError(fmt.Errorf("failed to generate login request"))
+zap.S().Errorf("failed to parse referer", err)
+return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
}
// build Idp URL that will authenticate the user
@@ -185,8 +124,8 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
if err != nil {
-zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
-return resp, basemodel.InternalError(err)
+zap.S().Errorf("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), err)
+return resp, model.InternalError(err)
}
// set SSO to true, as the url is generated correctly


@@ -4,8 +4,8 @@ import (
"context"
"database/sql"
"encoding/json"
-"fmt"
"net/url"
+"fmt"
"strings"
"time"
@@ -28,95 +28,54 @@ type StoredDomain struct {
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
// user domain. The domain is further used to process validity of the response.
// when sending login request to IdP we send relay state as URL (site url)
-// with domainId or domainName as query parameter.
+// with domainId as query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
-var domainNameStr string
-var domain *model.OrgDomain
for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
domainIdStr = strings.Replace(v[0], ":", "-", -1)
}
-if k == "domainName" && len(v) > 0 {
-domainNameStr = v[0]
-}
}
-if domainIdStr != "" {
-domainId, err := uuid.Parse(domainIdStr)
-if err != nil {
-zap.L().Error("failed to parse domainId from relay state", zap.Error(err))
-return nil, fmt.Errorf("failed to parse domainId from IdP response")
-}
-domain, err = m.GetDomain(ctx, domainId)
-if (err != nil) || domain == nil {
-zap.L().Error("failed to find domain from domainId received in IdP response", zap.Error(err))
-return nil, fmt.Errorf("invalid credentials")
-}
-}
-if domainNameStr != "" {
-domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
-domain = domainFromDB
-if (err != nil) || domain == nil {
-zap.L().Error("failed to find domain from domainName received in IdP response", zap.Error(err))
-return nil, fmt.Errorf("invalid credentials")
-}
-}
-if domain != nil {
-return domain, nil
-}
-return nil, fmt.Errorf("failed to find domain received in IdP response")
-}
-// GetDomainByName returns org domain for a given domain name
-func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, *basemodel.ApiError) {
-stored := StoredDomain{}
-err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)
-if err != nil {
-if err == sql.ErrNoRows {
-return nil, basemodel.BadRequest(fmt.Errorf("invalid domain name"))
-}
-return nil, basemodel.InternalError(err)
-}
-domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
-if err := domain.LoadConfig(stored.Data); err != nil {
-return nil, basemodel.InternalError(err)
-}
+domainId, err := uuid.Parse(domainIdStr)
+if err != nil {
+zap.S().Errorf("failed to parse domain id from relay state", err)
+return nil, fmt.Errorf("failed to parse response from IdP response")
+}
+domain, err := m.GetDomain(ctx, domainId)
+if (err != nil) || domain == nil {
+zap.S().Errorf("failed to find domain received in IdP response", err.Error())
+return nil, fmt.Errorf("invalid credentials")
+}
return domain, nil
}
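
The relay-state handling on both sides is plain query-parameter extraction plus a colon-to-dash normalization before UUID parsing. A runnable sketch of just that step (URL and id are made up):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// The IdP echoes back the relay state we sent when the login flow began;
	// the auth domain id rides along as a query parameter.
	relayState, _ := url.Parse("https://app.example.com/complete/saml?domainId=8f3b0c1d:1a2b:4c5d:8e9f:0a1b2c3d4e5f")
	var domainIdStr string
	for k, v := range relayState.Query() {
		if k == "domainId" && len(v) > 0 {
			// Same normalization as the diff: restore dashes before parsing.
			domainIdStr = strings.Replace(v[0], ":", "-", -1)
		}
	}
	fmt.Println(domainIdStr)
}
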
// GetDomain returns org domain for a given domain id
-func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, *basemodel.ApiError) {
+func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)
if err != nil {
if err == sql.ErrNoRows {
-return nil, basemodel.BadRequest(fmt.Errorf("invalid domain id"))
+return nil, model.BadRequest(fmt.Errorf("invalid domain id"))
}
-return nil, basemodel.InternalError(err)
+return nil, model.InternalError(err)
}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
-return nil, basemodel.InternalError(err)
+return domain, model.InternalError(err)
}
return domain, nil
}
// ListDomains gets the list of auth domains by org id
-func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, *basemodel.ApiError) {
+func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
domains := []model.OrgDomain{}
stored := []StoredDomain{}
@@ -126,13 +85,13 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
if err == sql.ErrNoRows {
return []model.OrgDomain{}, nil
}
-return nil, basemodel.InternalError(err)
+return nil, model.InternalError(err)
}
for _, s := range stored {
domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
if err := domain.LoadConfig(s.Data); err != nil {
-zap.L().Error("ListDomains() failed", zap.Error(err))
+zap.S().Errorf("ListDomains() failed", zap.Error(err))
}
domains = append(domains, domain)
}
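
GetDomain and ListDomains both follow the standard sqlx pattern: scan into a stored struct, treat sql.ErrNoRows as a client error (or an empty list), and everything else as internal. A compact stand-alone version of the single-row case; the struct and table are trimmed to two columns for the sketch:

package dao

import (
	"database/sql"
	"errors"
	"fmt"

	"github.com/jmoiron/sqlx"
)

// StoredDomain is trimmed to two columns for the sketch.
type StoredDomain struct {
	Id   string `db:"id"`
	Name string `db:"name"`
}

func getDomain(db *sqlx.DB, id string) (*StoredDomain, error) {
	stored := StoredDomain{}
	err := db.Get(&stored, `SELECT id, name FROM org_domains WHERE id=$1 LIMIT 1`, id)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// Maps to a BadRequest in the hunk: the id simply does not exist.
			return nil, fmt.Errorf("invalid domain id")
		}
		// Anything else (connection, scan) maps to an InternalError.
		return nil, err
	}
	return &stored, nil
}
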
@@ -141,20 +100,20 @@
}
// CreateDomain creates a new auth domain
-func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError {
+func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
if domain.Id == uuid.Nil {
domain.Id = uuid.New()
}
if domain.OrgId == "" || domain.Name == "" {
-return basemodel.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
+return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
}
configJson, err := json.Marshal(domain)
if err != nil {
-zap.L().Error("failed to unmarshal domain config", zap.Error(err))
-return basemodel.InternalError(fmt.Errorf("domain creation failed"))
+zap.S().Errorf("failed to unmarshal domain config", zap.Error(err))
+return model.InternalError(fmt.Errorf("domain creation failed"))
}
_, err = m.DB().ExecContext(ctx,
@@ -167,25 +126,25 @@
time.Now().Unix())
if err != nil {
-zap.L().Error("failed to insert domain in db", zap.Error(err))
-return basemodel.InternalError(fmt.Errorf("domain creation failed"))
+zap.S().Errorf("failed to insert domain in db", zap.Error(err))
+return model.InternalError(fmt.Errorf("domain creation failed"))
}
return nil
}
// UpdateDomain updates stored config params for a domain
-func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError {
+func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
if domain.Id == uuid.Nil {
-zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
-return basemodel.InternalError(fmt.Errorf("domain update failed"))
+zap.S().Errorf("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
+return model.InternalError(fmt.Errorf("domain update failed"))
}
configJson, err := json.Marshal(domain)
if err != nil {
-zap.L().Error("domain update failed", zap.Error(err))
-return basemodel.InternalError(fmt.Errorf("domain update failed"))
+zap.S().Errorf("domain update failed", zap.Error(err))
+return model.InternalError(fmt.Errorf("domain update failed"))
}
_, err = m.DB().ExecContext(ctx,
@@ -195,19 +154,19 @@
domain.Id)
if err != nil {
-zap.L().Error("domain update failed", zap.Error(err))
-return basemodel.InternalError(fmt.Errorf("domain update failed"))
+zap.S().Errorf("domain update failed", zap.Error(err))
+return model.InternalError(fmt.Errorf("domain update failed"))
}
return nil
}
// DeleteDomain deletes an org domain
-func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) *basemodel.ApiError {
+func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
if id == uuid.Nil {
-zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
-return basemodel.InternalError(fmt.Errorf("domain delete failed"))
+zap.S().Errorf("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
+return model.InternalError(fmt.Errorf("domain delete failed"))
}
_, err := m.DB().ExecContext(ctx,
@@ -215,22 +174,22 @@
id)
if err != nil {
-zap.L().Error("domain delete failed", zap.Error(err))
-return basemodel.InternalError(fmt.Errorf("domain delete failed"))
+zap.S().Errorf("domain delete failed", zap.Error(err))
+return model.InternalError(fmt.Errorf("domain delete failed"))
}
return nil
}
-func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, *basemodel.ApiError) {
+func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {
if email == "" {
-return nil, basemodel.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
+return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
}
components := strings.Split(email, "@")
if len(components) < 2 {
-return nil, basemodel.BadRequest(fmt.Errorf("invalid email address"))
+return nil, model.BadRequest(fmt.Errorf("invalid email address"))
}
parsedDomain := components[1]
@@ -242,12 +201,12 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
if err == sql.ErrNoRows {
return nil, nil
}
-return nil, basemodel.InternalError(err)
+return nil, model.InternalError(err)
}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
-return nil, basemodel.InternalError(err)
+return domain, model.InternalError(err)
}
return domain, nil
}
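
The split-and-validate step at the top of GetDomainByEmail is worth isolating, since its behavior on unusual input is easy to miss. A runnable sketch:

package main

import (
	"fmt"
	"strings"
)

// domainFromEmail mirrors the split-and-validate step in GetDomainByEmail.
func domainFromEmail(email string) (string, error) {
	if email == "" {
		return "", fmt.Errorf("missing email")
	}
	components := strings.Split(email, "@")
	if len(components) < 2 {
		return "", fmt.Errorf("invalid email address")
	}
	// With multiple '@'s, components[1] is only the first segment; the hunk
	// uses the same index, so "a@b@c" would yield "b".
	return components[1], nil
}

func main() {
	d, err := domainFromEmail("user@signoz.io")
	fmt.Println(d, err)
}
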


@@ -7,7 +7,6 @@ import (
basedao "go.signoz.io/signoz/pkg/query-service/dao"
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
-"go.uber.org/zap"
)
type modelDao struct {
@@ -29,41 +28,6 @@ func (m *modelDao) checkFeature(key string) error {
return m.flags.CheckFeature(key)
}
func columnExists(db *sqlx.DB, tableName, columnName string) bool {
query := fmt.Sprintf("PRAGMA table_info(%s);", tableName)
rows, err := db.Query(query)
if err != nil {
zap.L().Error("Failed to query table info", zap.Error(err))
return false
}
defer rows.Close()
var (
cid int
name string
ctype string
notnull int
dflt_value *string
pk int
)
for rows.Next() {
err := rows.Scan(&cid, &name, &ctype, &notnull, &dflt_value, &pk)
if err != nil {
zap.L().Error("Failed to scan table info", zap.Error(err))
return false
}
if name == columnName {
return true
}
}
err = rows.Err()
if err != nil {
zap.L().Error("Failed to scan table info", zap.Error(err))
return false
}
return false
}
// InitDB creates and extends base model DB repository
func InitDB(dataSourceName string) (*modelDao, error) {
dao, err := basedsql.InitDB(dataSourceName)
@@ -84,58 +48,13 @@ func InitDB(dataSourceName string) (*modelDao, error) {
updated_at INTEGER,
data TEXT NOT NULL,
FOREIGN KEY(org_id) REFERENCES organizations(id)
-);
-CREATE TABLE IF NOT EXISTS personal_access_tokens (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-role TEXT NOT NULL,
-user_id TEXT NOT NULL,
-token TEXT NOT NULL UNIQUE,
-name TEXT NOT NULL,
-created_at INTEGER NOT NULL,
-expires_at INTEGER NOT NULL,
-updated_at INTEGER NOT NULL,
-last_used INTEGER NOT NULL,
-revoked BOOLEAN NOT NULL,
-updated_by_user_id TEXT NOT NULL,
-FOREIGN KEY(user_id) REFERENCES users(id)
-);
-`
+);`
_, err = m.DB().Exec(table_schema)
if err != nil {
return nil, fmt.Errorf("error in creating tables: %v", err.Error())
}
-if !columnExists(m.DB(), "personal_access_tokens", "role") {
-_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN role TEXT NOT NULL DEFAULT 'ADMIN';")
-if err != nil {
-return nil, fmt.Errorf("error in adding column: %v", err.Error())
-}
-}
-if !columnExists(m.DB(), "personal_access_tokens", "updated_at") {
-_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_at INTEGER NOT NULL DEFAULT 0;")
-if err != nil {
-return nil, fmt.Errorf("error in adding column: %v", err.Error())
-}
-}
-if !columnExists(m.DB(), "personal_access_tokens", "last_used") {
-_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;")
-if err != nil {
-return nil, fmt.Errorf("error in adding column: %v", err.Error())
-}
-}
-if !columnExists(m.DB(), "personal_access_tokens", "revoked") {
-_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN revoked BOOLEAN NOT NULL DEFAULT FALSE;")
-if err != nil {
-return nil, fmt.Errorf("error in adding column: %v", err.Error())
-}
-}
-if !columnExists(m.DB(), "personal_access_tokens", "updated_by_user_id") {
-_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_by_user_id TEXT NOT NULL DEFAULT '';")
-if err != nil {
-return nil, fmt.Errorf("error in adding column: %v", err.Error())
-}
-}
return m, nil
}
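
The deleted migration guard works around SQLite's lack of ALTER TABLE ... ADD COLUMN IF NOT EXISTS by probing PRAGMA table_info first. A compact sketch of the same idea; ensureColumn is a hypothetical helper that restructures the repeated blocks above:

package dao

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

// columnExists reports whether tableName already has columnName, using
// SQLite's PRAGMA table_info (name is the second column of its output).
func columnExists(db *sqlx.DB, tableName, columnName string) bool {
	rows, err := db.Query(fmt.Sprintf("PRAGMA table_info(%s);", tableName))
	if err != nil {
		return false
	}
	defer rows.Close()
	var (
		cid, notnull, pk int
		name, ctype      string
		dflt             *string
	)
	for rows.Next() {
		if err := rows.Scan(&cid, &name, &ctype, &notnull, &dflt, &pk); err != nil {
			return false
		}
		if name == columnName {
			return true
		}
	}
	return false
}

// ensureColumn adds a column only when it is missing, so restarts are safe.
func ensureColumn(db *sqlx.DB, table, column, ddl string) error {
	if columnExists(db, table, column) {
		return nil
	}
	_, err := db.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s;", table, ddl))
	return err
}
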


@@ -1,199 +0,0 @@
package sqlite
import (
"context"
"fmt"
"strconv"
"time"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, *basemodel.ApiError) {
result, err := m.DB().ExecContext(ctx,
"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
p.UserID,
p.Token,
p.Role,
p.Name,
p.CreatedAt,
p.ExpiresAt,
p.UpdatedAt,
p.UpdatedByUserID,
p.LastUsed,
p.Revoked,
)
if err != nil {
zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
return model.PAT{}, basemodel.InternalError(fmt.Errorf("PAT insertion failed"))
}
id, err := result.LastInsertId()
if err != nil {
zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
return model.PAT{}, basemodel.InternalError(fmt.Errorf("PAT insertion failed"))
}
p.Id = strconv.Itoa(int(id))
createdByUser, _ := m.GetUser(ctx, p.UserID)
if createdByUser == nil {
p.CreatedByUser = model.User{
NotFound: true,
}
} else {
p.CreatedByUser = model.User{
Id: createdByUser.Id,
Name: createdByUser.Name,
Email: createdByUser.Email,
CreatedAt: createdByUser.CreatedAt,
ProfilePictureURL: createdByUser.ProfilePictureURL,
NotFound: false,
}
}
return p, nil
}
func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) *basemodel.ApiError {
_, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;",
p.Role,
p.Name,
p.UpdatedAt,
p.UpdatedByUserID,
id)
if err != nil {
zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
return basemodel.InternalError(fmt.Errorf("PAT update failed"))
}
return nil
}
func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) *basemodel.ApiError {
_, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;",
lastUsed,
token)
if err != nil {
zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
return basemodel.InternalError(fmt.Errorf("PAT last used update failed"))
}
return nil
}
func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, *basemodel.ApiError) {
pats := []model.PAT{}
if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PATs"))
}
for i := range pats {
createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
if createdByUser == nil {
pats[i].CreatedByUser = model.User{
NotFound: true,
}
} else {
pats[i].CreatedByUser = model.User{
Id: createdByUser.Id,
Name: createdByUser.Name,
Email: createdByUser.Email,
CreatedAt: createdByUser.CreatedAt,
ProfilePictureURL: createdByUser.ProfilePictureURL,
NotFound: false,
}
}
updatedByUser, _ := m.GetUser(ctx, pats[i].UpdatedByUserID)
if updatedByUser == nil {
pats[i].UpdatedByUser = model.User{
NotFound: true,
}
} else {
pats[i].UpdatedByUser = model.User{
Id: updatedByUser.Id,
Name: updatedByUser.Name,
Email: updatedByUser.Email,
CreatedAt: updatedByUser.CreatedAt,
ProfilePictureURL: updatedByUser.ProfilePictureURL,
NotFound: false,
}
}
}
return pats, nil
}
func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) *basemodel.ApiError {
updatedAt := time.Now().Unix()
_, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
userID, updatedAt, id)
if err != nil {
zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
return basemodel.InternalError(fmt.Errorf("PAT revoke failed"))
}
return nil
}
func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, *basemodel.ApiError) {
pats := []model.PAT{}
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PAT"))
}
if len(pats) != 1 {
return nil, &basemodel.ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token),
}
}
return &pats[0], nil
}
func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, *basemodel.ApiError) {
pats := []model.PAT{}
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PAT"))
}
if len(pats) != 1 {
return nil, &basemodel.ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token"),
}
}
return &pats[0], nil
}
// deprecated
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, *basemodel.ApiError) {
users := []basemodel.UserPayload{}
query := `SELECT
u.id,
u.name,
u.email,
u.password,
u.created_at,
u.profile_picture_url,
u.org_id,
u.group_id
FROM users u, personal_access_tokens p
WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`
if err := m.DB().Select(&users, query, token); err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
}
if len(users) != 1 {
return nil, &basemodel.ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf("found zero or multiple users with same PAT token"),
}
}
return &users[0], nil
}

View File

@@ -1,9 +0,0 @@
package gateway
import (
"net/http/httputil"
)
func NewNoopProxy() (*httputil.ReverseProxy, error) {
return nil, nil
}

View File

@@ -1,66 +0,0 @@
package gateway
import (
"net/http"
"net/http/httputil"
"net/url"
"path"
"strings"
)
const (
RoutePrefix string = "/api/gateway"
AllowedPrefix string = "/v1/workspaces/me"
)
type proxy struct {
url *url.URL
stripPath string
}
func NewProxy(u string, stripPath string) (*httputil.ReverseProxy, error) {
url, err := url.Parse(u)
if err != nil {
return nil, err
}
proxy := &proxy{url: url, stripPath: stripPath}
return &httputil.ReverseProxy{
Rewrite: proxy.rewrite,
ModifyResponse: proxy.modifyResponse,
ErrorHandler: proxy.errorHandler,
}, nil
}
func (p *proxy) rewrite(pr *httputil.ProxyRequest) {
pr.SetURL(p.url)
pr.SetXForwarded()
pr.Out.URL.Path = cleanPath(strings.ReplaceAll(pr.Out.URL.Path, p.stripPath, ""))
}
func (p *proxy) modifyResponse(res *http.Response) error {
return nil
}
func (p *proxy) errorHandler(rw http.ResponseWriter, req *http.Request, err error) {
rw.WriteHeader(http.StatusBadGateway)
}
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
if p[len(p)-1] == '/' && np != "/" {
if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
np = p
} else {
np += "/"
}
}
return np
}

View File

@@ -1,61 +0,0 @@
package gateway
import (
"context"
"net/http"
"net/http/httputil"
"net/url"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestProxyRewrite(t *testing.T) {
testCases := []struct {
name string
url *url.URL
stripPath string
in *url.URL
expected *url.URL
}{
{
name: "SamePathAdded",
url: &url.URL{Scheme: "http", Host: "backend", Path: "/path1"},
stripPath: "/strip",
in: &url.URL{Scheme: "http", Host: "localhost", Path: "/strip/path1"},
expected: &url.URL{Scheme: "http", Host: "backend", Path: "/path1/path1"},
},
{
name: "NoStripPathInput",
url: &url.URL{Scheme: "http", Host: "backend"},
stripPath: "",
in: &url.URL{Scheme: "http", Host: "localhost", Path: "/strip/path1"},
expected: &url.URL{Scheme: "http", Host: "backend", Path: "/strip/path1"},
},
{
name: "NoStripPathPresentInReq",
url: &url.URL{Scheme: "http", Host: "backend"},
stripPath: "/not-found",
in: &url.URL{Scheme: "http", Host: "localhost", Path: "/strip/path1"},
expected: &url.URL{Scheme: "http", Host: "backend", Path: "/strip/path1"},
},
}
for _, tc := range testCases {
proxy, err := NewProxy(tc.url.String(), tc.stripPath)
require.NoError(t, err)
inReq, err := http.NewRequest(http.MethodGet, tc.in.String(), nil)
require.NoError(t, err)
proxyReq := &httputil.ProxyRequest{
In: inReq,
Out: inReq.Clone(context.Background()),
}
proxy.Rewrite(proxyReq)
assert.Equal(t, tc.expected.Host, proxyReq.Out.URL.Host)
assert.Equal(t, tc.expected.Scheme, proxyReq.Out.URL.Scheme)
assert.Equal(t, tc.expected.Path, proxyReq.Out.URL.Path)
assert.Equal(t, tc.expected.Query(), proxyReq.Out.URL.Query())
}
}

View File

@@ -2,6 +2,11 @@ package signozio
type status string type status string
const (
statusSuccess status = "success"
statusError status = "error"
)
type ActivationResult struct { type ActivationResult struct {
Status status `json:"status"` Status status `json:"status"`
Data *ActivationResponse `json:"data,omitempty"` Data *ActivationResponse `json:"data,omitempty"`

View File

@@ -6,14 +6,13 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"github.com/pkg/errors" "github.com/pkg/errors"
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/constants" "go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model" "go.uber.org/zap"
) )
var C *Client var C *Client
@@ -38,7 +37,7 @@ func init() {
} }
// ActivateLicense sends key to license.signoz.io and gets activation data // ActivateLicense sends key to license.signoz.io and gets activation data
func ActivateLicense(key, siteId string) (*ActivationResponse, *basemodel.ApiError) { func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
licenseReq := map[string]string{ licenseReq := map[string]string{
"key": key, "key": key,
"siteId": siteId, "siteId": siteId,
@@ -48,14 +47,14 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *basemodel.ApiErr
httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString)) httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil { if err != nil {
zap.L().Error("failed to connect to license.signoz.io", zap.Error(err)) zap.S().Errorf("failed to connect to license.signoz.io", err)
return nil, basemodel.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection")) return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
} }
httpBody, err := io.ReadAll(httpResponse.Body) httpBody, err := ioutil.ReadAll(httpResponse.Body)
if err != nil { if err != nil {
zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err)) zap.S().Errorf("failed to read activation response from license.signoz.io", err)
return nil, basemodel.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io")) return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
} }
defer httpResponse.Body.Close() defer httpResponse.Body.Close()
@@ -64,23 +63,23 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *basemodel.ApiErr
result := ActivationResult{} result := ActivationResult{}
err = json.Unmarshal(httpBody, &result) err = json.Unmarshal(httpBody, &result)
if err != nil { if err != nil {
zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err)) zap.S().Errorf("failed to marshal activation response from license.signoz.io", err)
return nil, basemodel.InternalError(errors.Wrap(err, "failed to marshal license activation response")) return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
} }
switch httpResponse.StatusCode { switch httpResponse.StatusCode {
case 200, 201: case 200, 201:
return result.Data, nil return result.Data, nil
case 400, 401: case 400, 401:
return nil, basemodel.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error))) return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
default: default:
return nil, basemodel.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error))) return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
} }
} }
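
A hedged sketch of calling the activation endpoint from startup code; the key value and wrapper function are illustrative, and only fields that appear elsewhere in this diff are used:

// activateAtStartup is a sketch, assuming the signozio package above.
func activateAtStartup() {
	resp, apiErr := ActivateLicense("XXXX-XXXX-XXXX-XXXX", "")
	if apiErr != nil {
		zap.L().Fatal("license activation failed", zap.Error(apiErr.Err))
	}
	zap.L().Info("license activated", zap.String("planDetails", resp.PlanDetails))
}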
// ValidateLicense validates the license key // ValidateLicense validates the license key
func ValidateLicense(activationId string) (*ActivationResponse, *basemodel.ApiError) { func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
validReq := map[string]string{ validReq := map[string]string{
"activationId": activationId, "activationId": activationId,
} }
@@ -89,12 +88,12 @@ func ValidateLicense(activationId string) (*ActivationResponse, *basemodel.ApiEr
response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString)) response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil { if err != nil {
return nil, basemodel.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection")) return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
} }
body, err := io.ReadAll(response.Body) body, err := ioutil.ReadAll(response.Body)
if err != nil { if err != nil {
return nil, basemodel.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io")) return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
} }
defer response.Body.Close() defer response.Body.Close()
@@ -104,14 +103,14 @@ func ValidateLicense(activationId string) (*ActivationResponse, *basemodel.ApiEr
a := ActivationResult{} a := ActivationResult{}
err = json.Unmarshal(body, &a) err = json.Unmarshal(body, &a)
if err != nil { if err != nil {
return nil, basemodel.BadRequest(errors.Wrap(err, "failed to marshal license validation response")) return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
} }
return a.Data, nil return a.Data, nil
case 400, 401: case 400, 401:
return nil, basemodel.BadRequest(errors.Wrap(fmt.Errorf(string(body)), return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io")) "bad request error received from license.signoz.io"))
default: default:
return nil, basemodel.InternalError(errors.Wrap(fmt.Errorf(string(body)), return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io")) "internal error received from license.signoz.io"))
} }
@@ -128,21 +127,21 @@ func NewPostRequestWithCtx(ctx context.Context, url string, contentType string,
} }
// SendUsage reports the usage of signoz to license server // SendUsage reports the usage of signoz to license server
func SendUsage(ctx context.Context, usage model.UsagePayload) *basemodel.ApiError { func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
reqString, _ := json.Marshal(usage) reqString, _ := json.Marshal(usage)
req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString)) req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil { if err != nil {
return basemodel.BadRequest(errors.Wrap(err, "unable to create http request")) return model.BadRequest(errors.Wrap(err, "unable to create http request"))
} }
res, err := http.DefaultClient.Do(req) res, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return basemodel.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection")) return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
} }
body, err := io.ReadAll(res.Body) body, err := io.ReadAll(res.Body)
if err != nil { if err != nil {
return basemodel.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io")) return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
} }
defer res.Body.Close() defer res.Body.Close()
@@ -151,10 +150,10 @@ func SendUsage(ctx context.Context, usage model.UsagePayload) *basemodel.ApiErro
case 200, 201: case 200, 201:
return nil return nil
case 400, 401: case 400, 401:
return basemodel.BadRequest(errors.Wrap(fmt.Errorf(string(body)), return model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io")) "bad request error received from license.signoz.io"))
default: default:
return basemodel.InternalError(errors.Wrap(fmt.Errorf(string(body)), return model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io")) "internal error received from license.signoz.io"))
} }
} }

View File

@@ -2,7 +2,6 @@ package license
import ( import (
"context" "context"
"database/sql"
"fmt" "fmt"
"time" "time"
@@ -10,7 +9,6 @@ import (
"go.signoz.io/signoz/ee/query-service/license/sqlite" "go.signoz.io/signoz/ee/query-service/license/sqlite"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap" "go.uber.org/zap"
) )
@@ -48,9 +46,8 @@ func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
return licenses, nil return licenses, nil
} }
// GetActiveLicense fetches the latest active license from DB. // GetActiveLicense fetches the latest active license from DB
// If the license is not present, expect a nil license and a nil error in the output. func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, error) {
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
var err error var err error
licenses := []model.License{} licenses := []model.License{}
@@ -58,7 +55,7 @@ func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel
err = r.db.Select(&licenses, query) err = r.db.Select(&licenses, query)
if err != nil { if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err)) return nil, fmt.Errorf("failed to get active licenses from db: %v", err)
} }
var active *model.License var active *model.License
@@ -98,7 +95,7 @@ func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
l.ValidationMessage) l.ValidationMessage)
if err != nil { if err != nil {
zap.L().Error("error in inserting license data: ", zap.Error(err)) zap.S().Errorf("error in inserting license data: ", zap.Error(err))
return fmt.Errorf("failed to insert license in db: %v", err) return fmt.Errorf("failed to insert license in db: %v", err)
} }
@@ -111,7 +108,7 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,
planDetails string) error { planDetails string) error {
if key == "" { if key == "" {
return fmt.Errorf("update plan details failed: license key is required") return fmt.Errorf("Update Plan Details failed: license key is required")
} }
query := `UPDATE licenses query := `UPDATE licenses
@@ -122,85 +119,9 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,
_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key) _, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)
if err != nil { if err != nil {
zap.L().Error("error in updating license: ", zap.Error(err)) zap.S().Errorf("error in updating license: ", zap.Error(err))
return fmt.Errorf("failed to update license in db: %v", err) return fmt.Errorf("failed to update license in db: %v", err)
} }
return nil return nil
} }
func (r *Repo) CreateFeature(req *basemodel.Feature) *basemodel.ApiError {
_, err := r.db.Exec(
`INSERT INTO feature_status (name, active, usage, usage_limit, route)
VALUES (?, ?, ?, ?, ?);`,
req.Name, req.Active, req.Usage, req.UsageLimit, req.Route)
if err != nil {
return &basemodel.ApiError{Typ: basemodel.ErrorInternal, Err: err}
}
return nil
}
func (r *Repo) GetFeature(featureName string) (basemodel.Feature, error) {
var feature basemodel.Feature
err := r.db.Get(&feature,
`SELECT * FROM feature_status WHERE name = ?;`, featureName)
if err != nil {
return feature, err
}
if feature.Name == "" {
return feature, basemodel.ErrFeatureUnavailable{Key: featureName}
}
return feature, nil
}
func (r *Repo) GetAllFeatures() ([]basemodel.Feature, error) {
var feature []basemodel.Feature
err := r.db.Select(&feature,
`SELECT * FROM feature_status;`)
if err != nil {
return feature, err
}
return feature, nil
}
func (r *Repo) UpdateFeature(req basemodel.Feature) error {
_, err := r.db.Exec(
`UPDATE feature_status SET active = ?, usage = ?, usage_limit = ?, route = ? WHERE name = ?;`,
req.Active, req.Usage, req.UsageLimit, req.Route, req.Name)
if err != nil {
return err
}
return nil
}
func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
// get a feature by name; if it doesn't exist, create it; if it does exist, update it.
for _, feature := range req {
currentFeature, err := r.GetFeature(feature.Name)
if err != nil && err == sql.ErrNoRows {
err := r.CreateFeature(&feature)
if err != nil {
return err
}
continue
} else if err != nil {
return err
}
feature.Usage = currentFeature.Usage
if feature.Usage >= feature.UsageLimit && feature.UsageLimit != -1 {
feature.Active = false
}
err = r.UpdateFeature(feature)
if err != nil {
return err
}
}
return nil
}

View File

@@ -10,7 +10,6 @@ import (
"sync" "sync"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants" baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio" validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
@@ -49,7 +48,8 @@ type Manager struct {
activeFeatures basemodel.FeatureSet activeFeatures basemodel.FeatureSet
} }
func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) { func StartManager(dbType string, db *sqlx.DB) (*Manager, error) {
if LM != nil { if LM != nil {
return LM, nil return LM, nil
} }
@@ -65,7 +65,7 @@ func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*M
repo: &repo, repo: &repo,
} }
if err := m.start(features...); err != nil { if err := m.start(); err != nil {
return m, err return m, err
} }
LM = m LM = m
@@ -73,8 +73,8 @@ func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*M
} }
// start loads active license in memory and initiates validator // start loads active license in memory and initiates validator
func (lm *Manager) start(features ...basemodel.Feature) error { func (lm *Manager) start() error {
err := lm.LoadActiveLicense(features...) err := lm.LoadActiveLicense()
return err return err
} }
@@ -84,7 +84,7 @@ func (lm *Manager) Stop() {
<-lm.terminated <-lm.terminated
} }
func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) { func (lm *Manager) SetActive(l *model.License) {
lm.mutex.Lock() lm.mutex.Lock()
defer lm.mutex.Unlock() defer lm.mutex.Unlock()
@@ -93,14 +93,9 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
} }
lm.activeLicense = l lm.activeLicense = l
lm.activeFeatures = append(l.FeatureSet, features...) lm.activeFeatures = l.FeatureSet
// set default features // set default features
setDefaultFeatures(lm) setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.L().Panic("Couldn't activate features", zap.Error(err))
}
if !lm.validatorRunning { if !lm.validatorRunning {
// we want to make sure only one validator runs, // we want to make sure only one validator runs,
// we already have lock() so good to go // we already have lock() so good to go
@@ -111,37 +106,35 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
} }
func setDefaultFeatures(lm *Manager) { func setDefaultFeatures(lm *Manager) {
lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...) for k, v := range baseconstants.DEFAULT_FEATURE_SET {
lm.activeFeatures[k] = v
}
} }
// LoadActiveLicense loads the most recent active license // LoadActiveLicense loads the most recent active license
func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error { func (lm *Manager) LoadActiveLicense() error {
var err error
active, err := lm.repo.GetActiveLicense(context.Background()) active, err := lm.repo.GetActiveLicense(context.Background())
if err != nil { if err != nil {
return err return err
} }
if active != nil { if active != nil {
lm.SetActive(active, features...) lm.SetActive(active)
} else { } else {
zap.L().Info("No active license found, defaulting to basic plan") zap.S().Info("No active license found, defaulting to basic plan")
// if no active license is found, we default to basic(free) plan with all default features // if no active license is found, we default to basic(free) plan with all default features
lm.activeFeatures = model.BasicPlan lm.activeFeatures = basemodel.BasicPlan
setDefaultFeatures(lm) setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.L().Error("Couldn't initialize features", zap.Error(err))
return err
}
} }
return nil return nil
} }
func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *basemodel.ApiError) { func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
licenses, err := lm.repo.GetLicenses(ctx) licenses, err := lm.repo.GetLicenses(ctx)
if err != nil { if err != nil {
return nil, basemodel.InternalError(err) return nil, model.InternalError(err)
} }
for _, l := range licenses { for _, l := range licenses {
@@ -189,7 +182,7 @@ func (lm *Manager) Validator(ctx context.Context) {
// Validate validates the current active license // Validate validates the current active license
func (lm *Manager) Validate(ctx context.Context) (reterr error) { func (lm *Manager) Validate(ctx context.Context) (reterr error) {
zap.L().Info("License validation started") zap.S().Info("License validation started")
if lm.activeLicense == nil { if lm.activeLicense == nil {
return nil return nil
} }
@@ -199,12 +192,12 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
lm.lastValidated = time.Now().Unix() lm.lastValidated = time.Now().Unix()
if reterr != nil { if reterr != nil {
zap.L().Error("License validation completed with error", zap.Error(reterr)) zap.S().Errorf("License validation completed with error", reterr)
atomic.AddUint64(&lm.failedAttempts, 1) atomic.AddUint64(&lm.failedAttempts, 1)
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED, telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
map[string]interface{}{"err": reterr.Error()}, "", true, false) map[string]interface{}{"err": reterr.Error()})
} else { } else {
zap.L().Info("License validation completed with no errors") zap.S().Info("License validation completed with no errors")
} }
lm.mutex.Unlock() lm.mutex.Unlock()
@@ -212,8 +205,8 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId) response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
if apiError != nil { if apiError != nil {
zap.L().Error("failed to validate license", zap.Any("apiError", apiError)) zap.S().Errorf("failed to validate license", apiError)
return apiError return apiError.Err
} }
if response.PlanDetails == lm.activeLicense.PlanDetails { if response.PlanDetails == lm.activeLicense.PlanDetails {
@@ -233,7 +226,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
} }
if err := l.ParsePlan(); err != nil { if err := l.ParsePlan(); err != nil {
zap.L().Error("failed to parse updated license", zap.Error(err)) zap.S().Errorf("failed to parse updated license", zap.Error(err))
return err return err
} }
@@ -243,7 +236,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
if err != nil { if err != nil {
// unexpected db write issue but we can let the user continue // unexpected db write issue but we can let the user continue
// and wait for update to work in next cycle. // and wait for update to work in next cycle.
zap.L().Error("failed to validate license", zap.Error(err)) zap.S().Errorf("failed to validate license", zap.Error(err))
} }
} }
@@ -255,20 +248,17 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
} }
// Activate activates a license key with signoz server // Activate activates a license key with signoz server
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *basemodel.ApiError) { func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
defer func() { defer func() {
if errResponse != nil { if errResponse != nil {
userEmail, err := auth.GetEmailFromJwt(ctx) telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
if err == nil { map[string]interface{}{"err": errResponse.Err.Error()})
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false)
}
} }
}() }()
response, apiError := validate.ActivateLicense(key, "") response, apiError := validate.ActivateLicense(key, "")
if apiError != nil { if apiError != nil {
zap.L().Error("failed to activate license", zap.Any("apiError", apiError)) zap.S().Errorf("failed to activate license", zap.Error(apiError.Err))
return nil, apiError return nil, apiError
} }
@@ -282,15 +272,15 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
err := l.ParsePlan() err := l.ParsePlan()
if err != nil { if err != nil {
zap.L().Error("failed to activate license", zap.Error(err)) zap.S().Errorf("failed to activate license", zap.Error(err))
return nil, basemodel.InternalError(err) return nil, model.InternalError(err)
} }
// store the license before activating it // store the license before activating it
err = lm.repo.InsertLicense(ctx, l) err = lm.repo.InsertLicense(ctx, l)
if err != nil { if err != nil {
zap.L().Error("failed to activate license", zap.Error(err)) zap.S().Errorf("failed to activate license", zap.Error(err))
return nil, basemodel.InternalError(err) return nil, model.InternalError(err)
} }
// license is valid, activate it // license is valid, activate it
@@ -301,31 +291,18 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
// CheckFeature will be internally used by backend routines // CheckFeature will be internally used by backend routines
// for feature gating // for feature gating
func (lm *Manager) CheckFeature(featureKey string) error { func (lm *Manager) CheckFeature(featureKey string) error {
feature, err := lm.repo.GetFeature(featureKey) if value, ok := lm.activeFeatures[featureKey]; ok {
if err != nil { if value {
return err return nil
} }
if feature.Active { return basemodel.ErrFeatureUnavailable{Key: featureKey}
return nil
} }
return basemodel.ErrFeatureUnavailable{Key: featureKey} return basemodel.ErrFeatureUnavailable{Key: featureKey}
} }
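
Either side of this change leaves callers with a single gate; a sketch of wrapping an HTTP handler with it (the feature key and wiring are illustrative; net/http and the license package are assumed imports):

// featureGate rejects requests when the named feature is not active
// on the current plan. Sketch only.
func featureGate(lm *license.Manager, featureKey string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err := lm.CheckFeature(featureKey); err != nil {
			http.Error(w, "feature not available on the current plan", http.StatusForbidden)
			return
		}
		next.ServeHTTP(w, r)
	})
}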
// GetFeatureFlags returns current active features // GetFeatureFlags returns current active features
func (lm *Manager) GetFeatureFlags() (basemodel.FeatureSet, error) { func (lm *Manager) GetFeatureFlags() basemodel.FeatureSet {
return lm.repo.GetAllFeatures() return lm.activeFeatures
}
func (lm *Manager) InitFeatures(features basemodel.FeatureSet) error {
return lm.repo.InitFeatures(features)
}
func (lm *Manager) UpdateFeatureFlag(feature basemodel.Feature) error {
return lm.repo.UpdateFeature(feature)
}
func (lm *Manager) GetFeatureFlag(key string) (basemodel.Feature, error) {
return lm.repo.GetFeature(key)
} }
// GetRepo return the license repo // GetRepo return the license repo

View File

@@ -2,7 +2,6 @@ package sqlite
import ( import (
"fmt" "fmt"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
) )
@@ -32,21 +31,7 @@ func InitDB(db *sqlx.DB) error {
_, err = db.Exec(table_schema) _, err = db.Exec(table_schema)
if err != nil { if err != nil {
return fmt.Errorf("error in creating licenses table: %s", err.Error()) return fmt.Errorf("Error in creating licenses table: %s", err.Error())
} }
table_schema = `CREATE TABLE IF NOT EXISTS feature_status (
name TEXT PRIMARY KEY,
active bool,
usage INTEGER DEFAULT 0,
usage_limit INTEGER DEFAULT 0,
route TEXT
);`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("error in creating feature_status table: %s", err.Error())
}
return nil return nil
} }

View File

@@ -3,168 +3,77 @@ package main
import ( import (
"context" "context"
"flag" "flag"
"log"
"os" "os"
"os/signal" "os/signal"
"strconv"
"syscall" "syscall"
"time"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.signoz.io/signoz/ee/query-service/app" "go.signoz.io/signoz/ee/query-service/app"
"go.signoz.io/signoz/pkg/query-service/auth" "go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants" baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/migrate"
"go.signoz.io/signoz/pkg/query-service/version" "go.signoz.io/signoz/pkg/query-service/version"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"
"go.uber.org/zap" "go.uber.org/zap"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
) )
func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger { func initZapLog() *zap.Logger {
config := zap.NewProductionConfig() config := zap.NewDevelopmentConfig()
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
defer stop()
config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
config.EncoderConfig.TimeKey = "timestamp" config.EncoderConfig.TimeKey = "timestamp"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
logger, _ := config.Build()
otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig)
defaultLogLevel := zapcore.InfoLevel
res := resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String("query-service"),
)
core := zapcore.NewTee(
zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
)
if enableQueryServiceLogOTLPExport {
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
defer cancel()
conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
log.Fatalf("failed to establish connection: %v", err)
} else {
logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
if err != nil {
logExportBatchSizeInt = 512
}
ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
BatchSize: logExportBatchSizeInt,
ResourceSchema: semconv.SchemaURL,
Resource: res,
}))
core = zapcore.NewTee(
zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
zapcore.NewCore(otlpEncoder, zapcore.NewMultiWriteSyncer(ws), defaultLogLevel),
)
}
}
logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))
return logger return logger
} }
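
The tee on the left-hand side sends every record to stdout and, when OTLP export is enabled, to the collector as well. A stripped-down, self-contained sketch of the same zapcore tee pattern, with a file sink standing in for the OTLP syncer:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	f, err := os.Create("query-service.log") // stands in for the OTLP syncer
	if err != nil {
		panic(err)
	}
	core := zapcore.NewTee(
		zapcore.NewCore(enc, zapcore.AddSync(os.Stdout), zapcore.InfoLevel),
		zapcore.NewCore(enc, zapcore.AddSync(f), zapcore.InfoLevel),
	)
	logger := zap.New(core, zap.AddCaller())
	defer logger.Sync()
	logger.Info("each record reaches both sinks")
}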
func main() { func main() {
var promConfigPath, skipTopLvlOpsPath string var promConfigPath string
// disables rule execution but allows change to the rule definition // disables rule execution but allows change to the rule definition
var disableRules bool var disableRules bool
// the url used to build link in the alert messages in slack and other systems // the url used to build link in the alert messages in slack and other systems
var ruleRepoURL string var ruleRepoURL string
var cluster string
var cacheConfigPath, fluxInterval string
var enableQueryServiceLogOTLPExport bool
var preferDelta bool
var preferSpanMetrics bool
var maxIdleConns int
var maxOpenConns int
var dialTimeout time.Duration
var gatewayUrl string
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)") flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)")
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
flag.Parse() flag.Parse()
loggerMgr := initZapLog(enableQueryServiceLogOTLPExport) loggerMgr := initZapLog()
zap.ReplaceGlobals(loggerMgr) zap.ReplaceGlobals(loggerMgr)
defer loggerMgr.Sync() // flushes buffer, if any defer loggerMgr.Sync() // flushes buffer, if any
logger := loggerMgr.Sugar()
version.PrintVersion() version.PrintVersion()
serverOptions := &app.ServerOptions{ serverOptions := &app.ServerOptions{
HTTPHostPort: baseconst.HTTPHostPort, HTTPHostPort: baseconst.HTTPHostPort,
PromConfigPath: promConfigPath, PromConfigPath: promConfigPath,
SkipTopLvlOpsPath: skipTopLvlOpsPath, PrivateHostPort: baseconst.PrivateHostPort,
PreferDelta: preferDelta, DisableRules: disableRules,
PreferSpanMetrics: preferSpanMetrics, RuleRepoURL: ruleRepoURL,
PrivateHostPort: baseconst.PrivateHostPort,
DisableRules: disableRules,
RuleRepoURL: ruleRepoURL,
MaxIdleConns: maxIdleConns,
MaxOpenConns: maxOpenConns,
DialTimeout: dialTimeout,
CacheConfigPath: cacheConfigPath,
FluxInterval: fluxInterval,
Cluster: cluster,
GatewayUrl: gatewayUrl,
} }
// Read the jwt secret key // Read the jwt secret key
auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET") auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
if len(auth.JwtSecret) == 0 { if len(auth.JwtSecret) == 0 {
zap.L().Warn("No JWT secret key is specified.") zap.S().Warn("No JWT secret key is specified.")
} else { } else {
zap.L().Info("JWT secret key set successfully.") zap.S().Info("No JWT secret key set successfully.")
}
if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
zap.L().Error("Failed to migrate", zap.Error(err))
} else {
zap.L().Info("Migration successful")
} }
server, err := app.NewServer(serverOptions) server, err := app.NewServer(serverOptions)
if err != nil { if err != nil {
zap.L().Fatal("Failed to create server", zap.Error(err)) logger.Fatal("Failed to create server", zap.Error(err))
} }
if err := server.Start(); err != nil { if err := server.Start(); err != nil {
zap.L().Fatal("Could not start server", zap.Error(err)) logger.Fatal("Could not start servers", zap.Error(err))
} }
if err := auth.InitAuthCache(context.Background()); err != nil { if err := auth.InitAuthCache(context.Background()); err != nil {
zap.L().Fatal("Failed to initialize auth cache", zap.Error(err)) logger.Fatal("Failed to initialize auth cache", zap.Error(err))
} }
signalsChannel := make(chan os.Signal, 1) signalsChannel := make(chan os.Signal, 1)
@@ -173,10 +82,9 @@ func main() {
for { for {
select { select {
case status := <-server.HealthCheckStatus(): case status := <-server.HealthCheckStatus():
zap.L().Info("Received HealthCheck status: ", zap.Int("status", int(status))) logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
case <-signalsChannel: case <-signalsChannel:
zap.L().Fatal("Received OS Interrupt Signal ... ") logger.Fatal("Received OS Interrupt Signal ... ")
server.Stop()
} }
} }
} }

View File

@@ -4,9 +4,18 @@ import (
basemodel "go.signoz.io/signoz/pkg/query-service/model" basemodel "go.signoz.io/signoz/pkg/query-service/model"
) )
// PrecheckResponse contains login precheck response
type PrecheckResponse struct {
SSO bool `json:"sso"`
SsoUrl string `json:"ssoUrl"`
CanSelfRegister bool `json:"canSelfRegister"`
IsUser bool `json:"isUser"`
SsoError string `json:"ssoError"`
}
// GettableInvitation overrides base object and adds precheck into // GettableInvitation overrides base object and adds precheck into
// response // response
type GettableInvitation struct { type GettableInvitation struct {
*basemodel.InvitationResponseObject *basemodel.InvitationResponseObject
Precheck *basemodel.PrecheckResponse `json:"precheck"` Precheck *PrecheckResponse `json:"precheck"`
} }

View File

@@ -9,8 +9,8 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/pkg/errors" "github.com/pkg/errors"
saml2 "github.com/russellhaering/gosaml2" saml2 "github.com/russellhaering/gosaml2"
"go.signoz.io/signoz/ee/query-service/sso"
"go.signoz.io/signoz/ee/query-service/sso/saml" "go.signoz.io/signoz/ee/query-service/sso/saml"
"go.signoz.io/signoz/ee/query-service/sso"
basemodel "go.signoz.io/signoz/pkg/query-service/model" basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap" "go.uber.org/zap"
) )
@@ -24,16 +24,16 @@ const (
// OrgDomain identify org owned web domains for auth and other purposes // OrgDomain identify org owned web domains for auth and other purposes
type OrgDomain struct { type OrgDomain struct {
Id uuid.UUID `json:"id"` Id uuid.UUID `json:"id"`
Name string `json:"name"` Name string `json:"name"`
OrgId string `json:"orgId"` OrgId string `json:"orgId"`
SsoEnabled bool `json:"ssoEnabled"` SsoEnabled bool `json:"ssoEnabled"`
SsoType SSOType `json:"ssoType"` SsoType SSOType `json:"ssoType"`
SamlConfig *SamlConfig `json:"samlConfig"` SamlConfig *SamlConfig `json:"samlConfig"`
GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"` GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`
Org *basemodel.Organization Org *basemodel.Organization
} }
func (od *OrgDomain) String() string { func (od *OrgDomain) String() string {
@@ -100,11 +100,11 @@ func (od *OrgDomain) GetSAMLCert() string {
return "" return ""
} }
// PrepareGoogleOAuthProvider creates GoogleProvider that is used in // PrepareGoogleOAuthProvider creates GoogleProvider that is used in
// requesting OAuth and also used in processing response from google // requesting OAuth and also used in processing response from google
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) { func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
if od.GoogleAuthConfig == nil { if od.GoogleAuthConfig == nil {
return nil, fmt.Errorf("GOOGLE OAUTH is not setup correctly for this domain") return nil, fmt.Errorf("Google auth is not setup correctly for this domain")
} }
return od.GoogleAuthConfig.GetProvider(od.Name, siteUrl) return od.GoogleAuthConfig.GetProvider(od.Name, siteUrl)
@@ -137,36 +137,38 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro
} }
func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) { func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1) fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)
// build redirect url from window.location sent by frontend // build redirect url from window.location sent by frontend
redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path) redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
// prepare state that gets relayed back when the auth provider // prepare state that gets relayed back when the auth provider
// calls back our url. here we pass the app url (where signoz runs) // calls back our url. here we pass the app url (where signoz runs)
// and the domain Id. The domain Id helps in identifying sso config // and the domain Id. The domain Id helps in identifying sso config
// when the call back occurs and the app url is useful in redirecting user // when the call back occurs and the app url is useful in redirecting user
// back to the right path. // back to the right path.
// why do we need to pass app url? the callback typically is handled by backend // and sometimes backend might run at a different port or is unaware of frontend
// and sometimes backend might run at a different port or is unaware of frontend // endpoint (unless SITE_URL param is set). hence, we receive this build sso request
// endpoint (unless SITE_URL param is set). hence, we receive this build sso request // endpoint (unless SITE_URL param is set). hence, we receive this build sso request
// along with frontend window.location and use it to relay the information through // along with frontend window.location and use it to relay the information through
// auth provider to the backend (HandleCallback or HandleSSO method). // auth provider to the backend (HandleCallback or HandleSSO method).
relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId) relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)
switch od.SsoType { switch (od.SsoType) {
case SAML: case SAML:
sp, err := od.PrepareSamlRequest(siteUrl) sp, err := od.PrepareSamlRequest(siteUrl)
if err != nil { if err != nil {
return "", err return "", err
} }
return sp.BuildAuthURL(relayState) return sp.BuildAuthURL(relayState)
case GoogleAuth: case GoogleAuth:
googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl) googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl)
if err != nil { if err != nil {
return "", err return "", err
@@ -174,8 +176,9 @@ func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
return googleProvider.BuildAuthURL(relayState) return googleProvider.BuildAuthURL(relayState)
default: default:
zap.L().Error("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name)) zap.S().Errorf("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
return "", fmt.Errorf("unsupported SSO config for the domain") return "", fmt.Errorf("unsupported SSO config for the domain")
} }
} }
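
On the callback side the relay state can be unpacked again. A minimal sketch, assuming the "<redirectURL>?domainId=<id>" shape produced above (the function is illustrative; net/url and strings are assumed imports):

// parseRelayState recovers the frontend redirect URL and the original domain
// id; BuildSsoUrl substituted colons for hyphens, so reverse that here.
func parseRelayState(relayState string) (redirectURL, domainID string, err error) {
	u, err := url.Parse(relayState)
	if err != nil {
		return "", "", err
	}
	domainID = strings.ReplaceAll(u.Query().Get("domainId"), ":", "-")
	u.RawQuery = ""
	return u.String(), domainID, nil
}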

View File

@@ -1,5 +1,106 @@
package model package model
import (
"fmt"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
type ApiError struct {
Typ basemodel.ErrorType
Err error
}
func (a *ApiError) Type() basemodel.ErrorType {
return a.Typ
}
func (a *ApiError) ToError() error {
if a != nil {
return a.Err
}
return a.Err
}
func (a *ApiError) Error() string {
return a.Err.Error()
}
func (a *ApiError) IsNil() bool {
return a == nil || a.Err == nil
}
// NewApiError returns an ApiError object of the given type
func NewApiError(typ basemodel.ErrorType, err error) *ApiError {
return &ApiError{
Typ: typ,
Err: err,
}
}
// BadRequest returns an ApiError object of bad request
func BadRequest(err error) *ApiError {
return &ApiError{
Typ: basemodel.ErrorBadData,
Err: err,
}
}
// BadRequestStr returns an ApiError object of bad request for string input
func BadRequestStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorBadData,
Err: fmt.Errorf(s),
}
}
// InternalError returns an ApiError object of internal type
func InternalError(err error) *ApiError {
return &ApiError{
Typ: basemodel.ErrorInternal,
Err: err,
}
}
// InternalErrorStr returns an ApiError object of internal type for string input
func InternalErrorStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf(s),
}
}
var (
ErrorNone basemodel.ErrorType = ""
ErrorTimeout basemodel.ErrorType = "timeout"
ErrorCanceled basemodel.ErrorType = "canceled"
ErrorExec basemodel.ErrorType = "execution"
ErrorBadData basemodel.ErrorType = "bad_data"
ErrorInternal basemodel.ErrorType = "internal"
ErrorUnavailable basemodel.ErrorType = "unavailable"
ErrorNotFound basemodel.ErrorType = "not_found"
ErrorNotImplemented basemodel.ErrorType = "not_implemented"
ErrorUnauthorized basemodel.ErrorType = "unauthorized"
ErrorForbidden basemodel.ErrorType = "forbidden"
ErrorConflict basemodel.ErrorType = "conflict"
ErrorStreamingNotSupported basemodel.ErrorType = "streaming is not supported"
)
func init() {
ErrorNone = basemodel.ErrorNone
ErrorTimeout = basemodel.ErrorTimeout
ErrorCanceled = basemodel.ErrorCanceled
ErrorExec = basemodel.ErrorExec
ErrorBadData = basemodel.ErrorBadData
ErrorInternal = basemodel.ErrorInternal
ErrorUnavailable = basemodel.ErrorUnavailable
ErrorNotFound = basemodel.ErrorNotFound
ErrorNotImplemented = basemodel.ErrorNotImplemented
ErrorUnauthorized = basemodel.ErrorUnauthorized
ErrorForbidden = basemodel.ErrorForbidden
ErrorConflict = basemodel.ErrorConflict
ErrorStreamingNotSupported = basemodel.ErrorStreamingNotSupported
}
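
A quick illustration of how these helpers are meant to be consumed by an HTTP layer; the status mapping is illustrative, not taken from this diff, and net/http is an assumed import:

// statusFor maps an ApiError's type onto an HTTP status code. Sketch only.
func statusFor(apiErr *ApiError) int {
	switch apiErr.Type() {
	case basemodel.ErrorBadData:
		return http.StatusBadRequest
	case basemodel.ErrorUnauthorized:
		return http.StatusUnauthorized
	case basemodel.ErrorForbidden:
		return http.StatusForbidden
	case basemodel.ErrorNotFound:
		return http.StatusNotFound
	default:
		return http.StatusInternalServerError
	}
}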
type ErrUnsupportedAuth struct{} type ErrUnsupportedAuth struct{}
func (errUnsupportedAuth ErrUnsupportedAuth) Error() string { func (errUnsupportedAuth ErrUnsupportedAuth) Error() string {

View File

@@ -89,18 +89,3 @@ func (l *License) ParseFeatures() {
l.FeatureSet = BasicPlan l.FeatureSet = BasicPlan
} }
} }
type Licenses struct {
TrialStart int64 `json:"trialStart"`
TrialEnd int64 `json:"trialEnd"`
OnTrial bool `json:"onTrial"`
WorkSpaceBlock bool `json:"workSpaceBlock"`
TrialConvertedToSubscription bool `json:"trialConvertedToSubscription"`
GracePeriodEnd int64 `json:"gracePeriodEnd"`
Licenses []License `json:"licenses"`
}
type SubscriptionServerResp struct {
Status string `json:"status"`
Data Licenses `json:"data"`
}

View File

@@ -1,32 +0,0 @@
package model
type User struct {
Id string `json:"id" db:"id"`
Name string `json:"name" db:"name"`
Email string `json:"email" db:"email"`
CreatedAt int64 `json:"createdAt" db:"created_at"`
ProfilePictureURL string `json:"profilePictureURL" db:"profile_picture_url"`
NotFound bool `json:"notFound"`
}
type CreatePATRequestBody struct {
Name string `json:"name"`
Role string `json:"role"`
ExpiresInDays int64 `json:"expiresInDays"`
}
type PAT struct {
Id string `json:"id" db:"id"`
UserID string `json:"userId" db:"user_id"`
CreatedByUser User `json:"createdByUser"`
UpdatedByUser User `json:"updatedByUser"`
Token string `json:"token" db:"token"`
Role string `json:"role" db:"role"`
Name string `json:"name" db:"name"`
CreatedAt int64 `json:"createdAt" db:"created_at"`
ExpiresAt int64 `json:"expiresAt" db:"expires_at"`
UpdatedAt int64 `json:"updatedAt" db:"updated_at"`
LastUsed int64 `json:"lastUsed" db:"last_used"`
Revoked bool `json:"revoked" db:"revoked"`
UpdatedByUserID string `json:"updatedByUserId" db:"updated_by_user_id"`
}

View File

@@ -9,308 +9,23 @@ const Basic = "BASIC_PLAN"
const Pro = "PRO_PLAN" const Pro = "PRO_PLAN"
const Enterprise = "ENTERPRISE_PLAN" const Enterprise = "ENTERPRISE_PLAN"
const DisableUpsell = "DISABLE_UPSELL" const DisableUpsell = "DISABLE_UPSELL"
const Onboarding = "ONBOARDING"
const ChatSupport = "CHAT_SUPPORT"
var BasicPlan = basemodel.FeatureSet{ var BasicPlan = basemodel.FeatureSet{
basemodel.Feature{ Basic: true,
Name: SSO, SSO: false,
Active: false, DisableUpsell: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.OSS,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: DisableUpsell,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.SmartTraceDetail,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.CustomMetricsFunction,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderPanels,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderAlerts,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelSlack,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelWebhook,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelPagerduty,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelOpsgenie,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelEmail,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelMsTeams,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
} }
var ProPlan = basemodel.FeatureSet{ var ProPlan = basemodel.FeatureSet{
basemodel.Feature{ Pro: true,
Name: SSO, SSO: true,
Active: true, basemodel.SmartTraceDetail: true,
Usage: 0, basemodel.CustomMetricsFunction: true,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.OSS,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.SmartTraceDetail,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.CustomMetricsFunction,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderPanels,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderAlerts,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelSlack,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelWebhook,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelPagerduty,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelOpsgenie,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelEmail,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelMsTeams,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
} }
var EnterprisePlan = basemodel.FeatureSet{ var EnterprisePlan = basemodel.FeatureSet{
basemodel.Feature{ Enterprise: true,
Name: SSO, SSO: true,
Active: true, basemodel.SmartTraceDetail: true,
Usage: 0, basemodel.CustomMetricsFunction: true,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.OSS,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.SmartTraceDetail,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.CustomMetricsFunction,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderPanels,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderAlerts,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelSlack,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelWebhook,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelPagerduty,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelOpsgenie,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelEmail,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelMsTeams,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: Onboarding,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: ChatSupport,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
} }

View File

@@ -20,8 +20,6 @@ type Usage struct {
TimeStamp time.Time `json:"timestamp"` TimeStamp time.Time `json:"timestamp"`
Count int64 `json:"count"` Count int64 `json:"count"`
Size int64 `json:"size"` Size int64 `json:"size"`
OrgName string `json:"orgName"`
TenantId string `json:"tenantId"`
} }
type UsageDB struct { type UsageDB struct {

View File

@@ -102,6 +102,6 @@ func PrepareRequest(issuer, acsUrl, audience, entity, idp, certString string) (*
IDPCertificateStore: certStore, IDPCertificateStore: certStore,
SPKeyStore: randomKeyStore, SPKeyStore: randomKeyStore,
} }
zap.L().Debug("SAML request", zap.Any("sp", sp)) zap.S().Debugf("SAML request:", sp)
return sp, nil return sp, nil
} }

View File

@@ -4,19 +4,16 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"os"
"regexp"
"strings" "strings"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2"
"github.com/go-co-op/gocron"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.uber.org/zap" "go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/dao"
licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio" licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
"go.signoz.io/signoz/ee/query-service/license" "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
@@ -31,6 +28,9 @@ const (
 )
 
 var (
+	// send usage every 24 hour
+	uploadFrequency = 24 * time.Hour
+
 	locker = stateUnlocked
 )
@@ -39,30 +39,20 @@ type Manager struct {
 	licenseRepo *license.Repo
 
-	scheduler *gocron.Scheduler
-
-	modelDao dao.ModelDao
-
-	tenantID string
+	// end the usage routine, this is important to gracefully
+	// stopping usage reporting and protect in-consistent updates
+	done chan struct{}
+
+	// terminated waits for the UsageExporter go routine to end
+	terminated chan struct{}
 }
 
-func New(dbType string, modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
-	hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
-	hostNameRegexMatches := hostNameRegex.FindStringSubmatch(os.Getenv("ClickHouseUrl"))
-
-	tenantID := ""
-	if len(hostNameRegexMatches) == 2 {
-		tenantID = hostNameRegexMatches[1]
-		tenantID = strings.TrimSuffix(tenantID, "-clickhouse")
-	}
-
+func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
 	m := &Manager{
 		// repository: repo,
 		clickhouseConn: clickhouseConn,
 		licenseRepo:    licenseRepo,
-		scheduler:      gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every at 00:00 UTC
-		modelDao:       modelDao,
-		tenantID:       tenantID,
 	}
 	return m, nil
 }
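Aside: the removed branch above derives a tenant ID from the ClickHouse URL with a named capture group. A standalone sketch of that extraction logic — the regex and the "-clickhouse" suffix convention come from the removed code; the helper name is hypothetical:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    var hostNameRegex = regexp.MustCompile(`tcp://(?P<hostname>.*):`)

    // tenantIDFromURL mirrors the removed logic: grab the hostname from a
    // tcp:// DSN and strip the "-clickhouse" suffix.
    func tenantIDFromURL(url string) string {
        matches := hostNameRegex.FindStringSubmatch(url)
        if len(matches) != 2 {
            return ""
        }
        return strings.TrimSuffix(matches[1], "-clickhouse")
    }

    func main() {
        fmt.Println(tenantIDFromURL("tcp://acme-clickhouse:9000")) // acme
        fmt.Println(tenantIDFromURL("not-a-dsn"))                  // empty
    }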
@@ -74,30 +64,37 @@ func (lm *Manager) Start() error {
 		return fmt.Errorf("usage exporter is locked")
 	}
 
-	_, err := lm.scheduler.Do(func() { lm.UploadUsage() })
-	if err != nil {
-		return err
-	}
-
-	// upload usage once when starting the service
-	lm.UploadUsage()
-
-	lm.scheduler.StartAsync()
+	go lm.UsageExporter(context.Background())
 
 	return nil
 }
 
-func (lm *Manager) UploadUsage() {
-	ctx := context.Background()
+func (lm *Manager) UsageExporter(ctx context.Context) {
+	defer close(lm.terminated)
+
+	uploadTicker := time.NewTicker(uploadFrequency)
+	defer uploadTicker.Stop()
+
+	for {
+		select {
+		case <-lm.done:
+			return
+		case <-uploadTicker.C:
+			lm.UploadUsage(ctx)
+		}
+	}
+}
+
+func (lm *Manager) UploadUsage(ctx context.Context) error {
 	// check if license is present or not
-	license, err := lm.licenseRepo.GetActiveLicense(ctx)
+	license, err := lm.licenseRepo.GetActiveLicense(context.Background())
 	if err != nil {
-		zap.L().Error("failed to get active license", zap.Error(err))
-		return
+		return fmt.Errorf("failed to get active license")
 	}
 	if license == nil {
 		// we will not start the usage reporting if license is not present.
-		zap.L().Info("no license present, skipping usage reporting")
-		return
+		zap.S().Info("no license present, skipping usage reporting")
+		return nil
 	}
 
 	usages := []model.UsageDB{}
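Aside: the pattern added on the plus side — a goroutine driven by a time.Ticker, a done channel to request shutdown, and a terminated channel so Stop can wait for the loop to exit — is a standard Go worker lifecycle. A self-contained sketch of just that skeleton (names mirror the diff; the worker body is a placeholder):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    type worker struct {
        done       chan struct{} // closed by Stop to end the loop
        terminated chan struct{} // closed by the loop when it has exited
    }

    func newWorker() *worker {
        return &worker{done: make(chan struct{}), terminated: make(chan struct{})}
    }

    func (w *worker) Start(interval time.Duration) {
        go func() {
            defer close(w.terminated)
            ticker := time.NewTicker(interval)
            defer ticker.Stop()
            for {
                select {
                case <-w.done:
                    return
                case <-ticker.C:
                    w.upload(context.Background())
                }
            }
        }()
    }

    func (w *worker) upload(ctx context.Context) { fmt.Println("tick: uploading") }

    // Stop signals the loop and blocks until it has fully exited.
    func (w *worker) Stop() {
        close(w.done)
        <-w.terminated
    }

    func main() {
        w := newWorker()
        w.Start(10 * time.Millisecond)
        time.Sleep(35 * time.Millisecond)
        w.Stop()
    }

Closing done (rather than sending on it) lets any number of loops observe the shutdown, and waiting on terminated guarantees no upload is cut off mid-flight.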
@@ -123,8 +120,7 @@ func (lm *Manager) UploadUsage() {
 		dbusages := []model.UsageDB{}
 		err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
 		if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
-			zap.L().Error("failed to get usage from clickhouse: %v", zap.Error(err))
-			return
+			return err
 		}
 		for _, u := range dbusages {
 			u.Type = db
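Aside: the hunk above pulls the last 24 hours of snapshots with clickhouse-go's Select, which scans rows into a tagged struct slice. A compile-oriented sketch of that call shape — the table and column names here are illustrative stand-ins, not SigNoz's actual schema:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/ClickHouse/clickhouse-go/v2"
    )

    // snapshot is an illustrative row type; clickhouse-go v2 maps columns
    // onto fields via `ch` struct tags.
    type snapshot struct {
        CollectorID string    `ch:"collector_id"`
        ExporterID  string    `ch:"exporter_id"`
        CreatedAt   time.Time `ch:"created_at"`
        Data        string    `ch:"data"`
    }

    func main() {
        conn, err := clickhouse.Open(&clickhouse.Options{
            Addr: []string{"127.0.0.1:9000"},
        })
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        var rows []snapshot
        // Parameterized cutoff, mirroring time.Now().Add(-(24 * time.Hour)) above.
        err = conn.Select(context.Background(), &rows,
            "SELECT collector_id, exporter_id, created_at, data FROM usage_snapshots WHERE created_at >= ?",
            time.Now().Add(-24*time.Hour))
        if err != nil {
            panic(err)
        }
        fmt.Println(len(rows), "snapshots")
    }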
@@ -133,42 +129,29 @@ func (lm *Manager) UploadUsage() {
 	}
 
 	if len(usages) <= 0 {
-		zap.L().Info("no snapshots to upload, skipping.")
-		return
+		zap.S().Info("no snapshots to upload, skipping.")
+		return nil
 	}
 
-	zap.L().Info("uploading usage data")
-
-	orgName := ""
-	orgNames, orgError := lm.modelDao.GetOrgs(ctx)
-	if orgError != nil {
-		zap.L().Error("failed to get org data: %v", zap.Error(orgError))
-	}
-	if len(orgNames) == 1 {
-		orgName = orgNames[0].Name
-	}
+	zap.S().Info("uploading usage data")
 
 	usagesPayload := []model.Usage{}
 	for _, usage := range usages {
 		usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
 		if err != nil {
-			zap.L().Error("error while decrypting usage data: %v", zap.Error(err))
-			return
+			return err
 		}
 
 		usageData := model.Usage{}
 		err = json.Unmarshal(usageDataBytes, &usageData)
 		if err != nil {
-			zap.L().Error("error while unmarshalling usage data: %v", zap.Error(err))
-			return
+			return err
 		}
 
 		usageData.CollectorID = usage.CollectorID
 		usageData.ExporterID = usage.ExporterID
 		usageData.Type = usage.Type
 		usageData.Tenant = usage.Tenant
-		usageData.OrgName = orgName
-		usageData.TenantId = lm.tenantID
 		usagesPayload = append(usagesPayload, usageData)
 	}
@@ -177,33 +160,34 @@ func (lm *Manager) UploadUsage() {
 		LicenseKey: key,
 		Usage:      usagesPayload,
 	}
 
-	lm.UploadUsageWithExponentalBackOff(ctx, payload)
+	err = lm.UploadUsageWithExponentalBackOff(ctx, payload)
+	if err != nil {
+		return err
+	}
+	return nil
 }
 
-func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) {
+func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) error {
 	for i := 1; i <= MaxRetries; i++ {
 		apiErr := licenseserver.SendUsage(ctx, payload)
 		if apiErr != nil && i == MaxRetries {
-			zap.L().Error("retries stopped : %v", zap.Error(apiErr))
+			zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
 			// not returning error here since it is captured in the failed count
-			return
+			return nil
 		} else if apiErr != nil {
 			// sleeping for exponential backoff
 			sleepDuration := RetryInterval * time.Duration(i)
-			zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Any("apiErr", apiErr))
+			zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err))
 			time.Sleep(sleepDuration)
 		} else {
 			break
 		}
 	}
+	return nil
 }
 
 func (lm *Manager) Stop() {
-	lm.scheduler.Stop()
-
-	zap.L().Info("sending usage data before shutting down")
-	// send usage before shutting down
-	lm.UploadUsage()
-
+	close(lm.done)
 	atomic.StoreUint32(&locker, stateUnlocked)
+	<-lm.terminated
 }
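One more note on UploadUsageWithExponentalBackOff: despite the name, the sleep in both versions grows linearly (RetryInterval * time.Duration(i)). A minimal sketch of the retry shape, with the linear growth kept to match the source (swap in a doubling delay for true exponential backoff):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    const (
        maxRetries    = 3
        retryInterval = 100 * time.Millisecond
    )

    // uploadWithRetry retries send up to maxRetries times. The final failure
    // is swallowed (as in the diff, where it is captured in a failed count).
    func uploadWithRetry(send func() error) error {
        for i := 1; i <= maxRetries; i++ {
            err := send()
            if err == nil {
                return nil
            }
            if i == maxRetries {
                fmt.Println("retries stopped:", err)
                return nil
            }
            sleep := retryInterval * time.Duration(i) // linear, as in the source
            fmt.Printf("upload failed, retrying in %v: %v\n", sleep, err)
            time.Sleep(sleep)
        }
        return nil
    }

    func main() {
        attempts := 0
        _ = uploadWithRetry(func() error {
            attempts++
            if attempts < 3 {
                return errors.New("transient failure")
            }
            return nil
        })
        fmt.Println("attempts:", attempts)
    }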

View File

@@ -1,7 +1,7 @@
 {
 	"presets": [
 		"@babel/preset-env",
-		["@babel/preset-react", { "runtime": "automatic" }],
+		"@babel/preset-react",
 		"@babel/preset-typescript"
 	],
 	"plugins": [

View File

@@ -1,3 +1,5 @@
 node_modules
 .vscode
+build
+.env
 .git

View File

@@ -16,7 +16,6 @@ module.exports = {
 		'plugin:sonarjs/recommended',
 		'plugin:import/errors',
 		'plugin:import/warnings',
-		'plugin:react/jsx-runtime',
 	],
 	parser: '@typescript-eslint/parser',
 	parserOptions: {
@@ -59,7 +58,7 @@ module.exports = {
 		'react/no-array-index-key': 'error',
 		'linebreak-style': [
 			'error',
-			process.env.platform === 'win32' ? 'windows' : 'unix',
+			process.platform === 'win32' ? 'windows' : 'unix',
 		],
 		'@typescript-eslint/default-param-last': 'off',
@@ -86,7 +85,6 @@ module.exports = {
 		},
 	],
 	'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
-	'no-plusplus': 'off',
 	'jsx-a11y/label-has-associated-control': [
 		'error',
 		{
@@ -104,12 +102,12 @@ module.exports = {
 		},
 	],
 	'@typescript-eslint/no-unused-vars': 'error',
-	'func-style': ['error', 'declaration', { allowArrowFunctions: true }],
-	'arrow-body-style': ['error', 'as-needed'],
 	// eslint rules need to remove
-	'no-shadow': 'off',
 	'@typescript-eslint/no-shadow': 'off',
 	'import/no-cycle': 'off',
 	'prettier/prettier': [
 		'error',
 		{},

Some files were not shown because too many files have changed in this diff.