Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | 8fde9008b2 |  |
.github/CODEOWNERS (10 changes, vendored)

@@ -2,12 +2,6 @@
 # Owners are automatically requested for review for PRs that changes code
 # that they own.
 * @ankitnayan
-/frontend/ @palashgdev
+/frontend/ @palashgdev @pranshuchittora
 /deploy/ @prashant-shahi
 /sample-apps/ @prashant-shahi
-**/query-service/ @srikanthccv
-Makefile @srikanthccv
-go.* @srikanthccv
-.git* @srikanthccv
-.github @prashant-shahi
-/pkg/query-service/ @srikanthccv
.github/config.yml (2 changes, vendored)

@@ -17,7 +17,7 @@ newPRWelcomeComment: >
 # Comment to be posted to on pull requests merged by a first time user
 firstPRMergeComment: >
   Congrats on merging your first pull request!

-  
+  We here at SigNoz are proud of you! 🥳
.github/workflows/build.yaml (39 changes, vendored)

@@ -12,28 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Install dependencies
-        run: cd frontend && yarn install
-      - name: Run ESLint
-        run: cd frontend && npm run lint
-      - name: Run Jest
-        run: cd frontend && npm run jest
-      - name: TSC
-        run: yarn tsc
-        working-directory: ./frontend
-      - name: Build frontend docker image
-        shell: bash
-        run: |
-          make build-frontend-amd64
-
-  build-frontend-ee:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Create .env file
-        run: echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
+        uses: actions/checkout@v2
       - name: Install dependencies
         run: cd frontend && yarn install
       - name: Run ESLint

@@ -51,16 +30,8 @@ jobs:
   build-query-service:
     runs-on: ubuntu-latest
     steps:
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.21"
       - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Run tests
-        shell: bash
-        run: |
-          make test
+        uses: actions/checkout@v2
       - name: Build query-service image
        shell: bash
        run: |

@@ -69,12 +40,8 @@ jobs:
   build-ee-query-service:
     runs-on: ubuntu-latest
     steps:
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.21"
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
       - name: Build EE query-service image
         shell: bash
         run: |
.github/workflows/codeql.yaml (8 changes, vendored)

@@ -39,11 +39,11 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2

       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v2
+        uses: github/codeql-action/init@v1
         with:
           languages: ${{ matrix.language }}
           # If you wish to specify custom queries, you can do so here or in a config file.

@@ -54,7 +54,7 @@ jobs:
     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
+      uses: github/codeql-action/autobuild@v1

     # ℹ️ Command-line programs to run using the OS shell.
     # 📚 https://git.io/JvXDl

@@ -68,4 +68,4 @@ jobs:
     #   make release

     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
+      uses: github/codeql-action/analyze@v1
.github/workflows/commitlint.yml (9 changes, vendored)

@@ -7,7 +7,12 @@ jobs:
   lint-commits:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.3.1
        with:
          # we actually need "github.event.pull_request.commits + 1" commit
          fetch-depth: 0
-      - uses: wagoid/commitlint-github-action@v5
+      - uses: actions/setup-node@v2.1.0
+      # or just "yarn" if you depend on "@commitlint/cli" already
+      - run: yarn add @commitlint/cli
+      - run: yarn add @commitlint/config-conventional
+      - run: yarn run commitlint --config ./node_modules/@commitlint/config-conventional/index.js --from HEAD~${{ github.event.pull_request.commits }} --to HEAD

@@ -12,11 +12,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - name: Checkout Codebase
-       uses: actions/checkout@v3
+       uses: actions/checkout@v2
        with:
          repository: signoz/gh-bot
      - name: Use Node v16
-       uses: actions/setup-node@v3
+       uses: actions/setup-node@v2
        with:
          node-version: 16
      - name: Setup Cache & Install Dependencies
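For reference, the explicit commitlint steps on the `main` side can be reproduced outside CI. A minimal local sketch, assuming Node and yarn are installed and you run it from the repository root (linting only the latest commit here, where the workflow uses the PR's commit count):

```bash
# Install the linter and the conventional-commits ruleset, as the workflow does
yarn add @commitlint/cli @commitlint/config-conventional

# Lint the most recent commit against the conventional-commits rules
yarn run commitlint \
  --config ./node_modules/@commitlint/config-conventional/index.js \
  --from HEAD~1 --to HEAD
```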
.github/workflows/e2e-k3s.yaml (12 changes, vendored)

@@ -13,7 +13,7 @@ jobs:
       DOCKER_TAG: pull-${{ github.event.number }}
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2

       - name: Build query-service image
         env:

@@ -37,7 +37,7 @@ jobs:
           kubectl create ns sample-application

           # apply hotrod k8s manifest file
-          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml

           # wait for all deployments in sample-application namespace to be READY
           kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s

@@ -57,7 +57,7 @@ jobs:
             --set frontend.service.type=LoadBalancer \
             --set queryService.image.tag=$DOCKER_TAG \
             --set frontend.image.tag=$DOCKER_TAG
-
+
           # get pods, services and the container images
           kubectl get pods -n platform
           kubectl get svc -n platform

@@ -69,14 +69,12 @@ jobs:
             --restart='OnFailure' -i --rm --command -- curl -X POST -F \
             'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm

-      - name: Get short commit SHA, display tunnel URL and IP Address of the worker node
+      - name: Get short commit SHA and display tunnel URL
        id: get-subdomain
        run: |
          subdomain="pr-$(git rev-parse --short HEAD)"
          echo "URL for tunnelling: https://$subdomain.loca.lt"
-         echo "subdomain=$subdomain" >> $GITHUB_OUTPUT
-         worker_ip="$(curl -4 -s ipconfig.io/ip)"
-         echo "Worker node IP address: $worker_ip"
+         echo "::set-output name=subdomain::$subdomain"

      - name: Start tunnel
        env:
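The last hunk also captures the step-output change between the two branches: the `develop` side writes outputs to the `$GITHUB_OUTPUT` file, while `main` still prints the `::set-output` workflow command, which GitHub has deprecated. A minimal sketch of the two forms side by side (the variable name is taken from the diff above):

```bash
subdomain="pr-$(git rev-parse --short HEAD)"

# current form (develop side): append key=value to the file GitHub provides
echo "subdomain=$subdomain" >> "$GITHUB_OUTPUT"

# deprecated form (main side): emit a workflow command on stdout
echo "::set-output name=subdomain::$subdomain"
```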
.github/workflows/playwright.yaml (4 changes, vendored)

@@ -9,8 +9,8 @@ jobs:
     timeout-minutes: 60
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-node@v3
+      - uses: actions/checkout@v2
+      - uses: actions/setup-node@v2
        with:
          node-version: "16.x"
      - name: Install dependencies
||||
5
.github/workflows/pr_verify_linked_issue.yml
vendored
5
.github/workflows/pr_verify_linked_issue.yml
vendored
@@ -5,7 +5,7 @@ name: VerifyIssue
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [edited, opened]
|
||||
types: [edited, synchronize, opened, reopened]
|
||||
check_run:
|
||||
|
||||
jobs:
|
||||
@@ -14,6 +14,7 @@ jobs:
|
||||
name: Ensure Pull Request has a linked issue.
|
||||
steps:
|
||||
- name: Verify Linked Issue
|
||||
uses: srikanthccv/verify-linked-issue-action@v0.71
|
||||
uses: hattan/verify-linked-issue-action@v1.1.0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
|
||||
.github/workflows/push.yaml (74 changes, vendored)

@@ -14,19 +14,19 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
      - name: Set up QEMU
-       uses: docker/setup-qemu-action@v2
+       uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v1
        with:
          version: latest
      - name: Login to DockerHub
-       uses: docker/login-action@v2
+       uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
-     - uses: benjlevesque/short-sha@v2.2
+     - uses: benjlevesque/short-sha@v1.2
        id: short-sha
      - name: Get branch name
        id: branch-name

@@ -49,19 +49,19 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
      - name: Set up QEMU
-       uses: docker/setup-qemu-action@v2
+       uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v1
        with:
          version: latest
      - name: Login to DockerHub
-       uses: docker/login-action@v2
+       uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
-     - uses: benjlevesque/short-sha@v2.2
+     - uses: benjlevesque/short-sha@v1.2
        id: short-sha
      - name: Get branch name
        id: branch-name

@@ -84,7 +84,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
      - name: Install dependencies
        working-directory: frontend
        run: yarn install

@@ -97,15 +97,15 @@ jobs:
        run: npm run lint
        continue-on-error: true
      - name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v1
        with:
          version: latest
      - name: Login to DockerHub
-       uses: docker/login-action@v2
+       uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
-     - uses: benjlevesque/short-sha@v2.2
+     - uses: benjlevesque/short-sha@v1.2
        id: short-sha
      - name: Get branch name
        id: branch-name

@@ -123,49 +123,3 @@ jobs:
           fi
      - name: Build and push docker image
        run: make build-push-frontend
-
-  image-build-and-push-frontend-ee:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Create .env file
-        run: echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
-      - name: Install dependencies
-        working-directory: frontend
-        run: yarn install
-      - name: Run Prettier
-        working-directory: frontend
-        run: npm run prettify
-        continue-on-error: true
-      - name: Run ESLint
-        working-directory: frontend
-        run: npm run lint
-        continue-on-error: true
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-        with:
-          version: latest
-      - name: Login to DockerHub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v2.2
-        id: short-sha
-      - name: Get branch name
-        id: branch-name
-        uses: tj-actions/branch-names@v5.1
-      - name: Set docker tag environment
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            tag="${{ steps.branch-name.outputs.tag }}"
-            tag="${tag:1}"
-            echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
-          fi
-      - name: Build and push docker image
-        run: make build-push-frontend
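The removed `Set docker tag environment` step derives the image tag from the triggering ref. A sketch of the same shell logic run outside Actions, with an illustrative tag value (in the workflow the result is appended to `$GITHUB_ENV`):

```bash
# On a tag build: strip the leading "v" from the git tag and add the -ee suffix
tag="v0.29.1"
tag="${tag:1}"                # bash substring: drops the first character -> "0.29.1"
echo "DOCKER_TAG=${tag}-ee"   # -> DOCKER_TAG=0.29.1-ee

# On main the tag is fixed to latest-ee; any other branch reuses the branch name
branch="feature-x"
echo "DOCKER_TAG=${branch}-ee"
```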
.github/workflows/release-drafter.yml (6 changes, vendored)

@@ -12,12 +12,6 @@ on:

 jobs:
   update_release_draft:
-    permissions:
-      # write permission is required to create a github release
-      contents: write
-      # write permission is required for autolabeler
-      # otherwise, read permission is required at least
-      pull-requests: write
     runs-on: ubuntu-latest
     steps:
       # (Optional) GitHub Enterprise requires GHE_HOST variable set
.github/workflows/remove-label.yaml (12 changes, vendored)

@@ -8,15 +8,9 @@ jobs:
   remove:
     runs-on: ubuntu-latest
     steps:
-      - name: Remove label ok-to-test from PR
-        uses: buildsville/add-remove-label@v2.0.0
+      - name: Remove label
+        uses: buildsville/add-remove-label@v1
        with:
-         label: ok-to-test
-         type: remove
-         token: ${{ secrets.GITHUB_TOKEN }}
-     - name: Remove label testing-deploy from PR
-       uses: buildsville/add-remove-label@v2.0.0
-       with:
-         label: testing-deploy
+         label: ok-to-test,testing-deploy
          type: remove
          token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/repo-stats.yml (25 changes, vendored, new file)

@@ -0,0 +1,25 @@
+on:
+  schedule:
+    # Run this once per day, towards the end of the day for keeping the most
+    # recent data point most meaningful (hours are interpreted in UTC).
+    - cron: "0 8 * * *"
+  workflow_dispatch: # Allow for running this manually.
+
+jobs:
+  j1:
+    name: repostats
+    runs-on: ubuntu-latest
+    steps:
+      - name: run-ghrs
+        uses: jgehrcke/github-repo-stats@v1.1.0
+        with:
+          # Define the stats repository (the repo to fetch
+          # stats for and to generate the report for).
+          # Remove the parameter when the stats repository
+          # and the data repository are the same.
+          repository: signoz/signoz
+          # Set a GitHub API token that can read the stats
+          # repository, and that can push to the data
+          # repository (which this workflow file lives in),
+          # to store data and the report files.
+          ghtoken: ${{ github.token }}
.github/workflows/sonar.yml (5 changes, vendored)

@@ -3,7 +3,7 @@ on:
   pull_request:
     branches:
       - main
-      - develop
+      - v*
     paths:
       - 'frontend/**'
 defaults:

@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Sonar analysis

@@ -24,3 +24,4 @@ jobs:
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
.github/workflows/staging-deployment.yaml (6 changes, vendored)

@@ -11,23 +11,21 @@ jobs:
     environment: staging
     steps:
       - name: Executing remote ssh commands using ssh key
-        uses: appleboy/ssh-action@v0.1.8
+        uses: appleboy/ssh-action@v0.1.6
        env:
          GITHUB_BRANCH: develop
          GITHUB_SHA: ${{ github.sha }}
        with:
          host: ${{ secrets.HOST_DNS }}
          username: ${{ secrets.USERNAME }}
-         key: ${{ secrets.SSH_KEY }}
+         key: ${{ secrets.EC2_SSH_KEY }}
          envs: GITHUB_BRANCH,GITHUB_SHA
          command_timeout: 60m
          script: |
            echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
            echo "GITHUB_SHA: ${GITHUB_SHA}"
-           export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
-           export OTELCOL_TAG="main"
            docker system prune --force
-           docker pull signoz/signoz-otel-collector:main
            cd ~/signoz
            git status
            git add .
.github/workflows/testing-deployment.yaml (4 changes, vendored)

@@ -11,14 +11,14 @@ jobs:
     if: ${{ github.event.label.name == 'testing-deploy' }}
     steps:
       - name: Executing remote ssh commands using ssh key
-        uses: appleboy/ssh-action@v0.1.8
+        uses: appleboy/ssh-action@v0.1.6
        env:
          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
          GITHUB_SHA: ${{ github.sha }}
        with:
          host: ${{ secrets.HOST_DNS }}
          username: ${{ secrets.USERNAME }}
-         key: ${{ secrets.SSH_KEY }}
+         key: ${{ secrets.EC2_SSH_KEY }}
          envs: GITHUB_BRANCH,GITHUB_SHA
          command_timeout: 60m
          script: |
.gitignore (8 changes, vendored)

@@ -1,5 +1,7 @@
 node_modules
 yarn.lock
+package.json

+deploy/docker/environment_tiny/common_test
 frontend/node_modules

@@ -37,7 +39,7 @@ frontend/src/constants/env.ts
 **/locust-scripts/__pycache__/
 **/__debug_bin

-frontend/.env
+frontend/*.env
 pkg/query-service/signoz.db

 pkg/query-service/tests/test-deploy/data/

@@ -50,6 +52,4 @@ ee/query-service/tests/test-deploy/data/
 *.db
 /deploy/docker/clickhouse-setup/data/
 /deploy/docker-swarm/clickhouse-setup/data/
-bin/
-
-*/query-service/queries.active
+bin/
CONTRIBUTING.md

@@ -80,7 +80,7 @@ Before sending us a pull request, please ensure that,
 GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
 [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).

-**Note:** Unless your change is small, **please** consider submitting different Pull Request(s):
+**Note:** Unless your change is small, **please** consider submitting different Pull Rrequest(s):

 * 1️⃣ First PR should include the overall structure of the new component:
   * Readme, configuration, interfaces or base classes, etc...

@@ -338,7 +338,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
 **5.1.1 To install the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
  | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```

@@ -361,7 +361,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
 **5.1.4 To delete the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
  | HOTROD_NAMESPACE=sample-application bash
 ```
Makefile (23 changes)

@@ -54,7 +54,7 @@ build-push-frontend:
	@echo "--> Building and pushing frontend docker image"
	@echo "------------------"
	@cd $(FRONTEND_DIRECTORY) && \
-	docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
+	docker buildx build --file Dockerfile --progress plane --push --platform linux/amd64 \
	--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .

 # Steps to build and push docker image of query service

@@ -73,7 +73,7 @@ build-push-query-service:
	@echo "------------------"
	@echo "--> Building and pushing query-service docker image"
	@echo "------------------"
-	@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
+	@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane \
	--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
	--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .

@@ -98,7 +98,7 @@ build-push-ee-query-service:
	@echo "--> Building and pushing query-service docker image"
	@echo "------------------"
	@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
-	--progress plain --push --platform linux/arm64,linux/amd64 \
+	--progress plane --push --platform linux/arm64,linux/amd64 \
	--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .

 dev-setup:

@@ -135,20 +135,3 @@ clear-standalone-data:
 clear-swarm-data:
	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
		sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
-
-clear-standalone-ch:
-	@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
-		sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
-
-clear-swarm-ch:
-	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
-		sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
-
-test:
-	go test ./pkg/query-service/app/metrics/...
-	go test ./pkg/query-service/cache/...
-	go test ./pkg/query-service/app/...
-	go test ./pkg/query-service/app/querier/...
-	go test ./pkg/query-service/converter/...
-	go test ./pkg/query-service/formatter/...
-	go test ./pkg/query-service/tests/integration/...
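Note that `--progress plane` on the `main` side is a typo: BuildKit's documented progress modes include `auto`, `plain`, and `tty`. A sketch of the corrected multi-platform invocation from the `develop` side, with a placeholder image name and tag:

```bash
# plain (non-interactive) progress output, image pushed for both arm64 and amd64
docker buildx build --file Dockerfile --progress plain \
  --push --platform linux/arm64,linux/amd64 \
  --tag signoz/frontend:latest .
```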
README.de-de.md (168 changes)

@@ -1,75 +1,40 @@
 <p align="center">
   <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />

   <p align="center">Überwache deine Anwendungen und behebe Probleme in deinen bereitgestellten Anwendungen. SigNoz ist eine Open Source Alternative zu DataDog, New Relic, etc.</p>
 </p>

 <p align="center">
-  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
+  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
   <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
-  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
-  <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
   <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
   <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
 </p>

 <h3 align="center">
   <a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
   <a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> •
   <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
   <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
   <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
-  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
+  <a href="https://twitter.com/SigNozHQ"><b>Twitter</b></a>
 </h3>

-##
+##

-SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. Mit SigNoz können Sie Folgendes tun:
+SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. SigNoz benutzt verteilte Einzelschritt-Fehlersuchen, um Einblick in deinen Software-Stack zu bekommen.

-👉 Visualisieren Sie Metriken, Traces und Logs in einer einzigen Oberfläche.
+👉 Du kannst Werte wie die P99-Latenz und die Fehler Häufigkeit von deinen Services, externen API Aufrufen und einzelnen Endpunkten sehen.

-👉 Sie können Metriken wie die p99-Latenz, Fehlerquoten für Ihre Dienste, externe API-Aufrufe und individuelle Endpunkte anzeigen.
+👉 Du kannst die Ursache des Problems finden, indem du zu dem Einzelschritt gehst, der das Problem verursacht und dir detaillierte Flamegraphs von einzelnen Abfragefehlersuchen anzeigen lassen.

-👉 Sie können die Ursache des Problems ermitteln, indem Sie zu den genauen Traces gehen, die das Problem verursachen, und detaillierte Flammenbilder einzelner Anfragetraces anzeigen.
-
-👉 Führen Sie Aggregationen auf Trace-Daten durch, um geschäftsrelevante Metriken zu erhalten.
-
-👉 Filtern und Abfragen von Logs, Erstellen von Dashboards und Benachrichtigungen basierend auf Attributen in den Logs.
-
-👉 Automatische Aufzeichnung von Ausnahmen in Python, Java, Ruby und Javascript.
-
-👉 Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
-
-##
-### Anwendung Metriken
-
-
-
-### Verteiltes Tracing
-
-<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
-
-<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
-
-### Log Verwaltung
-
-<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
-
-### Infrastruktur Überwachung
-
-<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
-
-### Exceptions Monitoring
-
-
-
-### Alarme
-
-<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
+👉 Erstelle Aggregate auf Basis von Fehlersuche Daten, um geschäftsrelevante Metriken zu erhalten.
+
+

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

 ## Werde Teil unserer Slack Community

@@ -77,22 +42,20 @@ Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

 ## Funktionen:

-- Einheitliche Benutzeroberfläche für Metriken, Traces und Logs. Keine Notwendigkeit, zwischen Prometheus und Jaeger zu wechseln, um Probleme zu debuggen oder ein separates Log-Tool wie Elastic neben Ihrer Metriken- und Traces-Stack zu verwenden.
-- Überblick über Anwendungsmetriken wie RPS, Latenzzeiten des 50tes/90tes/99tes Perzentils und Fehlerquoten.
-- Langsamste Endpunkte in Ihrer Anwendung.
-- Zeigen Sie genaue Anfragetraces an, um Probleme in nachgelagerten Diensten, langsamen Datenbankabfragen oder Aufrufen von Drittanbieterdiensten wie Zahlungsgateways zu identifizieren.
-- Filtern Sie Traces nach Dienstname, Operation, Latenz, Fehler, Tags/Annotationen.
-- Führen Sie Aggregationen auf Trace-Daten (Ereignisse/Spans) durch, um geschäftsrelevante Metriken zu erhalten. Beispielsweise können Sie die Fehlerquote und die 99tes Perzentillatenz für `customer_type: gold` oder `deployment_version: v2` oder `external_call: paypal` erhalten.
-- Native Unterstützung für OpenTelemetry-Logs, erweiterten Log-Abfrage-Builder und automatische Log-Sammlung aus dem Kubernetes-Cluster.
-- Blitzschnelle Log-Analytik ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
-- End-to-End-Sichtbarkeit der Infrastrukturleistung, Aufnahme von Metriken aus allen Arten von Host-Umgebungen.
-- Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
+- Übersichtsmetriken deiner Anwendung wie RPS, 50tes/90tes/99tes Quantil Latenzen und Fehler Häufigkeiten.
+- Übersicht der langsamsten Endpunkte deiner Anwendung.
+- Sieh dir die genaue Einzelschritt-Fehlersuche deiner Abfrage an, um Fehler in nachgelagerten Diensten, langsamen Datenbank Abfragen und Aufrufen von Drittanbieter Diensten wie Zahlungsportalen, etc. zu finden.
+- Filtere Einzelschritt-Fehlersuchen nach Dienstname, Latenz, Fehler, Stichworten/ Anmerkungen.
+- Führe Aggregate auf Basis von Einzelschritt-Fehlersuche Daten (Ereignisse/Abstände) aus, um geschäftsrelevante Metriken zu erhalten. Du kannst dir z. B. die Fehlerrate und 99tes Quantil Latenz von `customer_type: gold`, `deployment_version: v2` oder `external_call: paypal` ausgeben lassen.
+- Einheitliche Benutzeroberfläche für Metriken und Einzelschritt-Fehlersuchen. Du musst nicht zwischen Prometheus und Jaeger hin und her wechseln, um Fehler zu beheben.

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

 ## Wieso SigNoz?

@@ -102,46 +65,45 @@ Wir wollten eine selbst gehostete, Open Source Variante von Lösungen wie DataDo

 Open Source gibt dir außerdem die totale Kontrolle über deine Konfiguration, Stichprobenentnahme und Betriebszeit. Du kannst des Weiteren neue Module auf Basis von SigNoz bauen, die erweiterte, geschäftsspezifische Funktionen anbieten.

-### Languages supported:
+### Unterstützte Programmiersprachen:

-Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als Bibliothek, mit der Sie Ihre Anwendungen instrumentieren können. Daher wird jedes von OpenTelemetry unterstützte Framework und jede Sprache auch von SignNoz unterstützt. Einige der wichtigsten unterstützten Sprachen sind:
+Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als die Software Library, die du nutzen kannst um deine Anwendungen auszuführen. Jedes Framework und jede Sprache die von OpenTelemetry unterstützt wird, wird auch von SigNoz unterstützt. Einige der unterstützten, größeren Programmiersprachen sind:

 - Java
 - Python
 - NodeJS
 - Go
-- PHP
-- .NET
-- Ruby
-- Elixir
-- Rust

 Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

 ## Erste Schritte mit SigNoz

 ### Bereitstellung mit Docker

-Bitte folge den [hier](https://signoz.io/docs/install/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.
+Bitte folge den [hier](https://signoz.io/docs/deployment/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.

-Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/install/troubleshooting/) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.
+Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/deployment/troubleshooting) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.

 <p>&nbsp  </p>

-### Deploy in Kubernetes using Helm
+### Bereitstellung mit Kubernetes und Helm

 Bitte folge den [hier](https://signoz.io/docs/deployment/helm_chart) aufgelisteten Schritten, um deine Anwendung mit Helm Charts bereitzustellen.

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

-## Vergleiche mit bekannten Tools
+## Vergleiche mit anderen Lösungen

-### SigNoz vs Prometheus
+### SigNoz vs. Prometheus

 Prometheus ist gut, falls du dich nur für Metriken interessierst. Wenn du eine nahtlose Integration von Metriken und Einzelschritt-Fehlersuchen haben möchtest, ist die Kombination aus Prometheus und Jaeger nicht das Richtige für dich.

@@ -149,79 +111,49 @@ Unser Ziel ist es, eine integrierte Benutzeroberfläche aus Metriken und Einzels

 <p>&nbsp  </p>

-### SigNoz vs Jaeger
+### SigNoz vs. Jaeger

 Jaeger kümmert sich nur um verteilte Einzelschritt-Fehlersuche. SigNoz erstellt sowohl Metriken als auch Einzelschritt-Fehlersuche, daneben haben wir auch Protokoll Verwaltung auf unserem Plan.

 Außerdem hat SigNoz noch mehr spezielle Funktionen im Vergleich zu Jaeger:

-- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an.
-- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag `customer_type=premium`, was hingegen mit SigNoz leicht umsetzbar ist.
-
-<p>&nbsp  </p>
-
-### SigNoz vs Elastic
-
-- Die Verwaltung von SigNoz-Protokollen basiert auf 'ClickHouse', einem spaltenbasierten OLAP-Datenspeicher, der aggregierte Protokollanalyseabfragen wesentlich effizienter macht.
-- 50 % geringerer Ressourcenbedarf im Vergleich zu Elastic während der Aufnahme.
-
-Wir haben Benchmarks veröffentlicht, die Elastic mit SignNoz vergleichen. Schauen Sie es sich [hier](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
-
-<p>&nbsp  </p>
-
-### SigNoz vs Loki
-
-- SigNoz unterstützt Aggregationen von Daten mit hoher Kardinalität über ein großes Volumen, Loki hingegen nicht.
-- SigNoz unterstützt Indizes über Daten mit hoher Kardinalität und hat keine Beschränkungen hinsichtlich der Anzahl der Indizes, während Loki maximale Streams erreicht, wenn ein paar Indizes hinzugefügt werden.
-- Das Durchsuchen großer Datenmengen ist in Loki im Vergleich zu SigNoz schwierig und langsam.
-
-Wir haben Benchmarks veröffentlicht, die Loki mit SigNoz vergleichen. Schauen Sie es sich [hier](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
+- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an
+- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag - customer_type='premium', was hingegen mit SigNoz leicht umsetzbar ist.

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

 ## Zum Projekt beitragen

-Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md), durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
-Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem #contributing Kanal in unserer [slack community](https://signoz.io/slack)
-
-### Unsere Projektbetreuer
+Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md) durch, bevor du anfängst, Beiträge zu SigNoz zu machen.

-#### Backend
-
-- [Ankit Nayan](https://github.com/ankitnayan)
-- [Nityananda Gohain](https://github.com/nityanandagohain)
-- [Srikanth Chekuri](https://github.com/srikanthccv)
-- [Vishal Sharma](https://github.com/makeavish)
-
-#### Frontend
-
-- [Palash Gupta](https://github.com/palashgdev)
-
-#### DevOps
-
-- [Prashant Shahi](https://github.com/prashant-shahi)
+Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://signoz.io/slack).

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

 ## Dokumentation

 Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

-## Gemeinschaft
+## Community

-Werde Teil der [slack community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
+Werde Teil der [Slack Community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.

 Falls du irgendwelche Ideen, Fragen oder Feedback hast, kannst du sie gerne über unsere [Github Discussions](https://github.com/SigNoz/signoz/discussions) mit uns teilen.

-Wie immer, Dank an unsere großartigen Mitwirkenden!
+Wie immer, danke an unsere großartigen Unterstützer!

 <a href="https://github.com/signoz/signoz/graphs/contributors">
   <img src="https://contrib.rocks/image?repo=signoz/signoz" />
 </a>
README.md (61 changes)

@@ -5,7 +5,7 @@
 </p>

 <p align="center">
-  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
+  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
   <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
   <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
   <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>

@@ -23,7 +23,7 @@

 ##

-SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can:
+SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.

 👉 Visualise Metrics, Traces and Logs in a single pane of glass

@@ -35,41 +35,19 @@ SigNoz helps developers monitor applications and troubleshoot problems in their

 👉 Filter and query logs, build dashboards and alerts based on attributes in logs

 👉 Record exceptions automatically in Python, Java, Ruby, and Javascript
+
+
+<br />
+
+
+<br />
+
+
+<br />
+

 👉 Easy to set alerts with DIY query builder

-### Application Metrics
-
-
-
-### Distributed Tracing
-
-<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
-
-<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
-
-### Logs Management
-
-<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
-
-### Infrastructure Monitoring
-
-<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
-
-### Exceptions Monitoring
-
-
-
-### Alerts
-
-<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

 ## Join our Slack community

@@ -77,6 +55,7 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

 ## Features:

@@ -86,13 +65,10 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
 - See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
 - Filter traces by service name, operation, latency, error, tags/annotations.
 - Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
-- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s cluster
-- Lightning quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
-- End-to-End visibility into infrastructure performance, ingest metrics from all kinds of host environments
 - Easy to set alerts with DIY query builder

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

 ## Why SigNoz?

@@ -121,14 +97,15 @@ You can find the complete list of languages here - https://opentelemetry.io/docs

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

 ## Getting Started

 ### Deploy using Docker

-Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using docker
+Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using docker

-The [troubleshooting instructions](https://signoz.io/docs/install/troubleshooting/) may be helpful if you face any issues.
+The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshooting) may be helpful if you face any issues.

 <p>&nbsp  </p>

@@ -139,6 +116,7 @@ Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_cha

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

 ## Comparisons to Familiar Tools

@@ -166,8 +144,6 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:
 - SigNoz Logs management are based on ClickHouse, a columnar OLAP datastore which makes aggregate log analytics queries much more efficient
 - 50% lower resource requirement compared to Elastic during ingestion

-We have published benchmarks comparing Elastic with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
-
 <p>&nbsp  </p>

 ### SigNoz vs Loki

@@ -176,10 +152,9 @@ We have published benchmarks comparing Elastic with SigNoz. Check it out [here](
 - SigNoz supports indexes over high cardinality data and has no limitations on the number of indexes, while Loki reaches max streams with a few indexes added to it.
 - Searching over a huge volume of data is difficult and slow in Loki compared to SigNoz

-We have published benchmarks comparing Loki with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
-
 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

 ## Contributing

@@ -206,6 +181,7 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

 ## Documentation

@@ -213,6 +189,7 @@ You can find docs at https://signoz.io/docs/. If you need any clarification or f

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

 ## Community
README.pt-br.md

@@ -84,9 +84,9 @@ Você pode encontrar a lista completa de linguagens aqui - https://opentelemetry

 ### Implantar usando Docker

-Siga as etapas listadas [aqui](https://signoz.io/docs/install/docker/) para instalar usando o Docker.
+Siga as etapas listadas [aqui](https://signoz.io/docs/deployment/docker/) para instalar usando o Docker.

-Esse [guia para solução de problemas](https://signoz.io/docs/install/troubleshooting/) pode ser útil se você enfrentar quaisquer problemas.
+Esse [guia para solução de problemas](https://signoz.io/docs/deployment/troubleshooting) pode ser útil se você enfrentar quaisquer problemas.

 <p>&nbsp  </p>
README.zh-cn.md

@@ -80,9 +80,9 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo

 ### 使用Docker部署

-请按照[这里](https://signoz.io/docs/install/docker/)列出的步骤使用Docker来安装
+请按照[这里](https://signoz.io/docs/deployment/docker/)列出的步骤使用Docker来安装

-如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/install/troubleshooting/)会对你有帮助。
+如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/deployment/troubleshooting)会对你有帮助。

 <p>&nbsp  </p>
sample-apps/hotrod/README.md

@@ -27,6 +27,12 @@ For x86 chip (amd):
    docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
 ```

+For Mac with Apple chip (arm):
+
+```sh
+docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
+```
+
 Open http://localhost:3301 in your favourite browser. In couple of minutes, you should see
 the data generated from hotrod in SigNoz UI.

@@ -58,7 +64,7 @@ from the HotROD application, you should see the data generated from hotrod in Si
 ```sh
 kubectl create ns sample-application

-kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
 ```

 To generate load:

@@ -66,7 +72,7 @@ To generate load:
 ```sh
 kubectl -n sample-application run strzal --image=djbingham/curl \
   --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
-  'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
+  'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
 ```

 To stop load:
@@ -7,21 +7,9 @@
         </default>
         <s3>
             <type>s3</type>
-            <!-- For S3 cold storage,
-                 if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
-                 if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
-                 For GCS cold storage,
-                 endpoint should be https://storage.googleapis.com/<bucket-name>/data/
-            -->
-            <endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
+            <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
             <access_key_id>ACCESS-KEY-ID</access_key_id>
             <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
-            <!-- In case of S3, uncomment the below configuration in case you want to read
-                 AWS credentials from the Environment variables if they exist. -->
-            <!-- <use_environment_credentials>true</use_environment_credentials> -->
-            <!-- In case of GCS, uncomment the below configuration, since GCS does
-                 not support batch deletion and result in error messages in logs. -->
-            <!-- <support_batch_delete>false</support_batch_delete> -->
         </s3>
     </disks>
     <policies>
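The removed comment encodes the endpoint shapes for cold storage; since the placeholders above must be filled in by hand, a quick reachability check can catch a malformed endpoint early. A sketch, with bucket and region still placeholders:

```bash
# Endpoint shapes from the removed comment:
#   us-east-1 S3:   https://<bucket-name>.s3.amazonaws.com
#   other regions:  https://<bucket-name>.s3-<region>.amazonaws.com
#   GCS:            https://storage.googleapis.com/<bucket-name>/data/
curl -sI "https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com" | head -n 1
```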
deploy/docker-swarm/clickhouse-setup/docker-compose.yaml

@@ -1,7 +1,7 @@
 version: "3.9"

 x-clickhouse-defaults: &clickhouse-defaults
-  image: clickhouse/clickhouse-server:23.7.3-alpine
+  image: clickhouse/clickhouse-server:22.8.8-alpine
   tty: true
   deploy:
     restart_policy:

@@ -16,14 +16,7 @@ x-clickhouse-defaults: &clickhouse-defaults
       max-file: "3"
   healthcheck:
     # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-    test:
-      [
-        "CMD",
-        "wget",
-        "--spider",
-        "-q",
-        "localhost:8123/ping"
-      ]
+    test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
     interval: 30s
     timeout: 5s
     retries: 3

@@ -41,7 +34,7 @@ x-clickhouse-depend: &clickhouse-depend

 services:
   zookeeper-1:
-    image: bitnami/zookeeper:3.7.1
+    image: bitnami/zookeeper:3.7.0
     hostname: zookeeper-1
     user: root
     ports:

@@ -131,7 +124,7 @@ services:
   #   - ./data/clickhouse-3/:/var/lib/clickhouse/

   alertmanager:
-    image: signoz/alertmanager:0.23.4
+    image: signoz/alertmanager:0.23.0-0.2
     volumes:
       - ./data/alertmanager:/data
     command:

@@ -144,12 +137,8 @@ services:
         condition: on-failure

   query-service:
-    image: signoz/query-service:0.29.1
-    command:
-      [
-        "-config=/root/config/prometheus.yml",
-        "--prefer-delta=true"
-      ]
+    image: signoz/query-service:0.13.1
+    command: ["-config=/root/config/prometheus.yml"]
     # ports:
     #   - "6060:6060" # pprof port
     #   - "8080:8080" # query-service port

@@ -167,14 +156,7 @@ services:
       - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
     healthcheck:
-      test:
-        [
-          "CMD",
-          "wget",
-          "--spider",
-          "-q",
-          "localhost:8080/api/v1/health"
-        ]
+      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
       interval: 30s
       timeout: 5s
       retries: 3

@@ -184,7 +166,7 @@ services:
     <<: *clickhouse-depend

   frontend:
-    image: signoz/frontend:0.29.1
+    image: signoz/frontend:0.13.1
     deploy:
       restart_policy:
         condition: on-failure

@@ -197,12 +179,8 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/signoz-otel-collector:0.79.7
-    command:
-      [
-        "--config=/etc/otel-collector-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
+    image: signoz/signoz-otel-collector:0.66.1
+    command: ["--config=/etc/otel-collector-config.yaml"]
     user: root # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml

@@ -210,11 +188,10 @@ services:
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
       - DOCKER_MULTI_NODE_CLUSTER=false
-      - LOW_CARDINAL_EXCEPTION_GROUPING=false
     ports:
       # - "1777:1777" # pprof extension
-      - "4317:4317" # OTLP gRPC receiver
-      - "4318:4318" # OTLP HTTP receiver
+      - "4317:4317" # OTLP gRPC receiver
+      - "4318:4318" # OTLP HTTP receiver
       # - "8888:8888" # OtelCollector internal metrics
       # - "8889:8889" # signoz spanmetrics exposed by the agent
       # - "9411:9411" # Zipkin port

@@ -230,12 +207,8 @@ services:
     <<: *clickhouse-depend

   otel-collector-metrics:
-    image: signoz/signoz-otel-collector:0.79.7
-    command:
-      [
-        "--config=/etc/otel-collector-metrics-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
+    image: signoz/signoz-otel-collector:0.66.1
+    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
     volumes:
       - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
     # ports:

@@ -248,22 +221,9 @@ services:
         condition: on-failure
     <<: *clickhouse-depend

-  logspout:
-    image: "gliderlabs/logspout:v3.2.14"
-    volumes:
-      - /etc/hostname:/etc/host_hostname:ro
-      - /var/run/docker.sock:/var/run/docker.sock
-    command: syslog+tcp://otel-collector:2255
-    depends_on:
-      - otel-collector
-    deploy:
-      mode: global
-      restart_policy:
-        condition: on-failure
-
   hotrod:
     image: jaegertracing/example-hotrod:1.30
-    command: [ "all" ]
+    command: ["all"]
     environment:
       - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
     logging:

@@ -272,7 +232,7 @@ services:
       max-file: "3"

   load-hotrod:
-    image: "signoz/locust:1.2.3"
+    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
     hostname: load-hotrod
     environment:
       ATTACKED_HOST: http://hotrod:8080
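Both healthchecks in this file probe HTTP endpoints with `wget --spider`. A sketch of what the checks do when run by hand against a local deployment, assuming the default port mappings from this compose file:

```bash
# ClickHouse liveness probe (the /ping endpoint returns "Ok." when healthy)
wget --spider -q localhost:8123/ping && echo "clickhouse healthy"

# query-service probe: develop checks /api/v1/health, main still checks /api/v1/version
wget --spider -q localhost:8080/api/v1/health && echo "query-service healthy"
```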
@@ -1,21 +1,29 @@
|
||||
receivers:
|
||||
tcplog/docker:
|
||||
listen_address: "0.0.0.0:2255"
|
||||
filelog/dockercontainers:
|
||||
include: [ "/var/lib/docker/containers/*/*.log" ]
|
||||
start_at: end
|
||||
include_file_path: true
|
||||
include_file_name: false
|
||||
operators:
|
||||
- type: regex_parser
|
||||
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
|
||||
timestamp:
|
||||
parse_from: attributes.timestamp
|
||||
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
||||
- type: move
|
||||
from: attributes["body"]
|
||||
to: body
|
||||
- type: remove
|
||||
field: attributes.timestamp
|
||||
# please remove names from below if you want to collect logs from them
|
||||
- type: filter
|
||||
id: signoz_logs_filter
|
||||
expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
|
||||
- type: json_parser
|
||||
id: parser-docker
|
||||
output: extract_metadata_from_filepath
|
||||
timestamp:
|
||||
parse_from: attributes.time
|
||||
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
||||
- type: regex_parser
|
||||
id: extract_metadata_from_filepath
|
||||
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
|
||||
parse_from: attributes["log.file.path"]
|
||||
output: parse_body
|
||||
- type: move
|
||||
id: parse_body
|
||||
from: attributes.log
|
||||
to: body
|
||||
output: time
|
||||
- type: remove
|
||||
id: time
|
||||
field: attributes.time
|
||||
opencensus:
|
||||
endpoint: 0.0.0.0:55678
|
||||
otlp/spanmetrics:
|
||||
@@ -61,47 +69,13 @@ receivers:
|
||||
job_name: otel-collector
|
||||
|
||||
processors:
|
||||
logstransform/internal:
|
||||
operators:
|
||||
- type: trace_parser
|
||||
if: '"trace_id" in attributes or "span_id" in attributes'
|
||||
trace_id:
|
||||
parse_from: attributes.trace_id
|
||||
span_id:
|
||||
parse_from: attributes.span_id
|
||||
output: remove_trace_id
|
||||
- type: trace_parser
|
||||
if: '"traceId" in attributes or "spanId" in attributes'
|
||||
trace_id:
|
||||
parse_from: attributes.traceId
|
||||
span_id:
|
||||
parse_from: attributes.spanId
|
||||
output: remove_traceId
|
||||
- id: remove_traceId
|
||||
type: remove
|
||||
if: '"traceId" in attributes'
|
||||
field: attributes.traceId
|
||||
output: remove_spanId
|
||||
- id: remove_spanId
|
||||
type: remove
|
||||
if: '"spanId" in attributes'
|
||||
field: attributes.spanId
|
||||
- id: remove_trace_id
|
||||
type: remove
|
||||
if: '"trace_id" in attributes'
|
||||
field: attributes.trace_id
|
||||
output: remove_span_id
|
||||
- id: remove_span_id
|
||||
type: remove
|
||||
if: '"span_id" in attributes'
|
||||
field: attributes.span_id
|
||||
batch:
|
||||
send_batch_size: 10000
|
||||
send_batch_max_size: 11000
|
||||
timeout: 10s
|
||||
resourcedetection:
|
||||
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
||||
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
|
||||
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
|
||||
timeout: 2s
|
||||
signozspanmetrics/prometheus:
|
||||
metrics_exporter: prometheus
|
||||
@@ -112,10 +86,6 @@ processors:
|
||||
default: default
|
||||
- name: deployment.environment
|
||||
default: default
|
||||
# This is added to ensure the uniqueness of the timeseries
|
||||
# Otherwise, identical timeseries produced by multiple replicas of
|
||||
# collectors result in incorrect APM metrics
|
||||
- name: 'signoz.collector.id'
|
||||
# memory_limiter:
|
||||
# # 80% of maximum memory up to 2G
|
||||
# limit_mib: 1500
|
||||
@@ -136,7 +106,6 @@ exporters:
|
||||
clickhousetraces:
|
||||
datasource: tcp://clickhouse:9000/?database=signoz_traces
|
||||
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
||||
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
resource_to_telemetry_conversion:
|
||||
@@ -192,6 +161,6 @@ service:
|
||||
receivers: [otlp/spanmetrics]
|
||||
exporters: [prometheus]
|
||||
logs:
|
||||
receivers: [otlp, tcplog/docker]
|
||||
processors: [logstransform/internal, batch]
|
||||
receivers: [otlp, filelog/dockercontainers]
|
||||
processors: [batch]
|
||||
exporters: [clickhouselogsexporter]
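
The filelog receiver in this config leans on the extract_metadata_from_filepath regex to pull the container ID out of each log file path. The pattern is RE2-compatible, so it can be sanity-checked with Go's regexp package; a minimal sketch (the sample path is hypothetical, not taken from a real host):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the extract_metadata_from_filepath operator above.
	re := regexp.MustCompile(`^.*containers/(?P<container_id>[^_]+)/.*log$`)

	// Hypothetical value of attributes["log.file.path"], for illustration only.
	path := "/var/lib/docker/containers/0123abcd/0123abcd-json.log"

	if m := re.FindStringSubmatch(path); m != nil {
		// SubexpIndex resolves the named capture group to its match index.
		fmt.Println("container_id:", m[re.SubexpIndex("container_id")])
	}
}

Run against the hypothetical path above, this prints "container_id: 0123abcd", which is the value the json_parser/move chain then attaches to each log record.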

@@ -24,16 +24,8 @@ server {
try_files $uri $uri/ /index.html;
}

location ~ ^/api/(v1|v3)/logs/(tail|livetail){
proxy_pass http://query-service:8080;
proxy_http_version 1.1;

# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;

# dont buffer the data send it directly to client.
proxy_buffering off;
proxy_cache off;
location /api/alertmanager {
proxy_pass http://alertmanager:9093/api/v2;
}

location /api {

@@ -905,8 +905,7 @@
<dictionaries_config>*_dictionary.xml</dictionaries_config>

<!-- Configuration of user defined executable functions -->
<user_defined_executable_functions_config>*function.xml</user_defined_executable_functions_config>
<user_scripts_path>/var/lib/clickhouse/user_scripts/</user_scripts_path>
<user_defined_executable_functions_config>*_function.xml</user_defined_executable_functions_config>

<!-- Uncomment if you want data to be compressed 30-100% better.
Don't do that if you just started using ClickHouse.

@@ -7,21 +7,9 @@
</default>
<s3>
<type>s3</type>
<!-- For S3 cold storage,
if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
For GCS cold storage,
endpoint should be https://storage.googleapis.com/<bucket-name>/data/
-->
<endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
<access_key_id>ACCESS-KEY-ID</access_key_id>
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
<!-- In case of S3, uncomment the below configuration in case you want to read
AWS credentials from the Environment variables if they exist. -->
<!-- <use_environment_credentials>true</use_environment_credentials> -->
<!-- In case of GCS, uncomment the below configuration, since GCS does
not support batch deletion and result in error messages in logs. -->
<!-- <support_batch_delete>false</support_batch_delete> -->
</s3>
</disks>
<policies>
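
The comment in the hunk above encodes a small rule: for us-east-1 the bucket endpoint omits the region, while every other region carries an s3-<region> host. A hedged Go helper capturing that convention (the function name and the trailing /data/ prefix are illustrative, mirroring the sample endpoints in the config):

package main

import "fmt"

// s3ColdStorageEndpoint mirrors the endpoint convention documented in
// clickhouse-storage.xml: us-east-1 buckets use the regionless host.
func s3ColdStorageEndpoint(bucket, region string) string {
	if region == "us-east-1" {
		return fmt.Sprintf("https://%s.s3.amazonaws.com/data/", bucket)
	}
	return fmt.Sprintf("https://%s.s3-%s.amazonaws.com/data/", bucket, region)
}

func main() {
	fmt.Println(s3ColdStorageEndpoint("BUCKET-NAME", "us-east-1"))
	fmt.Println(s3ColdStorageEndpoint("BUCKET-NAME", "eu-west-2"))
}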

@@ -1,21 +0,0 @@
<functions>
<function>
<type>executable</type>
<name>histogramQuantile</name>
<return_type>Float64</return_type>
<argument>
<type>Array(Float64)</type>
<name>buckets</name>
</argument>
<argument>
<type>Array(Float64)</type>
<name>counts</name>
</argument>
<argument>
<type>Float64</type>
<name>quantile</name>
</argument>
<format>CSV</format>
<command>./histogramQuantile</command>
</function>
</functions>
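
The XML being removed here wires a ClickHouse executable UDF: as configured, ClickHouse writes the three arguments as a CSV row to the command's stdin and reads one Float64 result per row from stdout. The full implementation is the histogramQuantile.go file removed further down in this diff; a minimal stdin/stdout skeleton of that contract, for orientation only (parsing and error handling elided):

package main

import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	// ClickHouse streams one CSV-formatted row per invocation on stdin:
	// "[bounds...]", "[counts...]", quantile
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		row := sc.Text()
		_ = row // parse bounds, counts and the quantile from the row here
		// ...then answer with one Float64 per input row on stdout.
		fmt.Println(0.0)
	}
}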

@@ -2,8 +2,8 @@ version: "2.4"

services:
clickhouse:
image: clickhouse/clickhouse-server:23.7.3-alpine
container_name: signoz-clickhouse
image: clickhouse/clickhouse-server:22.8.8-alpine
container_name: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
@@ -20,21 +20,14 @@ services:
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8123/ping"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3

alertmanager:
container_name: signoz-alertmanager
image: signoz/alertmanager:0.23.4
container_name: alertmanager
image: signoz/alertmanager:0.23.0-0.2
volumes:
- ./data/alertmanager:/data
depends_on:
@@ -47,13 +40,9 @@ services:

# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: signoz-otel-collector
image: signoz/signoz-otel-collector:0.79.7
command:
[
"--config=/etc/otel-collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
container_name: otel-collector
image: signoz/signoz-otel-collector:0.66.1
command: ["--config=/etc/otel-collector-config.yaml"]
# user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -61,8 +50,8 @@ services:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
@@ -77,13 +66,9 @@ services:
condition: service_healthy

otel-collector-metrics:
container_name: signoz-otel-collector-metrics
image: signoz/signoz-otel-collector:0.79.7
command:
[
"--config=/etc/otel-collector-metrics-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
container_name: otel-collector-metrics
image: signoz/signoz-otel-collector:0.66.1
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
# ports:
@@ -96,17 +81,6 @@ services:
clickhouse:
condition: service_healthy

logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure

hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
@@ -114,12 +88,12 @@ services:
options:
max-size: 50m
max-file: "3"
command: [ "all" ]
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

load-hotrod:
image: "signoz/locust:1.2.3"
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
environment:

@@ -9,7 +9,7 @@ services:
args:
LDFLAGS: ""
TARGETPLATFORM: "${LOCAL_GOOS}/${LOCAL_GOARCH}"
container_name: signoz-query-service
container_name: query-service
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
@@ -22,24 +22,13 @@ services:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
command:
[
"-config=/root/config/prometheus.yml",
"--prefer-delta=true"
]
command: ["-config=/root/config/prometheus.yml"]
ports:
- "6060:6060"
- "8080:8080"
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
timeout: 5s
retries: 3
@@ -54,7 +43,7 @@ services:
args:
TARGETOS: "${LOCAL_GOOS}"
TARGETPLATFORM: "${LOCAL_GOARCH}"
container_name: signoz-frontend
container_name: frontend
environment:
- FRONTEND_API_ENDPOINT=http://query-service:8080
restart: on-failure

@@ -2,8 +2,7 @@ version: "2.4"

x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
# addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:23.7.3-alpine
image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true
depends_on:
- zookeeper-1
@@ -15,14 +14,7 @@ x-clickhouse-defaults: &clickhouse-defaults
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8123/ping"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
@@ -42,10 +34,10 @@ x-clickhouse-depend: &clickhouse-depend
# condition: service_healthy

services:


zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
image: bitnami/zookeeper:3.7.0
container_name: zookeeper-1
hostname: zookeeper-1
user: root
ports:
@@ -62,7 +54,7 @@ services:

# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
# container_name: zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
@@ -79,7 +71,7 @@ services:

# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-3
# container_name: zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
@@ -96,7 +88,7 @@ services:

clickhouse:
<<: *clickhouse-defaults
container_name: signoz-clickhouse
container_name: clickhouse
hostname: clickhouse
ports:
- "9000:9000"
@@ -105,15 +97,13 @@ services:
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/

# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
# container_name: clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
@@ -122,16 +112,13 @@ services:
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/


# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
# container_name: clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
@@ -140,15 +127,12 @@ services:
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/

alertmanager:
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.4}
container_name: signoz-alertmanager
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.0-0.2}
volumes:
- ./data/alertmanager:/data
depends_on:
@@ -159,16 +143,12 @@ services:
- --queryService.url=http://query-service:8085
- --storage.path=/data

# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

query-service:
image: signoz/query-service:${DOCKER_TAG:-0.29.1}
container_name: signoz-query-service
command:
[
"-config=/root/config/prometheus.yml",
"--prefer-delta=true"
]
image: signoz/query-service:${DOCKER_TAG:-0.13.1}
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
@@ -187,22 +167,15 @@ services:
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
timeout: 5s
retries: 3
<<: *clickhouse-depend

frontend:
image: signoz/frontend:${DOCKER_TAG:-0.29.1}
container_name: signoz-frontend
image: signoz/frontend:${DOCKER_TAG:-0.13.1}
container_name: frontend
restart: on-failure
depends_on:
- alertmanager
@@ -213,13 +186,8 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.7}
container_name: signoz-otel-collector
command:
[
"--config=/etc/otel-collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.1}
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -227,11 +195,10 @@ services:
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
@@ -244,13 +211,8 @@ services:
<<: *clickhouse-depend

otel-collector-metrics:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.7}
container_name: signoz-otel-collector-metrics
command:
[
"--config=/etc/otel-collector-metrics-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.1}
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
# ports:
@@ -261,30 +223,19 @@ services:
restart: on-failure
<<: *clickhouse-depend

logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure

hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: [ "all" ]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

load-hotrod:
image: "signoz/locust:1.2.3"
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
environment:

@@ -1,21 +1,29 @@
receivers:
tcplog/docker:
listen_address: "0.0.0.0:2255"
filelog/dockercontainers:
include: [ "/var/lib/docker/containers/*/*.log" ]
start_at: end
include_file_path: true
include_file_name: false
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
- type: json_parser
id: parser-docker
output: extract_metadata_from_filepath
timestamp:
parse_from: attributes.time
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: regex_parser
id: extract_metadata_from_filepath
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
parse_from: attributes["log.file.path"]
output: parse_body
- type: move
id: parse_body
from: attributes.log
to: body
output: time
- type: remove
id: time
field: attributes.time
opencensus:
endpoint: 0.0.0.0:55678
otlp/spanmetrics:
@@ -62,40 +70,6 @@ receivers:


processors:
logstransform/internal:
operators:
- type: trace_parser
if: '"trace_id" in attributes or "span_id" in attributes'
trace_id:
parse_from: attributes.trace_id
span_id:
parse_from: attributes.span_id
output: remove_trace_id
- type: trace_parser
if: '"traceId" in attributes or "spanId" in attributes'
trace_id:
parse_from: attributes.traceId
span_id:
parse_from: attributes.spanId
output: remove_traceId
- id: remove_traceId
type: remove
if: '"traceId" in attributes'
field: attributes.traceId
output: remove_spanId
- id: remove_spanId
type: remove
if: '"spanId" in attributes'
field: attributes.spanId
- id: remove_trace_id
type: remove
if: '"trace_id" in attributes'
field: attributes.trace_id
output: remove_span_id
- id: remove_span_id
type: remove
if: '"span_id" in attributes'
field: attributes.span_id
batch:
send_batch_size: 10000
send_batch_max_size: 11000
@@ -109,10 +83,6 @@ processors:
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: 'signoz.collector.id'
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -130,7 +100,7 @@ processors:
# retry_on_failure: true
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
timeout: 2s

extensions:
@@ -145,7 +115,6 @@ exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
@@ -197,6 +166,6 @@ service:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
logs:
receivers: [otlp, tcplog/docker]
processors: [logstransform/internal, batch]
receivers: [otlp, filelog/dockercontainers]
processors: [batch]
exporters: [clickhouselogsexporter]
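
The tcplog/docker receiver being removed here parsed logspout's syslog framing with the regex_parser shown above. That pattern is also RE2-compatible; a quick Go check against a made-up logspout-style line (the sample line is hypothetical):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the tcplog/docker regex_parser operator above.
	re := regexp.MustCompile(`^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?`)

	// Hypothetical syslog line in the shape logspout emits.
	line := `<14>1 2023-01-02T03:04:05.000Z abc123 signoz-clickhouse 42 - - Ok.`

	if m := re.FindStringSubmatch(line); m != nil {
		fmt.Println("container_name:", m[re.SubexpIndex("container_name")])
		fmt.Println("body:", m[re.SubexpIndex("body")])
	}
}

The extracted container_name is what the signoz_logs_filter expression then matches against to drop SigNoz's own container logs.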
Binary file not shown.
@@ -1,237 +0,0 @@
package main

import (
"bufio"
"fmt"
"math"
"os"
"sort"
"strconv"
"strings"
)

// NOTE: executable must be built with target OS and architecture set to linux/amd64
// env GOOS=linux GOARCH=amd64 go build -o histogramQuantile histogramQuantile.go

// The following code is adapted from the following source:
// https://github.com/prometheus/prometheus/blob/main/promql/quantile.go

type bucket struct {
upperBound float64
count float64
}

// buckets implements sort.Interface.
type buckets []bucket

func (b buckets) Len() int { return len(b) }
func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }

// bucketQuantile calculates the quantile 'q' based on the given buckets. The
// buckets will be sorted by upperBound by this function (i.e. no sorting
// needed before calling this function). The quantile value is interpolated
// assuming a linear distribution within a bucket. However, if the quantile
// falls into the highest bucket, the upper bound of the 2nd highest bucket is
// returned. A natural lower bound of 0 is assumed if the upper bound of the
// lowest bucket is greater 0. In that case, interpolation in the lowest bucket
// happens linearly between 0 and the upper bound of the lowest bucket.
// However, if the lowest bucket has an upper bound less or equal 0, this upper
// bound is returned if the quantile falls into the lowest bucket.
//
// There are a number of special cases (once we have a way to report errors
// happening during evaluations of AST functions, we should report those
// explicitly):
//
// If 'buckets' has 0 observations, NaN is returned.
//
// If 'buckets' has fewer than 2 elements, NaN is returned.
//
// If the highest bucket is not +Inf, NaN is returned.
//
// If q==NaN, NaN is returned.
//
// If q<0, -Inf is returned.
//
// If q>1, +Inf is returned.
func bucketQuantile(q float64, buckets buckets) float64 {
if math.IsNaN(q) {
return math.NaN()
}
if q < 0 {
return math.Inf(-1)
}
if q > 1 {
return math.Inf(+1)
}
sort.Sort(buckets)
if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
return math.NaN()
}

buckets = coalesceBuckets(buckets)
ensureMonotonic(buckets)

if len(buckets) < 2 {
return math.NaN()
}
observations := buckets[len(buckets)-1].count
if observations == 0 {
return math.NaN()
}
rank := q * observations
b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })

if b == len(buckets)-1 {
return buckets[len(buckets)-2].upperBound
}
if b == 0 && buckets[0].upperBound <= 0 {
return buckets[0].upperBound
}
var (
bucketStart float64
bucketEnd = buckets[b].upperBound
count = buckets[b].count
)
if b > 0 {
bucketStart = buckets[b-1].upperBound
count -= buckets[b-1].count
rank -= buckets[b-1].count
}
return bucketStart + (bucketEnd-bucketStart)*(rank/count)
}

// coalesceBuckets merges buckets with the same upper bound.
//
// The input buckets must be sorted.
func coalesceBuckets(buckets buckets) buckets {
last := buckets[0]
i := 0
for _, b := range buckets[1:] {
if b.upperBound == last.upperBound {
last.count += b.count
} else {
buckets[i] = last
last = b
i++
}
}
buckets[i] = last
return buckets[:i+1]
}

// The assumption that bucket counts increase monotonically with increasing
// upperBound may be violated during:
//
// * Recording rule evaluation of histogram_quantile, especially when rate()
// has been applied to the underlying bucket timeseries.
// * Evaluation of histogram_quantile computed over federated bucket
// timeseries, especially when rate() has been applied.
//
// This is because scraped data is not made available to rule evaluation or
// federation atomically, so some buckets are computed with data from the
// most recent scrapes, but the other buckets are missing data from the most
// recent scrape.
//
// Monotonicity is usually guaranteed because if a bucket with upper bound
// u1 has count c1, then any bucket with a higher upper bound u > u1 must
// have counted all c1 observations and perhaps more, so that c >= c1.
//
// Randomly interspersed partial sampling breaks that guarantee, and rate()
// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
// monotonicity is broken. It is exacerbated by rate() because under normal
// operation, cumulative counting of buckets will cause the bucket counts to
// diverge such that small differences from missing samples are not a problem.
// rate() removes this divergence.)
//
// bucketQuantile depends on that monotonicity to do a binary search for the
// bucket with the φ-quantile count, so breaking the monotonicity
// guarantee causes bucketQuantile() to return undefined (nonsense) results.
//
// As a somewhat hacky solution until ingestion is atomic per scrape, we
// calculate the "envelope" of the histogram buckets, essentially removing
// any decreases in the count between successive buckets.

func ensureMonotonic(buckets buckets) {
max := buckets[0].count
for i := 1; i < len(buckets); i++ {
switch {
case buckets[i].count > max:
max = buckets[i].count
case buckets[i].count < max:
buckets[i].count = max
}
}
}
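// Illustrative note (not in the original file): given cumulative counts
// [10, 7, 12], the loop above rewrites them to [10, 10, 12] — the second
// bucket is raised to the running max so the binary search in
// bucketQuantile stays well-defined.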

// End of copied code.

func readLines() []string {
r := bufio.NewReader(os.Stdin)
bytes := []byte{}
lines := []string{}
for {
line, isPrefix, err := r.ReadLine()
if err != nil {
break
}
bytes = append(bytes, line...)
if !isPrefix {
str := strings.TrimSpace(string(bytes))
if len(str) > 0 {
lines = append(lines, str)
bytes = []byte{}
}
}
}
if len(bytes) > 0 {
lines = append(lines, string(bytes))
}
return lines
}

func main() {
lines := readLines()
for _, text := range lines {
// Example input
// "[1, 2, 4, 8, 16]", "[1, 5, 8, 10, 14]", 0.9"
// bounds - counts - quantile
parts := strings.Split(text, "\",")

var bucketNumbers []float64
// Strip the ends with square brackets
text = parts[0][2 : len(parts[0])-1]
// Parse the bucket bounds
for _, num := range strings.Split(text, ",") {
num = strings.TrimSpace(num)
number, err := strconv.ParseFloat(num, 64)
if err == nil {
bucketNumbers = append(bucketNumbers, number)
}
}

var bucketCounts []float64
// Strip the ends with square brackets
text = parts[1][2 : len(parts[1])-1]
// Parse the bucket counts
for _, num := range strings.Split(text, ",") {
num = strings.TrimSpace(num)
number, err := strconv.ParseFloat(num, 64)
if err == nil {
bucketCounts = append(bucketCounts, number)
}
}

// Parse the quantile
q, err := strconv.ParseFloat(parts[2], 64)
var b buckets

if err == nil {
for i := 0; i < len(bucketNumbers); i++ {
b = append(b, bucket{upperBound: bucketNumbers[i], count: bucketCounts[i]})
}
}
fmt.Println(bucketQuantile(q, b))
}
}

@@ -24,16 +24,8 @@ server {
try_files $uri $uri/ /index.html;
}

location ~ ^/api/(v1|v3)/logs/(tail|livetail){
proxy_pass http://query-service:8080;
proxy_http_version 1.1;

# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;

# dont buffer the data send it directly to client.
proxy_buffering off;
proxy_cache off;
location /api/alertmanager {
proxy_pass http://alertmanager:9093/api/v2;
}

location /api {

@@ -36,9 +36,9 @@ is_mac() {
[[ $OSTYPE == darwin* ]]
}

is_arm64(){
[[ `uname -m` == 'arm64' || `uname -m` == 'aarch64' ]]
}
# is_arm64(){
# [[ `uname -m` == 'arm64' ]]
# }

check_os() {
if is_mac; then
@@ -48,20 +48,10 @@ check_os() {
return
fi

if is_arm64; then
arch="arm64"
arch_official="aarch64"
else
arch="amd64"
arch_official="x86_64"
fi

platform=$(uname -s | tr '[:upper:]' '[:lower:]')

os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"

case "$os_name" in
Ubuntu*|Pop!_OS)
Ubuntu*)
desired_os=1
os="ubuntu"
package_manager="apt-get"
@@ -91,11 +81,6 @@ check_os() {
os="centos"
package_manager="yum"
;;
Rocky*)
desired_os=1
os="centos"
package_manager="yum"
;;
SLES*)
desired_os=1
os="sles"
@@ -135,7 +120,7 @@ check_ports_occupied() {

echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/install/troubleshooting/"
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "++++++++++++++++++++++++++++++++++++++++"
echo ""
exit 1
@@ -153,7 +138,7 @@ install_docker() {
$apt_cmd install software-properties-common gnupg-agent
curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
$sudo_cmd add-apt-repository \
"deb [arch=$arch] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
"deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
$apt_cmd update
echo "Installing docker"
$apt_cmd install docker-ce docker-ce-cli containerd.io
@@ -188,20 +173,12 @@ install_docker() {

}

compose_version () {
local compose_version
compose_version="$(curl -s https://api.github.com/repos/docker/compose/releases/latest | grep 'tag_name' | cut -d\" -f4)"
echo "${compose_version:-v2.18.1}"
}

install_docker_compose() {
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
if [[ ! -f /usr/bin/docker-compose ]];then
echo "++++++++++++++++++++++++"
echo "Installing docker-compose"
compose_url="https://github.com/docker/compose/releases/download/$(compose_version)/docker-compose-$platform-$arch_official"
echo "Downloading docker-compose from $compose_url"
$sudo_cmd curl -L "$compose_url" -o /usr/local/bin/docker-compose
$sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
$sudo_cmd chmod +x /usr/local/bin/docker-compose
$sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
echo "docker-compose installed!"
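
The compose_version helper above resolves the latest docker-compose tag from the GitHub releases API and falls back to a pinned v2.18.1 when the lookup fails. A hedged Go equivalent of that lookup (same endpoint and fallback; the struct holds only the one field the script extracts):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// composeVersion mirrors the shell helper: ask GitHub for the latest
// docker/compose release tag, fall back to v2.18.1 on any failure.
func composeVersion() string {
	const fallback = "v2.18.1"
	resp, err := http.Get("https://api.github.com/repos/docker/compose/releases/latest")
	if err != nil {
		return fallback
	}
	defer resp.Body.Close()

	var release struct {
		TagName string `json:"tag_name"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil || release.TagName == "" {
		return fallback
	}
	return release.TagName
}

func main() {
	fmt.Println(composeVersion())
}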
@@ -246,7 +223,7 @@ wait_for_containers_start() {

# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
if [[ status_code -eq 200 ]]; then
break
else
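
The readiness loop above polls until the probe answers 200 or the timeout budget runs out; note it also swaps the probe URL from /api/v1/services/list to /api/v1/health?live=1. A minimal Go version of the same wait (endpoint copied from the script; the two-minute budget and five-second interval are illustrative):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	deadline := time.Now().Add(2 * time.Minute) // illustrative budget
	for time.Now().Before(deadline) {
		resp, err := http.Get("http://localhost:3301/api/v1/health?live=1")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Println("SigNoz is up")
				return
			}
		}
		time.Sleep(5 * time.Second)
	}
	fmt.Println("timed out waiting for SigNoz")
}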

@@ -267,7 +244,7 @@ bye() { # Prints a friendly good bye message and exits the script.
echo ""
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"

@@ -295,7 +272,7 @@ request_sudo() {
echo -e "\n\n🙇 We will need sudo access to complete the installation."
if (( $EUID != 0 )); then
sudo_cmd="sudo"
echo -e "Please enter your sudo password, if prompted."
echo -e "Please enter your sudo password, if prompt."
# $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
# if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
# echo "Need sudo privileges to proceed with the installation."
@@ -518,7 +495,7 @@ if [[ $status_code -ne 200 ]]; then

echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
echo "or reach us on SigNoz for support https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"

@@ -534,15 +511,13 @@ else
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
echo "ℹ️ By default, retention period is set to 7 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"

echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"

echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "👉 Need help in Getting Started?"
echo "👉 Need help Getting Started?"
echo -e "Join us on Slack https://signoz.io/slack"
echo ""
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"

@@ -1,4 +1,4 @@
FROM golang:1.21-bookworm AS builder
FROM golang:1.17-buster AS builder

# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
ARG LD_FLAGS
@@ -22,7 +22,7 @@ RUN cd ee/query-service \


# use a minimal alpine image
FROM alpine:3.16.7
FROM alpine:3.7

# Add Maintainer Info
LABEL maintainer="signoz"
@@ -39,9 +39,6 @@ COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml

# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service

# run the binary
ENTRYPOINT ["./query-service"]


@@ -2,33 +2,23 @@ package api

import (
"net/http"
"time"

"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/license"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/version"
)

type APIHandlerOptions struct {
DataConnector interfaces.DataConnector
SkipConfig *basemodel.SkipConfig
PreferDelta bool
PreferSpanMetrics bool
MaxIdleConns int
MaxOpenConns int
DialTimeout time.Duration
AppDao dao.ModelDao
RulesManager *rules.Manager
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
DataConnector interfaces.DataConnector
AppDao dao.ModelDao
RulesManager *rules.Manager
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
}

type APIHandler struct {
@@ -40,18 +30,10 @@ type APIHandler struct {
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {

baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
SkipConfig: opts.SkipConfig,
PerferDelta: opts.PreferDelta,
PreferSpanMetrics: opts.PreferSpanMetrics,
MaxIdleConns: opts.MaxIdleConns,
MaxOpenConns: opts.MaxOpenConns,
DialTimeout: opts.DialTimeout,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
})
Reader: opts.DataConnector,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags})

if err != nil {
return nil, err
@@ -86,75 +68,64 @@ func (ah *APIHandler) CheckFeature(f string) bool {
}

// RegisterRoutes registers routes for this handler on the given router
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
// note: add ee override methods first

// routes available only in ee version
router.HandleFunc("/api/v1/licenses",
am.AdminAccess(ah.listLicenses)).
baseapp.AdminAccess(ah.listLicenses)).
Methods(http.MethodGet)

router.HandleFunc("/api/v1/licenses",
am.AdminAccess(ah.applyLicense)).
baseapp.AdminAccess(ah.applyLicense)).
Methods(http.MethodPost)

router.HandleFunc("/api/v1/featureFlags",
am.OpenAccess(ah.getFeatureFlags)).
baseapp.OpenAccess(ah.getFeatureFlags)).
Methods(http.MethodGet)

router.HandleFunc("/api/v1/loginPrecheck",
am.OpenAccess(ah.precheckLogin)).
baseapp.OpenAccess(ah.precheckLogin)).
Methods(http.MethodGet)

// paid plans specific routes
router.HandleFunc("/api/v1/complete/saml",
am.OpenAccess(ah.receiveSAML)).
baseapp.OpenAccess(ah.receiveSAML)).
Methods(http.MethodPost)

router.HandleFunc("/api/v1/complete/google",
am.OpenAccess(ah.receiveGoogleAuth)).
baseapp.OpenAccess(ah.receiveGoogleAuth)).
Methods(http.MethodGet)


router.HandleFunc("/api/v1/orgs/{orgId}/domains",
am.AdminAccess(ah.listDomainsByOrg)).
baseapp.AdminAccess(ah.listDomainsByOrg)).
Methods(http.MethodGet)

router.HandleFunc("/api/v1/domains",
am.AdminAccess(ah.postDomain)).
baseapp.AdminAccess(ah.postDomain)).
Methods(http.MethodPost)

router.HandleFunc("/api/v1/domains/{id}",
am.AdminAccess(ah.putDomain)).
baseapp.AdminAccess(ah.putDomain)).
Methods(http.MethodPut)

router.HandleFunc("/api/v1/domains/{id}",
am.AdminAccess(ah.deleteDomain)).
baseapp.AdminAccess(ah.deleteDomain)).
Methods(http.MethodDelete)

// base overrides
router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
router.HandleFunc("/api/v2/metrics/query_range", am.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/version", baseapp.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/invite/{token}", baseapp.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/register", baseapp.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/login", baseapp.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/traces/{traceId}", baseapp.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
router.HandleFunc("/api/v2/metrics/query_range", baseapp.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)

// PAT APIs
router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.createPAT)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.getPATs)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/pat/{id}", am.OpenAccess(ah.deletePAT)).Methods(http.MethodDelete)

ah.APIHandler.RegisterRoutes(router, am)
ah.APIHandler.RegisterRoutes(router)

}

func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
version := version.GetVersion()
versionResponse := basemodel.GetVersionResponse{
Version: version,
EE: "Y",
SetupCompleted: ah.SetupCompleted,
}

ah.WriteJSON(w, r, versionResponse)
ah.WriteJSON(w, r, map[string]string{"version": version, "ee": "Y"})
}

@@ -8,7 +8,6 @@ import (
"io/ioutil"
"net/http"
"net/url"

"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
@@ -88,16 +87,9 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {

// get invite object
invite, err := baseauth.ValidateInvite(ctx, req)
if err != nil {
if err != nil || invite == nil {
zap.S().Errorf("failed to validate invite token", err)
RespondError(w, model.BadRequest(err), nil)
return
}

if invite == nil {
zap.S().Errorf("failed to validate invite token: it is either empty or invalid", err)
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
return
}

// get auth domain from email domain
@@ -107,13 +99,13 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
}

precheckResp := &basemodel.PrecheckResponse{
precheckResp := &model.PrecheckResponse{
SSO: false,
IsUser: false,
}

if domain != nil && domain.SsoEnabled {
// sso is enabled, create user and respond precheck data
// so is enabled, create user and respond precheck data
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
if apierr != nil {
RespondError(w, apierr, nil)
@@ -198,7 +190,7 @@ func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string)
}

// receiveGoogleAuth completes google OAuth response and forwards a request
// to front-end to sign user in
// to front-end to sign user in
func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()
@@ -229,15 +221,15 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")

// fetch domain by parsing relay state.
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}

// now that we have domain, use domain to fetch sso settings.
// prepare google callback handler using parsedState -
// now that we have domain, use domain to fetch sso settings.
// prepare google callback handler using parsedState -
// which contains redirect URL (front-end endpoint)
callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)

@@ -247,7 +239,7 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
handleSsoError(w, r, redirectUri)
return
}


nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
@@ -258,12 +250,15 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
http.Redirect(w, r, nextPage, http.StatusSeeOther)
}



// receiveSAML completes a SAML request and gets user logged in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// this is the source url that initiated the login request
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()


if !ah.CheckFeature(model.SSO) {
zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
@@ -292,13 +287,13 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")

// fetch domain by parsing relay state.
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}


sp, err := domain.PrepareSamlRequest(parsedState)
if err != nil {
zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
@@ -332,6 +327,6 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
handleSsoError(w, r, redirectUri)
return
}


http.Redirect(w, r, nextPage, http.StatusSeeOther)
}

@@ -2,23 +2,9 @@ package api

import (
"net/http"

basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
featureSet, err := ah.FF().GetFeatureFlags()
if err != nil {
ah.HandleError(w, err, http.StatusInternalServerError)
return
}
if ah.opts.PreferSpanMetrics {
for idx := range featureSet {
feature := &featureSet[idx]
if feature.Name == basemodel.UseSpanMetrics {
featureSet[idx].Active = true
}
}
}
featureSet := ah.FF().GetFeatureFlags()
ah.Respond(w, featureSet)
}

@@ -137,8 +137,8 @@ func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request
var s basemodel.Series
s.QueryName = name
s.Labels = v.Metric.Copy().Map()
for _, p := range v.Floats {
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.F})
for _, p := range v.Points {
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.V})
}
seriesList = append(seriesList, &s)
}

@@ -1,107 +0,0 @@
package api

import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"time"

"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.uber.org/zap"
)

func generatePATToken() string {
// Generate a 32-byte random token.
token := make([]byte, 32)
rand.Read(token)
// Encode the token in base64.
encodedToken := base64.StdEncoding.EncodeToString(token)
return encodedToken
}
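// Illustrative note (not in the original file): base64-encoding 32 random
// bytes always yields a 44-character token (4 output characters for every
// 3 input bytes, with padding).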
|
||||
|
||||
func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
|
||||
req := model.PAT{}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
user, err := auth.GetUserFromRequest(r)
|
||||
if err != nil {
|
||||
RespondError(w, &model.ApiError{
|
||||
Typ: model.ErrorUnauthorized,
|
||||
Err: err,
|
||||
}, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// All the PATs are associated with the user creating the PAT. Hence, the permissions
|
||||
// associated with the PAT is also equivalent to that of the user.
|
||||
req.UserID = user.Id
|
||||
req.CreatedAt = time.Now().Unix()
|
||||
req.Token = generatePATToken()
|
||||
|
||||
zap.S().Debugf("Got PAT request: %+v", req)
|
||||
if apierr := ah.AppDao().CreatePAT(ctx, &req); apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
ah.Respond(w, &req)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
user, err := auth.GetUserFromRequest(r)
|
||||
if err != nil {
|
||||
RespondError(w, &model.ApiError{
|
||||
Typ: model.ErrorUnauthorized,
|
||||
Err: err,
|
||||
}, nil)
|
||||
return
|
||||
}
|
||||
zap.S().Infof("Get PATs for user: %+v", user.Id)
|
||||
pats, apierr := ah.AppDao().ListPATs(ctx, user.Id)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
ah.Respond(w, pats)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) deletePAT(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
id := mux.Vars(r)["id"]
|
||||
user, err := auth.GetUserFromRequest(r)
|
||||
if err != nil {
|
||||
RespondError(w, &model.ApiError{
|
||||
Typ: model.ErrorUnauthorized,
|
||||
Err: err,
|
||||
}, nil)
|
||||
return
|
||||
}
|
||||
pat, apierr := ah.AppDao().GetPATByID(ctx, id)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
if pat.UserID != user.Id {
|
||||
RespondError(w, &model.ApiError{
|
||||
Typ: model.ErrorUnauthorized,
|
||||
Err: fmt.Errorf("unauthorized PAT delete request"),
|
||||
}, nil)
|
||||
return
|
||||
}
|
||||
zap.S().Debugf("Delete PAT with id: %+v", id)
|
||||
if apierr := ah.AppDao().DeletePAT(ctx, id); apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
ah.Respond(w, map[string]string{"data": "pat deleted successfully"})
|
||||
}
|
||||
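The handler file above (removed in this compare) generates PATs with crypto/rand and base64. As a rough illustration of the client side, here is a minimal Go sketch of presenting such a token back to the query service; the SIGNOZ-API-KEY header name is taken from the auth middleware later in this compare, while the host and route are placeholder assumptions, not confirmed endpoints.

```go
// Hypothetical sketch of calling the query service with a PAT.
// Host and route are placeholders; only the header name is grounded
// in the middleware shown later in this compare.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/api/v1/version", nil)
	if err != nil {
		panic(err)
	}
	// Authenticate with a personal access token instead of a JWT.
	req.Header.Set("SIGNOZ-API-KEY", "<token returned by createPAT>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```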
@@ -1,8 +1,6 @@
package db

import (
    "time"

    "github.com/ClickHouse/clickhouse-go/v2"

    "github.com/jmoiron/sqlx"
@@ -17,15 +15,8 @@ type ClickhouseReader struct {
    *basechr.ClickHouseReader
}

func NewDataConnector(
    localDB *sqlx.DB,
    promConfigPath string,
    lm interfaces.FeatureLookup,
    maxIdleConns int,
    maxOpenConns int,
    dialTimeout time.Duration,
) *ClickhouseReader {
    ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout)
func NewDataConnector(localDB *sqlx.DB, promConfigPath string, lm interfaces.FeatureLookup) *ClickhouseReader {
    ch := basechr.NewReader(localDB, promConfigPath, lm)
    return &ClickhouseReader{
        conn:  ch.GetConn(),
        appdb: localDB,

@@ -22,24 +22,14 @@ import (
    "go.signoz.io/signoz/ee/query-service/app/db"
    "go.signoz.io/signoz/ee/query-service/dao"
    "go.signoz.io/signoz/ee/query-service/interfaces"
    baseInterface "go.signoz.io/signoz/pkg/query-service/interfaces"

    licensepkg "go.signoz.io/signoz/ee/query-service/license"
    "go.signoz.io/signoz/ee/query-service/usage"

    "go.signoz.io/signoz/pkg/query-service/agentConf"
    baseapp "go.signoz.io/signoz/pkg/query-service/app"
    "go.signoz.io/signoz/pkg/query-service/app/dashboards"
    baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
    "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
    "go.signoz.io/signoz/pkg/query-service/app/opamp"
    opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
    baseauth "go.signoz.io/signoz/pkg/query-service/auth"
    baseconst "go.signoz.io/signoz/pkg/query-service/constants"
    "go.signoz.io/signoz/pkg/query-service/healthcheck"
    basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
    baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
    pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
    rules "go.signoz.io/signoz/pkg/query-service/rules"
    "go.signoz.io/signoz/pkg/query-service/telemetry"
@@ -47,21 +37,13 @@ import (
    "go.uber.org/zap"
)

const AppDbEngine = "sqlite"

type ServerOptions struct {
    PromConfigPath    string
    SkipTopLvlOpsPath string
    HTTPHostPort      string
    PrivateHostPort   string
    PromConfigPath  string
    HTTPHostPort    string
    PrivateHostPort string
    // alert specific params
    DisableRules      bool
    RuleRepoURL       string
    PreferDelta       bool
    PreferSpanMetrics bool
    MaxIdleConns      int
    MaxOpenConns      int
    DialTimeout       time.Duration
    DisableRules bool
    RuleRepoURL  string
}

// Server runs HTTP api service
@@ -82,9 +64,6 @@ type Server struct {
    // feature flags
    featureLookup baseint.FeatureLookup

    // Usage manager
    usageManager *usage.Manager

    unavailableChannel chan healthcheck.Status
}

@@ -101,8 +80,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
        return nil, err
    }

    baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)

    localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)

    if err != nil {
@@ -125,26 +102,11 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
    storage := os.Getenv("STORAGE")
    if storage == "clickhouse" {
        zap.S().Info("Using ClickHouse as datastore ...")
        qb := db.NewDataConnector(
            localDB,
            serverOptions.PromConfigPath,
            lm,
            serverOptions.MaxIdleConns,
            serverOptions.MaxOpenConns,
            serverOptions.DialTimeout,
        )
        qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath, lm)
        go qb.Start(readerReady)
        reader = qb
    } else {
        return nil, fmt.Errorf("storage type: %s is not supported in query service", storage)
    }
    skipConfig := &basemodel.SkipConfig{}
    if serverOptions.SkipTopLvlOpsPath != "" {
        // read skip config
        skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
        if err != nil {
            return nil, err
        }
        return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
    }

    <-readerReady
@@ -153,30 +115,12 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
        serverOptions.RuleRepoURL,
        localDB,
        reader,
        serverOptions.DisableRules,
        lm)
        serverOptions.DisableRules)

    if err != nil {
        return nil, err
    }

    // initiate opamp
    _, err = opAmpModel.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
    if err != nil {
        return nil, err
    }

    // initiate agent config handler
    if err := agentConf.Initiate(localDB, AppDbEngine); err != nil {
        return nil, err
    }

    // ingestion pipelines manager
    logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(localDB, "sqlite")
    if err != nil {
        return nil, err
    }

    // start the usagemanager
    usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn())
    if err != nil {
@@ -190,18 +134,11 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
    telemetry.GetInstance().SetReader(reader)

    apiOpts := api.APIHandlerOptions{
        DataConnector:                 reader,
        SkipConfig:                    skipConfig,
        PreferDelta:                   serverOptions.PreferDelta,
        PreferSpanMetrics:             serverOptions.PreferSpanMetrics,
        MaxIdleConns:                  serverOptions.MaxIdleConns,
        MaxOpenConns:                  serverOptions.MaxOpenConns,
        DialTimeout:                   serverOptions.DialTimeout,
        AppDao:                        modelDao,
        RulesManager:                  rm,
        FeatureFlags:                  lm,
        LicenseManager:                lm,
        LogsParsingPipelineController: logParsingPipelineController,
        DataConnector:  reader,
        AppDao:         modelDao,
        RulesManager:   rm,
        FeatureFlags:   lm,
        LicenseManager: lm,
    }

    apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -215,7 +152,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
        ruleManager:        rm,
        serverOptions:      serverOptions,
        unavailableChannel: make(chan healthcheck.Status),
        usageManager:       usageManager,
    }

    httpServer, err := s.createPublicServer(apiHandler)
@@ -251,7 +187,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
        // ip here for alert manager
        AllowedOrigins: []string{"*"},
        AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
        AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY"},
        AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
    })

    handler := c.Handler(r)
@@ -266,33 +202,13 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e

    r := mux.NewRouter()

    getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
        patToken := r.Header.Get("SIGNOZ-API-KEY")
        if len(patToken) > 0 {
            zap.S().Debugf("Received a non-zero length PAT token")
            ctx := context.Background()
            dao := apiHandler.AppDao()

            user, err := dao.GetUserByPAT(ctx, patToken)
            if err == nil && user != nil {
                zap.S().Debugf("Found valid PAT user: %+v", user)
                return user, nil
            }
            if err != nil {
                zap.S().Debugf("Error while getting user for PAT: %+v", err)
            }
        }
        return baseauth.GetUserFromRequest(r)
    }
    am := baseapp.NewAuthMiddleware(getUserFromRequest)
    r.Use(setTimeoutMiddleware)
    r.Use(s.analyticsMiddleware)
    r.Use(loggingMiddleware)

    apiHandler.RegisterRoutes(r, am)
    apiHandler.RegisterMetricsRoutes(r, am)
    apiHandler.RegisterLogsRoutes(r, am)
    apiHandler.RegisterQueryRangeV3Routes(r, am)
    apiHandler.RegisterRoutes(r)
    apiHandler.RegisterMetricsRoutes(r)
    apiHandler.RegisterLogsRoutes(r)

    c := cors.New(cors.Options{
        AllowedOrigins: []string{"*"},
@@ -316,7 +232,7 @@ func loggingMiddleware(next http.Handler) http.Handler {
        path, _ := route.GetPathTemplate()
        startTime := time.Now()
        next.ServeHTTP(w, r)
        zap.L().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
        zap.S().Info(path, "\ttimeTaken: ", time.Now().Sub(startTime))
    })
}

@@ -328,7 +244,7 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
        path, _ := route.GetPathTemplate()
        startTime := time.Now()
        next.ServeHTTP(w, r)
        zap.L().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
        zap.S().Info(path, "\tprivatePort: true", "\ttimeTaken: ", time.Now().Sub(startTime))
    })
}

@@ -355,42 +271,42 @@ func (lrw *loggingResponseWriter) Flush() {

func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
    pathToExtractBodyFrom := "/api/v2/metrics/query_range"

    var requestBody map[string]interface{}
    data := map[string]interface{}{}
    var postData *basemodel.QueryRangeParamsV2

    if path == pathToExtractBodyFrom && (r.Method == "POST") {
        if r.Body != nil {
            bodyBytes, err := ioutil.ReadAll(r.Body)
            if err != nil {
                return nil, false
            }
            r.Body.Close() // must close
            r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
            json.Unmarshal(bodyBytes, &postData)
            bodyBytes, _ := ioutil.ReadAll(r.Body)
            r.Body.Close() // must close
            r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))

        } else {
            return nil, false
        }
        json.Unmarshal(bodyBytes, &requestBody)

    } else {
        return nil, false
    }

    signozMetricNotFound := false
    compositeMetricQuery, compositeMetricQueryExists := requestBody["compositeMetricQuery"]
    compositeMetricQueryMap := compositeMetricQuery.(map[string]interface{})
    signozMetricFound := false

    if postData != nil {
        signozMetricNotFound = telemetry.GetInstance().CheckSigNozMetricsV2(postData.CompositeMetricQuery)

        if postData.CompositeMetricQuery != nil {
            data["queryType"] = postData.CompositeMetricQuery.QueryType
            data["panelType"] = postData.CompositeMetricQuery.PanelType
    if compositeMetricQueryExists {
        signozMetricFound = telemetry.GetInstance().CheckSigNozMetrics(compositeMetricQueryMap)
        queryType, queryTypeExists := compositeMetricQueryMap["queryType"]
        if queryTypeExists {
            data["queryType"] = queryType
        }
        panelType, panelTypeExists := compositeMetricQueryMap["panelType"]
        if panelTypeExists {
            data["panelType"] = panelType
        }

        data["datasource"] = postData.DataSource
    }

    if signozMetricNotFound {
    datasource, datasourceExists := requestBody["dataSource"]
    if datasourceExists {
        data["datasource"] = datasource
    }

    if !signozMetricFound {
        telemetry.GetInstance().AddActiveMetricsUser()
        telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
    }
@@ -444,7 +360,7 @@ func setTimeoutMiddleware(next http.Handler) http.Handler {
        // check if route is not excluded
        url := r.URL.Path
        if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
            ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout)
            ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout*time.Second)
            defer cancel()
        }
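One subtle change in the hunk above is the timeout value: `baseconst.ContextTimeout` versus `baseconst.ContextTimeout*time.Second`. The two are interchangeable only if ContextTimeout is declared as a bare number of seconds rather than as a time.Duration; the constant's actual declaration is not shown in this compare. A small standalone sketch of the distinction, with assumed declarations:

```go
// Illustration only: why the two WithTimeout calls above differ.
// Both declarations below are assumptions; the real ContextTimeout
// lives in pkg/query-service/constants and is not shown here.
package main

import (
	"context"
	"fmt"
	"time"
)

const contextTimeoutSeconds = 60                // bare count: must be multiplied by time.Second
const contextTimeoutDuration = 60 * time.Second // already a time.Duration: use as-is

func main() {
	// With a bare integer constant, forgetting the multiplication
	// would yield a 60ns deadline instead of 60s.
	ctx1, cancel1 := context.WithTimeout(context.Background(), contextTimeoutSeconds*time.Second)
	defer cancel1()

	ctx2, cancel2 := context.WithTimeout(context.Background(), contextTimeoutDuration)
	defer cancel2()

	d1, _ := ctx1.Deadline()
	d2, _ := ctx2.Deadline()
	fmt.Println(time.Until(d1), time.Until(d2)) // both roughly 1m0s
}
```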
@@ -530,7 +446,7 @@ func (s *Server) Start() error {
    if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
        privatePort = port
    }

    fmt.Println("starting private http")
    go func() {
        zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))

@@ -546,40 +462,6 @@ func (s *Server) Start() error {

    }()

    go func() {
        zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
        err := opamp.InitializeAndStartServer(baseconst.OpAmpWsEndpoint, &opAmpModel.AllAgents)
        if err != nil {
            zap.S().Info("opamp ws server failed to start", err)
            s.unavailableChannel <- healthcheck.Unavailable
        }
    }()

    return nil
}

func (s *Server) Stop() error {
    if s.httpServer != nil {
        if err := s.httpServer.Shutdown(context.Background()); err != nil {
            return err
        }
    }

    if s.privateHTTP != nil {
        if err := s.privateHTTP.Shutdown(context.Background()); err != nil {
            return err
        }
    }

    opamp.StopServer()

    if s.ruleManager != nil {
        s.ruleManager.Stop()
    }

    // stop usage manager
    s.usageManager.Stop()

    return nil
}

@@ -589,8 +471,7 @@ func makeRulesManager(
    ruleRepoURL string,
    db *sqlx.DB,
    ch baseint.Reader,
    disableRules bool,
    fm baseInterface.FeatureLookup) (*rules.Manager, error) {
    disableRules bool) (*rules.Manager, error) {

    // create engine
    pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -617,7 +498,6 @@ func makeRulesManager(
        Context:      context.Background(),
        Logger:       nil,
        DisableRules: disableRules,
        FeatureFlags: fm,
    }

    // create Manager

@@ -3,7 +3,6 @@ package dao
import (
    "context"
    "net/url"

    "github.com/google/uuid"
    "github.com/jmoiron/sqlx"
    "go.signoz.io/signoz/ee/query-service/model"
@@ -21,10 +20,11 @@ type ModelDao interface {
    DB() *sqlx.DB

    // auth methods
    PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
    CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
    PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
    GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)

    // org domain (auth domains) CRUD ops
    ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
    GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
@@ -32,11 +32,4 @@ type ModelDao interface {
    UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
    DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
    GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)

    CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError
    GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
    GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
    GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
    ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError)
    DeletePAT(ctx context.Context, id string) basemodel.BaseApiError
}

@@ -5,61 +5,16 @@ import (
    "fmt"
    "net/url"
    "strings"
    "time"

    "github.com/google/uuid"
    "go.signoz.io/signoz/ee/query-service/constants"
    "go.signoz.io/signoz/ee/query-service/model"
    baseauth "go.signoz.io/signoz/pkg/query-service/auth"
    baseconst "go.signoz.io/signoz/pkg/query-service/constants"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
    "go.signoz.io/signoz/pkg/query-service/utils"
    baseauth "go.signoz.io/signoz/pkg/query-service/auth"
    "go.uber.org/zap"
)

func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
    // get auth domain from email domain
    domain, apierr := m.GetDomainByEmail(ctx, email)

    if apierr != nil {
        zap.S().Errorf("failed to get domain from email", apierr)
        return nil, model.InternalErrorStr("failed to get domain from email")
    }

    hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
    if err != nil {
        zap.S().Errorf("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
        return nil, model.InternalErrorStr("failed to generate password hash")
    }

    group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
    if apiErr != nil {
        zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
        return nil, apiErr
    }

    user := &basemodel.User{
        Id:                uuid.NewString(),
        Name:              "",
        Email:             email,
        Password:          hash,
        CreatedAt:         time.Now().Unix(),
        ProfilePictureURL: "", // Currently unused
        GroupId:           group.Id,
        OrgId:             domain.OrgId,
    }

    user, apiErr = m.CreateUser(ctx, user, false)
    if apiErr != nil {
        zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err)
        return nil, apiErr
    }

    return user, nil

}

// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {

@@ -69,20 +24,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
        return "", model.BadRequestStr("invalid user email received from the auth provider")
    }

    user := &basemodel.User{}

    if userPayload == nil {
        newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
        user = newUser
        if apiErr != nil {
            zap.S().Errorf("failed to create user with email received from auth provider: %v", apierr.Error())
            return "", apiErr
        }
    } else {
        user = &userPayload.User
    }

    tokenStore, err := baseauth.GenerateJWTForUser(user)
    tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
    if err != nil {
        zap.S().Errorf("failed to generate token for SSO login user", err)
        return "", model.InternalErrorStr("failed to generate token for the user")
@@ -91,7 +33,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
    return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
        redirectUri,
        tokenStore.AccessJwt,
        user.Id,
        userPayload.User.Id,
        tokenStore.RefreshJwt), nil
}

@@ -120,10 +62,10 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base

// PrecheckLogin is called when the login or signup page is loaded
// to check sso login is to be prompted
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) {
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError) {

    // assume user is valid unless proven otherwise
    resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}
    resp := &model.PrecheckResponse{IsUser: true, CanSelfRegister: false}

    // check if email is a valid user
    userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
@@ -134,7 +76,6 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
    if userPayload == nil {
        resp.IsUser = false
    }

    ssoAvailable := true
    err := m.checkFeature(model.SSO)
    if err != nil {
@@ -150,8 +91,6 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (

    if ssoAvailable {

        resp.IsUser = true

        // find domain from email
        orgDomain, apierr := m.GetDomainByEmail(ctx, email)
        if apierr != nil {

@@ -4,8 +4,8 @@ import (
    "context"
    "database/sql"
    "encoding/json"
    "fmt"
    "net/url"
    "fmt"
    "strings"
    "time"

@@ -28,70 +28,29 @@ type StoredDomain struct {

// GetDomainFromSsoResponse uses relay state received from IdP to fetch
// user domain. The domain is further used to process validity of the response.
// when sending login request to IdP we send relay state as URL (site url)
// with domainId or domainName as query parameter.
// when sending login request to IdP we send relay state as URL (site url)
// with domainId as query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
    // derive domain id from relay state now
    var domainIdStr string
    var domainNameStr string
    var domain *model.OrgDomain

    var domainIdStr string
    for k, v := range relayState.Query() {
        if k == "domainId" && len(v) > 0 {
            domainIdStr = strings.Replace(v[0], ":", "-", -1)
        }
        if k == "domainName" && len(v) > 0 {
            domainNameStr = v[0]
        }
    }

    if domainIdStr != "" {
        domainId, err := uuid.Parse(domainIdStr)
        if err != nil {
            zap.S().Errorf("failed to parse domainId from relay state", err)
            return nil, fmt.Errorf("failed to parse domainId from IdP response")
        }

        domain, err = m.GetDomain(ctx, domainId)
        if (err != nil) || domain == nil {
            zap.S().Errorf("failed to find domain from domainId received in IdP response", err.Error())
            return nil, fmt.Errorf("invalid credentials")
        }
    }

    if domainNameStr != "" {

        domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
        domain = domainFromDB
        if (err != nil) || domain == nil {
            zap.S().Errorf("failed to find domain from domainName received in IdP response", err.Error())
            return nil, fmt.Errorf("invalid credentials")
        }
    }
    if domain != nil {
        return domain, nil
    }

    return nil, fmt.Errorf("failed to find domain received in IdP response")
}

// GetDomainByName returns org domain for a given domain name
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {

    stored := StoredDomain{}
    err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)

    domainId, err := uuid.Parse(domainIdStr)
    if err != nil {
        if err == sql.ErrNoRows {
            return nil, model.BadRequest(fmt.Errorf("invalid domain name"))
        }
        return nil, model.InternalError(err)
        zap.S().Errorf("failed to parse domain id from relay state", err)
        return nil, fmt.Errorf("failed to parse response from IdP response")
    }

    domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
    if err := domain.LoadConfig(stored.Data); err != nil {
        return nil, model.InternalError(err)
    domain, err := m.GetDomain(ctx, domainId)
    if (err != nil) || domain == nil {
        zap.S().Errorf("failed to find domain received in IdP response", err.Error())
        return nil, fmt.Errorf("invalid credentials")
    }

    return domain, nil
}

@@ -110,7 +69,7 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomai

    domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
    if err := domain.LoadConfig(stored.Data); err != nil {
        return nil, model.InternalError(err)
        return domain, model.InternalError(err)
    }
    return domain, nil
}
@@ -247,7 +206,7 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O

    domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
    if err := domain.LoadConfig(stored.Data); err != nil {
        return nil, model.InternalError(err)
        return domain, model.InternalError(err)
    }
    return domain, nil
}

@@ -48,17 +48,7 @@ func InitDB(dataSourceName string) (*modelDao, error) {
        updated_at INTEGER,
        data TEXT NOT NULL,
        FOREIGN KEY(org_id) REFERENCES organizations(id)
    );
    CREATE TABLE IF NOT EXISTS personal_access_tokens (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        user_id TEXT NOT NULL,
        token TEXT NOT NULL UNIQUE,
        name TEXT NOT NULL,
        created_at INTEGER NOT NULL,
        expires_at INTEGER NOT NULL,
        FOREIGN KEY(user_id) REFERENCES users(id)
    );
    `
    );`

    _, err = m.DB().Exec(table_schema)
    if err != nil {
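The `personal_access_tokens` schema above lines up with the `db` struct tags on the PAT model shown later in this compare. A minimal, self-contained sqlx sketch under that assumption; the sqlite driver and DSN are illustrative, not taken from the repository:

```go
// Minimal sqlx sketch against the personal_access_tokens table.
// The PAT struct mirrors the model's db tags from later in this compare;
// driver choice and DSN are assumptions for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

type PAT struct {
	Id        string `db:"id"`
	UserID    string `db:"user_id"`
	Token     string `db:"token"`
	Name      string `db:"name"`
	CreatedAt int64  `db:"created_at"`
	ExpiresAt int64  `db:"expires_at"`
}

func main() {
	db, err := sqlx.Open("sqlite3", "signoz.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Struct tags let sqlx map the snake_case columns onto PAT fields.
	pats := []PAT{}
	if err := db.Select(&pats, `SELECT * FROM personal_access_tokens WHERE user_id=?;`, "user-123"); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", pats)
}
```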
@@ -1,106 +0,0 @@
package sqlite

import (
    "context"
    "fmt"

    "go.signoz.io/signoz/ee/query-service/model"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
    "go.uber.org/zap"
)

func (m *modelDao) CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError {
    _, err := m.DB().ExecContext(ctx,
        "INSERT INTO personal_access_tokens (user_id, token, name, created_at, expires_at) VALUES ($1, $2, $3, $4, $5)",
        p.UserID,
        p.Token,
        p.Name,
        p.CreatedAt,
        p.ExpiresAt)
    if err != nil {
        zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
        return model.InternalError(fmt.Errorf("PAT insertion failed"))
    }
    return nil
}

func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
    pats := []model.PAT{}

    if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE user_id=?;`, userID); err != nil {
        zap.S().Errorf("Failed to fetch PATs for user: %s, err: %v", userID, zap.Error(err))
        return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
    }
    return pats, nil
}

func (m *modelDao) DeletePAT(ctx context.Context, id string) basemodel.BaseApiError {
    _, err := m.DB().ExecContext(ctx, `DELETE from personal_access_tokens where id=?;`, id)
    if err != nil {
        zap.S().Errorf("Failed to delete PAT, err: %v", zap.Error(err))
        return model.InternalError(fmt.Errorf("failed to delete PAT"))
    }
    return nil
}

func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
    pats := []model.PAT{}

    if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=?;`, token); err != nil {
        return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
    }

    if len(pats) != 1 {
        return nil, &model.ApiError{
            Typ: model.ErrorInternal,
            Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token),
        }
    }

    return &pats[0], nil
}

func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
    pats := []model.PAT{}

    if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=?;`, id); err != nil {
        return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
    }

    if len(pats) != 1 {
        return nil, &model.ApiError{
            Typ: model.ErrorInternal,
            Err: fmt.Errorf("found zero or multiple PATs with same token"),
        }
    }

    return &pats[0], nil
}

func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) {
    users := []basemodel.UserPayload{}

    query := `SELECT
        u.id,
        u.name,
        u.email,
        u.password,
        u.created_at,
        u.profile_picture_url,
        u.org_id,
        u.group_id
        FROM users u, personal_access_tokens p
        WHERE u.id = p.user_id and p.token=?;`

    if err := m.DB().Select(&users, query, token); err != nil {
        return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
    }

    if len(users) != 1 {
        return nil, &model.ApiError{
            Typ: model.ErrorInternal,
            Err: fmt.Errorf("found zero or multiple users with same PAT token"),
        }
    }
    return &users[0], nil
}
@@ -2,7 +2,6 @@ package license

import (
    "context"
    "database/sql"
    "fmt"
    "time"

@@ -10,7 +9,6 @@ import (

    "go.signoz.io/signoz/ee/query-service/license/sqlite"
    "go.signoz.io/signoz/ee/query-service/model"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
    "go.uber.org/zap"
)

@@ -127,79 +125,3 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,

    return nil
}

func (r *Repo) CreateFeature(req *basemodel.Feature) *basemodel.ApiError {

    _, err := r.db.Exec(
        `INSERT INTO feature_status (name, active, usage, usage_limit, route)
        VALUES (?, ?, ?, ?, ?);`,
        req.Name, req.Active, req.Usage, req.UsageLimit, req.Route)
    if err != nil {
        return &basemodel.ApiError{Typ: basemodel.ErrorInternal, Err: err}
    }
    return nil
}

func (r *Repo) GetFeature(featureName string) (basemodel.Feature, error) {

    var feature basemodel.Feature

    err := r.db.Get(&feature,
        `SELECT * FROM feature_status WHERE name = ?;`, featureName)
    if err != nil {
        return feature, err
    }
    if feature.Name == "" {
        return feature, basemodel.ErrFeatureUnavailable{Key: featureName}
    }
    return feature, nil
}

func (r *Repo) GetAllFeatures() ([]basemodel.Feature, error) {

    var feature []basemodel.Feature

    err := r.db.Select(&feature,
        `SELECT * FROM feature_status;`)
    if err != nil {
        return feature, err
    }

    return feature, nil
}

func (r *Repo) UpdateFeature(req basemodel.Feature) error {

    _, err := r.db.Exec(
        `UPDATE feature_status SET active = ?, usage = ?, usage_limit = ?, route = ? WHERE name = ?;`,
        req.Active, req.Usage, req.UsageLimit, req.Route, req.Name)
    if err != nil {
        return err
    }
    return nil
}

func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
    // get a feature by name, if it doesn't exist, create it. If it does exist, update it.
    for _, feature := range req {
        currentFeature, err := r.GetFeature(feature.Name)
        if err != nil && err == sql.ErrNoRows {
            err := r.CreateFeature(&feature)
            if err != nil {
                return err
            }
            continue
        } else if err != nil {
            return err
        }
        feature.Usage = currentFeature.Usage
        if feature.Usage >= feature.UsageLimit && feature.UsageLimit != -1 {
            feature.Active = false
        }
        err = r.UpdateFeature(feature)
        if err != nil {
            return err
        }
    }
    return nil
}

@@ -96,11 +96,6 @@ func (lm *Manager) SetActive(l *model.License) {
    lm.activeFeatures = l.FeatureSet
    // set default features
    setDefaultFeatures(lm)

    err := lm.InitFeatures(lm.activeFeatures)
    if err != nil {
        zap.S().Panicf("Couldn't activate features: %v", err)
    }
    if !lm.validatorRunning {
        // we want to make sure only one validator runs,
        // we already have lock() so good to go
@@ -111,7 +106,9 @@ func (lm *Manager) SetActive(l *model.License) {
}

func setDefaultFeatures(lm *Manager) {
    lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...)
    for k, v := range baseconstants.DEFAULT_FEATURE_SET {
        lm.activeFeatures[k] = v
    }
}

// LoadActiveLicense loads the most recent active license
@@ -126,13 +123,8 @@ func (lm *Manager) LoadActiveLicense() error {
    } else {
        zap.S().Info("No active license found, defaulting to basic plan")
        // if no active license is found, we default to basic(free) plan with all default features
        lm.activeFeatures = model.BasicPlan
        lm.activeFeatures = basemodel.BasicPlan
        setDefaultFeatures(lm)
        err := lm.InitFeatures(lm.activeFeatures)
        if err != nil {
            zap.S().Error("Couldn't initialize features: ", err)
            return err
        }
    }

    return nil
@@ -299,31 +291,18 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
// CheckFeature will be internally used by backend routines
// for feature gating
func (lm *Manager) CheckFeature(featureKey string) error {
    feature, err := lm.repo.GetFeature(featureKey)
    if err != nil {
        return err
    }
    if feature.Active {
        return nil
    if value, ok := lm.activeFeatures[featureKey]; ok {
        if value {
            return nil
        }
        return basemodel.ErrFeatureUnavailable{Key: featureKey}
    }
    return basemodel.ErrFeatureUnavailable{Key: featureKey}
}

// GetFeatureFlags returns current active features
func (lm *Manager) GetFeatureFlags() (basemodel.FeatureSet, error) {
    return lm.repo.GetAllFeatures()
}

func (lm *Manager) InitFeatures(features basemodel.FeatureSet) error {
    return lm.repo.InitFeatures(features)
}

func (lm *Manager) UpdateFeatureFlag(feature basemodel.Feature) error {
    return lm.repo.UpdateFeature(feature)
}

func (lm *Manager) GetFeatureFlag(key string) (basemodel.Feature, error) {
    return lm.repo.GetFeature(key)
func (lm *Manager) GetFeatureFlags() basemodel.FeatureSet {
    return lm.activeFeatures
}

// GetRepo return the license repo
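The license manager hunks above move feature gating between repo-backed Feature rows and an in-memory lookup, `lm.activeFeatures[featureKey]`, which implies a FeatureSet keyed by feature name (the plan literals later in this compare use the same `SSO: true` style). A minimal sketch of that map form; the real FeatureSet type lives in pkg/query-service/model and may differ:

```go
// Sketch of the map-backed FeatureSet implied by the
// lm.activeFeatures[featureKey] lookup above; assumed, not the
// repository's actual type definition.
package main

import "fmt"

type FeatureSet map[string]bool

type ErrFeatureUnavailable struct{ Key string }

func (e ErrFeatureUnavailable) Error() string {
	return fmt.Sprintf("feature unavailable: %s", e.Key)
}

// checkFeature mirrors the gating logic: the key must exist and be true.
func checkFeature(active FeatureSet, key string) error {
	if value, ok := active[key]; ok && value {
		return nil
	}
	return ErrFeatureUnavailable{Key: key}
}

func main() {
	features := FeatureSet{"SSO": true, "DISABLE_UPSELL": false}
	fmt.Println(checkFeature(features, "SSO"))            // <nil>
	fmt.Println(checkFeature(features, "DISABLE_UPSELL")) // feature unavailable: DISABLE_UPSELL
}
```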
@@ -2,7 +2,6 @@ package sqlite

import (
    "fmt"

    "github.com/jmoiron/sqlx"
)

@@ -34,19 +33,5 @@ func InitDB(db *sqlx.DB) error {
    if err != nil {
        return fmt.Errorf("Error in creating licenses table: %s", err.Error())
    }

    table_schema = `CREATE TABLE IF NOT EXISTS feature_status (
        name TEXT PRIMARY KEY,
        active bool,
        usage INTEGER DEFAULT 0,
        usage_limit INTEGER DEFAULT 0,
        route TEXT
    );`

    _, err = db.Exec(table_schema)
    if err != nil {
        return fmt.Errorf("Error in creating feature_status table: %s", err.Error())
    }

    return nil
}

@@ -3,78 +3,30 @@ package main
import (
    "context"
    "flag"
    "log"
    "os"
    "os/signal"
    "strconv"
    "syscall"
    "time"

    "go.opentelemetry.io/otel/sdk/resource"
    semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
    "go.signoz.io/signoz/ee/query-service/app"
    "go.signoz.io/signoz/pkg/query-service/auth"
    "go.signoz.io/signoz/pkg/query-service/constants"
    baseconst "go.signoz.io/signoz/pkg/query-service/constants"
    "go.signoz.io/signoz/pkg/query-service/version"
    "google.golang.org/grpc"

    zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
    zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
func initZapLog() *zap.Logger {
    config := zap.NewDevelopmentConfig()
    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
    defer stop()

    config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
    otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
    consoleEncoder := zapcore.NewConsoleEncoder(config.EncoderConfig)
    defaultLogLevel := zapcore.DebugLevel
    config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
    config.EncoderConfig.TimeKey = "timestamp"
    config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

    res := resource.NewWithAttributes(
        semconv.SchemaURL,
        semconv.ServiceNameKey.String("query-service"),
    )

    core := zapcore.NewTee(
        zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
    )

    if enableQueryServiceLogOTLPExport == true {
        conn, err := grpc.DialContext(ctx, constants.OTLPTarget, grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(time.Second*30))
        if err != nil {
            log.Println("failed to connect to otlp collector to export query service logs with error:", err)
        } else {
            logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
            if err != nil {
                logExportBatchSizeInt = 1000
            }
            ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
                BatchSize:      logExportBatchSizeInt,
                ResourceSchema: semconv.SchemaURL,
                Resource:       res,
            }))
            core = zapcore.NewTee(
                zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
                zapcore.NewCore(otlpEncoder, zapcore.NewMultiWriteSyncer(ws), defaultLogLevel),
            )
        }
    }
    logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))

    logger, _ := config.Build()
    return logger
}

func main() {
    var promConfigPath, skipTopLvlOpsPath string
    var promConfigPath string

    // disables rule execution but allows change to the rule definition
    var disableRules bool
@@ -82,27 +34,12 @@ func main() {
    // the url used to build link in the alert messages in slack and other systems
    var ruleRepoURL string

    var enableQueryServiceLogOTLPExport bool
    var preferDelta bool
    var preferSpanMetrics bool

    var maxIdleConns int
    var maxOpenConns int
    var dialTimeout time.Duration

    flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
    flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
    flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
    flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
    flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
    flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
    flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
    flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
    flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
    flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
    flag.Parse()

    loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
    loggerMgr := initZapLog()
    zap.ReplaceGlobals(loggerMgr)
    defer loggerMgr.Sync() // flushes buffer, if any

@@ -110,17 +47,11 @@ func main() {
    version.PrintVersion()

    serverOptions := &app.ServerOptions{
        HTTPHostPort:      baseconst.HTTPHostPort,
        PromConfigPath:    promConfigPath,
        SkipTopLvlOpsPath: skipTopLvlOpsPath,
        PreferDelta:       preferDelta,
        PreferSpanMetrics: preferSpanMetrics,
        PrivateHostPort:   baseconst.PrivateHostPort,
        DisableRules:      disableRules,
        RuleRepoURL:       ruleRepoURL,
        MaxIdleConns:      maxIdleConns,
        MaxOpenConns:      maxOpenConns,
        DialTimeout:       dialTimeout,
        HTTPHostPort:    baseconst.HTTPHostPort,
        PromConfigPath:  promConfigPath,
        PrivateHostPort: baseconst.PrivateHostPort,
        DisableRules:    disableRules,
        RuleRepoURL:     ruleRepoURL,
    }

    // Read the jwt secret key
@@ -154,7 +85,6 @@ func main() {
            logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
        case <-signalsChannel:
            logger.Fatal("Received OS Interrupt Signal ... ")
            server.Stop()
        }
    }
}
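The new `-dial-timeout` flag above relies on `flag.DurationVar` from the standard library, which accepts human-readable values such as `5s` or `250ms` on the command line. A small standalone sketch:

```go
// Standalone check of how flag.DurationVar (used above for -dial-timeout)
// parses duration strings; binary name and invocation are illustrative.
package main

import (
	"flag"
	"fmt"
	"time"
)

func main() {
	var dialTimeout time.Duration
	flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "the maximum time to establish a connection")
	flag.Parse() // e.g. ./demo -dial-timeout 250ms

	fmt.Println("dial timeout:", dialTimeout)
}
```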
@@ -4,9 +4,18 @@ import (
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

// PrecheckResponse contains login precheck response
type PrecheckResponse struct {
    SSO             bool   `json:"sso"`
    SsoUrl          string `json:"ssoUrl"`
    CanSelfRegister bool   `json:"canSelfRegister"`
    IsUser          bool   `json:"isUser"`
    SsoError        string `json:"ssoError"`
}

// GettableInvitation overrides base object and adds precheck into
// response
type GettableInvitation struct {
    *basemodel.InvitationResponseObject
    Precheck *basemodel.PrecheckResponse `json:"precheck"`
    Precheck *PrecheckResponse `json:"precheck"`
}

@@ -2,7 +2,6 @@ package model

import (
    "fmt"

    basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

@@ -62,6 +61,7 @@ func InternalError(err error) *ApiError {
    }
}

// InternalErrorStr returns a ApiError object of internal type for string input
func InternalErrorStr(s string) *ApiError {
    return &ApiError{
@@ -69,7 +69,6 @@ func InternalErrorStr(s string) *ApiError {
        Err: fmt.Errorf(s),
    }
}

var (
    ErrorNone    basemodel.ErrorType = ""
    ErrorTimeout basemodel.ErrorType = "timeout"

@@ -1,10 +0,0 @@
package model

type PAT struct {
    Id        string `json:"id" db:"id"`
    UserID    string `json:"userId" db:"user_id"`
    Token     string `json:"token" db:"token"`
    Name      string `json:"name" db:"name"`
    CreatedAt int64  `json:"createdAt" db:"created_at"`
    ExpiresAt int64  `json:"expiresAt" db:"expires_at"` // unused as of now
}
@@ -9,287 +9,23 @@ const Basic = "BASIC_PLAN"
const Pro = "PRO_PLAN"
const Enterprise = "ENTERPRISE_PLAN"
const DisableUpsell = "DISABLE_UPSELL"
const Onboarding = "ONBOARDING"
const ChatSupport = "CHAT_SUPPORT"

var BasicPlan = basemodel.FeatureSet{
    basemodel.Feature{
        Name:       SSO,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.OSS,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       DisableUpsell,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.SmartTraceDetail,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.CustomMetricsFunction,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderPanels,
        Active:     true,
        Usage:      0,
        UsageLimit: 5,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderAlerts,
        Active:     true,
        Usage:      0,
        UsageLimit: 5,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelSlack,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelWebhook,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelPagerduty,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelOpsgenie,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelMsTeams,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.UseSpanMetrics,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    Basic:         true,
    SSO:           false,
    DisableUpsell: false,
}

var ProPlan = basemodel.FeatureSet{
    basemodel.Feature{
        Name:       SSO,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.OSS,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.SmartTraceDetail,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.CustomMetricsFunction,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderPanels,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderAlerts,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelSlack,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelWebhook,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelPagerduty,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelOpsgenie,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelMsTeams,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.UseSpanMetrics,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    Pro: true,
    SSO: true,
    basemodel.SmartTraceDetail:      true,
    basemodel.CustomMetricsFunction: true,
}

var EnterprisePlan = basemodel.FeatureSet{
    basemodel.Feature{
        Name:       SSO,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.OSS,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.SmartTraceDetail,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.CustomMetricsFunction,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderPanels,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderAlerts,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelSlack,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelWebhook,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelPagerduty,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelOpsgenie,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelMsTeams,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.UseSpanMetrics,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       Onboarding,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       ChatSupport,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    Enterprise: true,
    SSO:        true,
    basemodel.SmartTraceDetail:      true,
    basemodel.CustomMetricsFunction: true,
}

@@ -9,7 +9,6 @@ import (
    "time"

    "github.com/ClickHouse/clickhouse-go/v2"
    "github.com/go-co-op/gocron"
    "github.com/google/uuid"
    "github.com/jmoiron/sqlx"

@@ -29,6 +28,9 @@ const (
)

var (
    // send usage every 24 hour
    uploadFrequency = 24 * time.Hour

    locker = stateUnlocked
)

@@ -37,7 +39,12 @@ type Manager struct {

    licenseRepo *license.Repo

    scheduler *gocron.Scheduler
    // end the usage routine, this is important to gracefully
    // stopping usage reporting and protect in-consistent updates
    done chan struct{}

    // terminated waits for the UsageExporter go routine to end
    terminated chan struct{}
}

func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
@@ -46,7 +53,6 @@ func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn c
        // repository: repo,
        clickhouseConn: clickhouseConn,
        licenseRepo:    licenseRepo,
        scheduler:      gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every at 00:00 UTC
    }
    return m, nil
}
@@ -58,30 +64,37 @@ func (lm *Manager) Start() error {
        return fmt.Errorf("usage exporter is locked")
    }

    _, err := lm.scheduler.Do(func() { lm.UploadUsage() })
    if err != nil {
        return err
    }

    // upload usage once when starting the service
    lm.UploadUsage()

    lm.scheduler.StartAsync()
    go lm.UsageExporter(context.Background())

    return nil
}
func (lm *Manager) UploadUsage() {
    ctx := context.Background()

func (lm *Manager) UsageExporter(ctx context.Context) {
    defer close(lm.terminated)

    uploadTicker := time.NewTicker(uploadFrequency)
    defer uploadTicker.Stop()

    for {
        select {
        case <-lm.done:
            return
        case <-uploadTicker.C:
            lm.UploadUsage(ctx)
        }
    }
}

func (lm *Manager) UploadUsage(ctx context.Context) error {
    // check if license is present or not
    license, err := lm.licenseRepo.GetActiveLicense(ctx)
    license, err := lm.licenseRepo.GetActiveLicense(context.Background())
    if err != nil {
        zap.S().Errorf("failed to get active license: %v", zap.Error(err))
        return
        return fmt.Errorf("failed to get active license")
    }
    if license == nil {
        // we will not start the usage reporting if license is not present.
        zap.S().Info("no license present, skipping usage reporting")
        return
        return nil
    }

    usages := []model.UsageDB{}
@@ -107,8 +120,7 @@ func (lm *Manager) UploadUsage() {
        dbusages := []model.UsageDB{}
        err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
        if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
            zap.S().Errorf("failed to get usage from clickhouse: %v", zap.Error(err))
            return
            return err
        }
        for _, u := range dbusages {
            u.Type = db
@@ -118,7 +130,7 @@ func (lm *Manager) UploadUsage() {

    if len(usages) <= 0 {
        zap.S().Info("no snapshots to upload, skipping.")
        return
        return nil
    }

    zap.S().Info("uploading usage data")
@@ -127,15 +139,13 @@ func (lm *Manager) UploadUsage() {
    for _, usage := range usages {
        usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
        if err != nil {
            zap.S().Errorf("error while decrypting usage data: %v", zap.Error(err))
            return
            return err
        }

        usageData := model.Usage{}
        err = json.Unmarshal(usageDataBytes, &usageData)
        if err != nil {
            zap.S().Errorf("error while unmarshalling usage data: %v", zap.Error(err))
            return
            return err
        }

        usageData.CollectorID = usage.CollectorID
@@ -150,16 +160,20 @@ func (lm *Manager) UploadUsage() {
        LicenseKey: key,
        Usage:      usagesPayload,
    }
    lm.UploadUsageWithExponentalBackOff(ctx, payload)
    err = lm.UploadUsageWithExponentalBackOff(ctx, payload)
    if err != nil {
        return err
    }
    return nil
}

func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) {
func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) error {
    for i := 1; i <= MaxRetries; i++ {
        apiErr := licenseserver.SendUsage(ctx, payload)
        if apiErr != nil && i == MaxRetries {
            zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
            // not returning error here since it is captured in the failed count
            return
            return nil
        } else if apiErr != nil {
            // sleeping for exponential backoff
            sleepDuration := RetryInterval * time.Duration(i)
@@ -169,14 +183,11 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
            break
        }
    }
    return nil
}
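Note that the retry helper above sleeps `RetryInterval * time.Duration(i)`, which grows linearly even though the function is named UploadUsageWithExponentalBackOff. For comparison, a sketch of a genuinely exponential (doubling) delay; sendFn stands in for licenseserver.SendUsage, and the constants only mirror this file's MaxRetries and RetryInterval by assumption:

```go
// Sketch of a doubling backoff, shown for contrast with the linear
// RetryInterval*i delay above. Constants and sendFn are assumptions.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

const maxRetries = 3
const retryInterval = 5 * time.Second

func uploadWithExponentialBackoff(ctx context.Context, sendFn func(context.Context) error) error {
	delay := retryInterval
	for attempt := 1; attempt <= maxRetries; attempt++ {
		if err := sendFn(ctx); err == nil {
			return nil
		} else if attempt == maxRetries {
			return fmt.Errorf("retries exhausted: %w", err)
		}
		// Wait before the next attempt, honoring cancellation.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}
		delay *= 2 // double the wait on every failed attempt
	}
	return errors.New("unreachable")
}
```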
|
||||
func (lm *Manager) Stop() {
|
||||
lm.scheduler.Stop()
|
||||
|
||||
zap.S().Debug("sending usage data before shutting down")
|
||||
// send usage before shutting down
|
||||
lm.UploadUsage()
|
||||
|
||||
close(lm.done)
|
||||
atomic.StoreUint32(&locker, stateUnlocked)
|
||||
<-lm.terminated
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
{
"presets": [
"@babel/preset-env",
["@babel/preset-react", { "runtime": "automatic" }],
"@babel/preset-react",
"@babel/preset-typescript"
],
"plugins": [
@@ -1,4 +1,5 @@
node_modules
.vscode
build
.env
.git
@@ -16,7 +16,6 @@ module.exports = {
'plugin:sonarjs/recommended',
'plugin:import/errors',
'plugin:import/warnings',
'plugin:react/jsx-runtime',
],
parser: '@typescript-eslint/parser',
parserOptions: {
@@ -59,7 +58,7 @@ module.exports = {
'react/no-array-index-key': 'error',
'linebreak-style': [
'error',
process.env.platform === 'win32' ? 'windows' : 'unix',
process.platform === 'win32' ? 'windows' : 'unix',
],
'@typescript-eslint/default-param-last': 'off',

@@ -103,10 +102,9 @@ module.exports = {
},
],
'@typescript-eslint/no-unused-vars': 'error',
'func-style': ['error', 'declaration', { allowArrowFunctions: true }],
'arrow-body-style': ['error', 'as-needed'],

// eslint rules need to remove
'no-shadow': 'off',
'@typescript-eslint/no-shadow': 'off',
'import/no-cycle': 'off',
@@ -1,4 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"

cd frontend && yarn run commitlint --edit $1
cd frontend && npm run commitlint
@@ -1,2 +0,0 @@
network-timeout 600000
save-prefix ""
@@ -1,56 +0,0 @@
# **Frontend Guidelines**

Embrace the spirit of collaboration and contribute to the success of our open-source project by adhering to these frontend development guidelines with precision and passion.

### React and Components

- Strive to create small and modular components, ensuring they are divided into individual pieces for improved maintainability and reusability.
- Avoid passing inline objects or functions as props to React components, as they are recreated on each render cycle. Utilize careful memoization of functions and variables, balancing optimization efforts against potential performance issues (see the sketch after this list). [When to useMemo and useCallback](https://kentcdodds.com/blog/usememo-and-usecallback) by Kent C. Dodds is quite helpful for this scenario.
- Minimize the use of inline functions wherever possible to enhance code readability and improve overall comprehension.
- Employ the appropriate usage of useMemo and useCallback hooks for effective memoization of values and functions.
- Determine the appropriate placement of components:
  - Pages should contain an aggregation of all components and containers.
  - Commonly used components should reside in the 'components' directory.
  - Parent components responsible for data manipulation should be placed in the 'container' directory.
- Strategically decide where to store data, either in global state or in local components:
  - Begin by storing data in local components and gradually transition to global state as necessary.
- Avoid importing the default namespace `React`: the project uses `v18`, so `import React from 'react'` is no longer needed.
- When a function requires more than three arguments (except when memoized), encapsulate them within an object to enhance readability and reduce parameter complexity.
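For illustration, here is a minimal sketch of the memoization guidance above; `UserRow`, `UserList`, and their props are invented for the example:

```tsx
import { memo, useCallback, useMemo, useState } from 'react';

interface UserRowProps {
  name: string;
  onSelect: (name: string) => void;
}

// memo() only pays off when the props it receives are stable between renders.
const UserRow = memo(function UserRow({
  name,
  onSelect,
}: UserRowProps): JSX.Element {
  return <li onClick={(): void => onSelect(name)}>{name}</li>;
});

function UserList({ users }: { users: string[] }): JSX.Element {
  const [query, setQuery] = useState('');

  // useMemo: recompute the filtered list only when its inputs change.
  const visibleUsers = useMemo(
    () => users.filter((user) => user.includes(query)),
    [users, query],
  );

  // useCallback: one stable function identity instead of a fresh inline
  // closure (and therefore a new prop) on every render.
  const handleSelect = useCallback((name: string): void => {
    setQuery(name);
  }, []);

  return (
    <ul>
      {visibleUsers.map((user) => (
        <UserRow key={user} name={user} onSelect={handleSelect} />
      ))}
    </ul>
  );
}

export default UserList;
```

Because `handleSelect` keeps a stable identity, the memoized `UserRow` re-renders only when its own data changes.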

### API and Services

- Avoid incorporating business logic within API/Service files, to maintain flexibility for consumers to handle it according to their specific needs.
- Employ the useQuery hook for fetching data and the useMutation hook for updating data, ensuring a consistent and efficient approach (a sketch follows the note below).
- Utilize the useQueryClient hook when updating the cache, facilitating smooth and effective management of data within the application.

**Note -** In our project, we are utilizing React Query v3. To gain a comprehensive understanding of its features and implementation, we recommend referring to the [official documentation](https://tanstack.com/query/v3/docs/react/overview) as a valuable resource.
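A minimal sketch of that split using the React Query v3 API; `getDashboards` and `renameDashboard` are hypothetical service functions that only fetch and return data, with no business logic of their own:

```ts
import { useMutation, useQuery, useQueryClient } from 'react-query';

// Hypothetical thin service layer; the module path is illustrative.
import { getDashboards, renameDashboard } from 'api/dashboard';

export function useDashboards() {
  // useQuery for reads: caching, retries and refetching come for free.
  return useQuery('dashboards', getDashboards);
}

export function useRenameDashboard() {
  const queryClient = useQueryClient();

  // useMutation for writes; invalidating the cached key makes every
  // useDashboards() consumer refetch fresh data.
  return useMutation(renameDashboard, {
    onSuccess: () => queryClient.invalidateQueries('dashboards'),
  });
}
```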

### Styling

- Refrain from using inline styling within React components, to maintain separation of concerns and promote a more maintainable codebase.
- Opt for the rem unit instead of px values to ensure better scalability and responsiveness across different devices and screen sizes (see the sketch below).
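A small sketch with styled-components (already a project dependency); the component name and values are illustrative:

```ts
import styled from 'styled-components';

// 1rem tracks the root font size (16px by default), so 0.875rem ≈ 14px,
// and the component scales if the root size changes; px values would not.
export const CardTitle = styled.h3`
  font-size: 0.875rem;
  margin-bottom: 1rem;
  padding: 0.5rem 1rem;
`;
```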

### Linting and Setup

- It is crucial to refrain from disabling ESLint and TypeScript errors within the project. If a specific rule must be disabled, provide a clear and justified explanation for doing so (see the sketch after this section). Maintaining the integrity of the linting and type-checking processes ensures code quality and consistency throughout the codebase.
- In our project, we rely on several essential ESLint plugins, namely:
  - [plugin:@typescript-eslint](https://typescript-eslint.io/rules/)
  - [airbnb styleguide](https://github.com/airbnb/javascript)
  - [plugin:sonarjs](https://github.com/SonarSource/eslint-plugin-sonarjs)

To ensure compliance with our coding standards and best practices, we encourage you to refer to the documentation of these plugins. Familiarizing yourself with the ESLint rules they provide will help maintain code quality and consistency throughout the project.
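When a rule genuinely has to be disabled, scope the disable to a single line and record both the reason and the removal condition. A hypothetical sketch; `legacyChartLib` is an invented untyped global:

```ts
// Invented untyped third-party global, declared only for the example.
declare const legacyChartLib: { render: (el: HTMLElement) => unknown };

export function mountChart(el: HTMLElement): void {
  // Justified, scoped exception: upstream typings do not describe the
  // chart instance yet; remove this disable once they ship.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const chart = legacyChartLib.render(el) as any;

  chart.resize(); // the untyped call that forced the exception
}
```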

### Naming Conventions

- Ensure that component names are written in Capital Case, while folder names should be in lowercase.
- Keep all other elements, such as variables, functions, and file names, in lowercase.

### Miscellaneous

- Ensure that functions are modularized and follow the Single Responsibility Principle (SRP). A function's name should accurately convey its purpose and functionality.
- Prioritize the semantic division of functions into smaller units for improved readability and maintainability. Aim to keep functions concise, not exceeding a maximum length of 40 lines, to enhance code understandability and ease of maintenance.
- Eliminate the use of hard-coded strings or enums, favoring a more flexible and maintainable approach.
- Strive to internationalize all strings within the codebase to support localization and improve accessibility for users across different languages.
- Minimize the usage of multiple if statements or switch cases within a function. Consider creating a mapper and separating logic into multiple functions for better code organization, as sketched below.
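A minimal sketch of the mapper approach; the `Severity` union and colour values are invented for the example:

```ts
type Severity = 'critical' | 'error' | 'warning' | 'info';

// The branching lives in data rather than in an if/switch chain, so adding
// a severity is a one-line change and the function body never grows.
const severityColor: Record<Severity, string> = {
  critical: 'red',
  error: 'orange',
  warning: 'yellow',
  info: 'blue',
};

export function getSeverityColor(severity: Severity): string {
  return severityColor[severity];
}
```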
@@ -1,5 +1,5 @@
# Builder stage
FROM node:16.15.0 as builder
FROM node:16.15.0-slim as builder

# Add Maintainer Info
LABEL maintainer="signoz"
@@ -9,11 +9,8 @@ ARG TARGETARCH

WORKDIR /frontend

# Copy the package.json and .yarnrc files prior to install dependencies
# Copy the package.json to install dependencies
COPY package.json ./
# Copy lock file
COPY yarn.lock ./
COPY .yarnrc ./

# Install the dependencies and make the folder
RUN CI=1 yarn install
@@ -24,7 +21,7 @@ COPY . .
RUN yarn build

FROM nginx:1.24.0-alpine
FROM nginx:1.18-alpine

COPY conf/default.conf /etc/nginx/conf.d/default.conf
@@ -1,7 +0,0 @@
NODE_ENV="development"
BUNDLE_ANALYSER="true"
FRONTEND_API_ENDPOINT="http://localhost:3301/"
INTERCOM_APP_ID="intercom-app-id"

PLAYWRIGHT_TEST_BASE_URL="http://localhost:3301"
CI="1"
@@ -15,19 +15,16 @@ const config: Config.InitialOptions = {
useESM: true,
},
},
testMatch: ['<rootDir>/src/**/*?(*.)(test).(ts|js)?(x)'],
testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
preset: 'ts-jest/presets/js-with-ts-esm',
transform: {
'^.+\\.(ts|tsx)?$': 'ts-jest',
'^.+\\.(js|jsx)$': 'babel-jest',
},
transformIgnorePatterns: [
'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend)/)',
],
transformIgnorePatterns: ['node_modules/(?!(lodash-es)/)'],
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
testPathIgnorePatterns: ['/node_modules/', '/public/'],
moduleDirectories: ['node_modules', 'src'],
testEnvironment: 'jest-environment-jsdom',
testEnvironmentOptions: {
'jest-playwright': {
browsers: ['chromium', 'firefox', 'webkit'],
@@ -1,20 +1,5 @@
/* eslint-disable @typescript-eslint/explicit-function-return-type */
/* eslint-disable object-shorthand */
/* eslint-disable func-names */

/**
 * Adds custom matchers from the react testing library to all tests
 */
import '@testing-library/jest-dom';
import 'jest-styled-components';

// Mock window.matchMedia
window.matchMedia =
window.matchMedia ||
function (): any {
return {
matches: false,
addListener: function () {},
removeListener: function () {},
};
};
@@ -27,42 +27,40 @@
"author": "",
"license": "ISC",
"dependencies": {
"@ant-design/colors": "6.0.0",
"@ant-design/icons": "4.8.0",
"@grafana/data": "^9.5.2",
"@mdx-js/loader": "2.3.0",
"@mdx-js/react": "2.3.0",
"@ant-design/colors": "^6.0.0",
"@ant-design/icons": "^4.6.2",
"@grafana/data": "^8.4.3",
"@monaco-editor/react": "^4.3.1",
"@uiw/react-md-editor": "3.23.5",
"@testing-library/jest-dom": "^5.11.4",
"@testing-library/react": "^11.1.0",
"@testing-library/user-event": "^12.1.10",
"@welldone-software/why-did-you-render": "^6.2.1",
"@xstate/react": "^3.0.0",
"ansi-to-html": "0.7.2",
"antd": "5.0.5",
"antd-table-saveas-excel": "2.2.1",
"antd": "4.19.2",
"axios": "^0.21.0",
"babel-eslint": "^10.1.0",
"babel-jest": "^29.6.4",
"babel-loader": "9.1.3",
"babel-jest": "^26.6.0",
"babel-loader": "8.1.0",
"babel-plugin-named-asset-import": "^0.3.7",
"babel-preset-minify": "^0.5.1",
"babel-preset-react-app": "^10.0.1",
"chart.js": "3.9.1",
"babel-preset-react-app": "^10.0.0",
"chart.js": "^3.4.0",
"chartjs-adapter-date-fns": "^2.0.0",
"chartjs-plugin-annotation": "^1.4.0",
"classnames": "2.3.2",
"color": "^4.2.1",
"color-alpha": "1.1.3",
"cross-env": "^7.0.3",
"css-loader": "4.3.0",
"css-minimizer-webpack-plugin": "5.0.1",
"css-minimizer-webpack-plugin": "^3.2.0",
"d3": "^6.2.0",
"d3-flame-graph": "^3.1.1",
"d3-tip": "^0.9.1",
"dayjs": "^1.10.7",
"dompurify": "3.0.0",
"dotenv": "8.2.0",
"event-source-polyfill": "1.0.31",
"eventemitter3": "5.0.1",
"file-loader": "6.1.1",
"fontfaceobserver": "2.3.0",
"flat": "^5.0.2",
"history": "4.10.1",
"html-webpack-plugin": "5.5.0",
"html-webpack-plugin": "5.1.0",
"i18next": "^21.6.12",
"i18next-browser-languagedetector": "^6.1.3",
"i18next-http-backend": "^1.3.2",
@@ -72,28 +70,22 @@
"less-loader": "^10.2.0",
"lodash-es": "^4.17.21",
"mini-css-extract-plugin": "2.4.5",
"papaparse": "5.4.1",
"react": "18.2.0",
"react-addons-update": "15.6.3",
"react-dnd": "16.0.1",
"react-dnd-html5-backend": "16.0.1",
"react-dom": "18.2.0",
"react-drag-listview": "2.0.0",
"react-force-graph": "^1.43.0",
"react": "17.0.0",
"react-dom": "17.0.0",
"react-force-graph": "^1.41.0",
"react-graph-vis": "^1.0.5",
"react-grid-layout": "^1.3.4",
"react-helmet-async": "1.3.0",
"react-i18next": "^11.16.1",
"react-intersection-observer": "9.4.1",
"react-query": "^3.34.19",
"react-redux": "^7.2.2",
"react-router-dom": "^5.2.0",
"react-use": "^17.3.2",
"react-virtuoso": "4.0.3",
"react-vis": "^1.11.7",
"redux": "^4.0.5",
"redux-thunk": "^2.3.0",
"stream": "^0.0.2",
"style-loader": "1.3.0",
"styled-components": "^5.3.11",
"styled-components": "^5.2.1",
"terser-webpack-plugin": "^5.2.5",
"timestamp-nano": "^1.0.0",
"ts-node": "^10.2.1",
@@ -101,8 +93,8 @@
"typescript": "^4.0.5",
"uuid": "^8.3.2",
"web-vitals": "^0.2.4",
"webpack": "5.88.2",
"webpack-dev-server": "^4.15.1",
"webpack": "^5.23.0",
"webpack-dev-server": "^4.3.1",
"xstate": "^4.31.0"
},
"browserslist": {
@@ -118,56 +110,53 @@
]
},
"devDependencies": {
"@babel/core": "^7.22.11",
"@babel/plugin-proposal-class-properties": "^7.18.6",
"@babel/core": "^7.12.3",
"@babel/plugin-proposal-class-properties": "^7.12.13",
"@babel/plugin-syntax-jsx": "^7.12.13",
"@babel/preset-env": "^7.22.14",
"@babel/preset-env": "^7.12.17",
"@babel/preset-react": "^7.12.13",
"@babel/preset-typescript": "^7.21.4",
"@commitlint/cli": "^16.3.0",
"@babel/preset-typescript": "^7.12.17",
"@commitlint/cli": "^16.2.4",
"@commitlint/config-conventional": "^16.2.4",
"@jest/globals": "^27.5.1",
"@playwright/test": "^1.22.0",
"@testing-library/jest-dom": "5.16.5",
"@testing-library/react": "13.4.0",
"@testing-library/user-event": "14.4.3",
"@testing-library/react-hooks": "^7.0.2",
"@types/color": "^3.0.3",
"@types/compression-webpack-plugin": "^9.0.0",
"@types/copy-webpack-plugin": "^8.0.1",
"@types/dompurify": "^2.4.0",
"@types/d3": "^6.2.0",
"@types/d3-tip": "^3.5.5",
"@types/event-source-polyfill": "^1.0.0",
"@types/fontfaceobserver": "2.1.0",
"@types/flat": "^5.0.2",
"@types/jest": "^27.5.1",
"@types/lodash-es": "^4.17.4",
"@types/mini-css-extract-plugin": "^2.5.1",
"@types/node": "^16.10.3",
"@types/papaparse": "5.3.7",
"@types/react": "18.0.26",
"@types/react-addons-update": "0.14.21",
"@types/react-dom": "18.0.10",
"@types/react": "^17.0.0",
"@types/react-dom": "^16.9.9",
"@types/react-grid-layout": "^1.1.2",
"@types/react-helmet-async": "1.0.3",
"@types/react-redux": "^7.1.11",
"@types/react-resizable": "3.0.3",
"@types/react-router-dom": "^5.1.6",
"@types/redux": "^3.6.0",
"@types/styled-components": "^5.1.4",
"@types/uuid": "^8.3.1",
"@types/vis": "^4.21.21",
"@types/webpack": "^5.28.0",
"@types/webpack-dev-server": "^4.7.2",
"@typescript-eslint/eslint-plugin": "^4.33.0",
"@typescript-eslint/parser": "^4.33.0",
"@types/webpack-dev-server": "^4.3.0",
"@typescript-eslint/eslint-plugin": "^4.28.2",
"@typescript-eslint/parser": "^4.28.2",
"autoprefixer": "^9.0.0",
"babel-plugin-styled-components": "^1.12.0",
"compression-webpack-plugin": "9.0.0",
"copy-webpack-plugin": "^8.1.0",
"critters-webpack-plugin": "^3.0.1",
"eslint": "^7.32.0",
"eslint": "^7.30.0",
"eslint-config-airbnb": "^19.0.4",
"eslint-config-airbnb-typescript": "^16.1.4",
"eslint-config-prettier": "^8.3.0",
"eslint-config-standard": "^16.0.3",
"eslint-plugin-import": "^2.28.1",
"eslint-plugin-jest": "^26.9.0",
"eslint-plugin-import": "^2.25.4",
"eslint-plugin-jest": "^26.1.2",
"eslint-plugin-jsx-a11y": "^6.5.1",
"eslint-plugin-node": "^11.1.0",
"eslint-plugin-prettier": "^4.0.0",
@@ -178,19 +167,16 @@
"eslint-plugin-sonarjs": "^0.12.0",
"husky": "^7.0.4",
"is-ci": "^3.0.1",
"jest-playwright-preset": "^1.7.2",
"jest-playwright-preset": "^1.7.0",
"jest-styled-components": "^7.0.8",
"lint-staged": "^12.5.0",
"less-plugin-npm-import": "^2.1.0",
"lint-staged": "^12.3.7",
"portfinder-sync": "^0.0.2",
"prettier": "2.2.1",
"react-hooks-testing-library": "0.6.0",
"react-hot-loader": "^4.13.0",
"react-resizable": "3.0.4",
"sass": "1.66.1",
"sass-loader": "13.3.2",
"ts-jest": "^27.1.5",
"ts-jest": "^27.1.4",
"ts-node": "^10.2.1",
"typescript-plugin-css-modules": "5.0.1",
"typescript-plugin-css-modules": "^3.4.0",
"webpack-bundle-analyzer": "^4.5.0",
"webpack-cli": "^4.9.2"
},
@@ -200,10 +186,7 @@
]
},
"resolutions": {
"@types/react": "18.0.26",
"@types/react-dom": "18.0.10",
"debug": "4.3.4",
"semver": "7.5.4",
"xml2js": "0.5.0"
"@types/react": "17.0.0",
"@types/react-dom": "17.0.0"
}
}
@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 103.53" style="enable-background:new 0 0 122.88 103.53" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;} .st0{fill:#1668dc;} .st1{fill:#FFFFFF;}</style><g><path class="st0" d="M5.47,0h111.93c3.01,0,5.47,2.46,5.47,5.47v92.58c0,3.01-2.46,5.47-5.47,5.47H5.47 c-3.01,0-5.47-2.46-5.47-5.47V5.47C0,2.46,2.46,0,5.47,0L5.47,0z M31.84,38.55l17.79,18.42l2.14,2.13l-2.12,2.16L31.68,80.31 l-5.07-5l15.85-16.15L26.81,43.6L31.84,38.55L31.84,38.55z M94.1,79.41H54.69v-6.84H94.1V79.41L94.1,79.41z M38.19,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S35,9.83,38.19,9.83L38.19,9.83z M18.95,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S15.75,9.83,18.95,9.83L18.95,9.83z M7.49,5.41 h107.91c1.15,0,2.09,0.94,2.09,2.09v18.32H5.4V7.5C5.4,6.35,6.34,5.41,7.49,5.41L7.49,5.41z"/></g></svg>
Before Width: | Height: | Size: 1.0 KiB |
@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 88.17" style="enable-background:new 0 0 122.88 88.17" xml:space="preserve"><style type="text/css">.st0{fill:#0091E2;}</style><g><path class="st0" d="M121.68,33.34c-0.34-0.28-3.42-2.62-10.03-2.62c-1.71,0-3.48,0.17-5.19,0.46c-1.25-8.72-8.49-12.94-8.78-13.16 l-1.77-1.03l-1.14,1.65c-1.42,2.22-2.51,4.73-3.13,7.29c-1.2,4.96-0.46,9.63,2.05,13.62c-3.02,1.71-7.92,2.11-8.95,2.17l-80.93,0 c-2.11,0-3.82,1.71-3.82,3.82c-0.11,7.07,1.08,14.13,3.53,20.8c2.79,7.29,6.95,12.71,12.31,16.01c6.04,3.7,15.9,5.81,27.01,5.81 c5.01,0,10.03-0.46,14.99-1.37c6.9-1.25,13.51-3.65,19.6-7.12c5.02-2.91,9.52-6.61,13.34-10.94c6.44-7.24,10.26-15.33,13.05-22.51 c0.4,0,0.74,0,1.14,0c7.01,0,11.34-2.79,13.73-5.19c1.6-1.48,2.79-3.31,3.65-5.36l0.51-1.48L121.68,33.34L121.68,33.34z M71.59,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C70.68,38.98,71.08,39.38,71.59,39.38L71.59,39.38z M56.49,11.63h10.83c0.51,0,0.97-0.4,0.97-0.97V0.97c0-0.51-0.46-0.97-0.97-0.97 L56.49,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C55.52,11.17,55.97,11.63,56.49,11.63L56.49,11.63z M56.49,25.53h10.83 c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.46-0.97-0.97-0.97H56.49c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C55.52,25.08,55.97,25.53,56.49,25.53L56.49,25.53z M41.5,25.53h10.83c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97 l0,0H41.5c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C40.53,25.08,40.93,25.53,41.5,25.53L41.5,25.53z M26.28,25.53h10.83 c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0H26.28c-0.51,0-0.97,0.4-0.97,0.97v9.69 C25.37,25.08,25.77,25.53,26.28,25.53L26.28,25.53z M56.49,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97 l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C55.52,38.98,55.97,39.38,56.49,39.38L56.49,39.38L56.49,39.38z M41.5,39.38 h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C40.53,38.98,40.93,39.38,41.5,39.38L41.5,39.38L41.5,39.38z M26.28,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69 c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97v9.69C25.37,38.98,25.77,39.38,26.28,39.38L26.28,39.38z M11.35,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0 v9.69C10.44,38.98,10.84,39.38,11.35,39.38L11.35,39.38L11.35,39.38z"/></g></svg>
Before Width: | Height: | Size: 2.5 KiB |
File diff suppressed because one or more lines are too long
Before Width: | Height: | Size: 5.9 KiB |
@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 109 122.88" style="enable-background:new 0 0 109 122.88" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;fill:#689F63;}</style><g><path class="st0" d="M68.43,87.08c-19.7,0-23.83-9.04-23.83-16.63c0-0.72,0.58-1.3,1.3-1.3h5.82c0.64,0,1.18,0.47,1.28,1.1 c0.88,5.93,3.49,8.92,15.41,8.92c9.49,0,13.52-2.14,13.52-7.18c0-2.9-1.15-5.05-15.89-6.49c-12.33-1.22-19.95-3.93-19.95-13.8 c0-9.08,7.66-14.49,20.5-14.49c14.42,0,21.56,5,22.46,15.76c0.03,0.37-0.1,0.73-0.35,1c-0.25,0.26-0.6,0.42-0.96,0.42H81.9 c-0.61,0-1.14-0.43-1.26-1.01c-1.41-6.23-4.81-8.23-14.07-8.23c-10.36,0-11.56,3.61-11.56,6.31c0,3.28,1.42,4.24,15.4,6.09 c13.84,1.84,20.41,4.43,20.41,14.16c0,9.81-8.18,15.43-22.45,15.43L68.43,87.08L68.43,87.08z M54.52,122.88 c-1.65,0-3.28-0.43-4.72-1.26l-15.03-8.9c-2.25-1.26-1.15-1.7-0.41-1.96c2.99-1.05,3.6-1.28,6.8-3.1c0.34-0.19,0.78-0.12,1.12,0.08 l11.55,6.85c0.42,0.23,1.01,0.23,1.4,0l45.03-25.99c0.42-0.24,0.69-0.72,0.69-1.22V35.43c0-0.52-0.27-0.98-0.7-1.24L55.23,8.22 c-0.42-0.25-0.97-0.25-1.39,0l-45,25.97c-0.44,0.25-0.71,0.73-0.71,1.23v51.96c0,0.5,0.27,0.97,0.7,1.21l12.33,7.12 c6.69,3.35,10.79-0.6,10.79-4.56V39.86c0-0.73,0.57-1.3,1.31-1.3l5.7,0c0.71,0,1.3,0.56,1.3,1.3v51.31 c0,8.93-4.87,14.05-13.33,14.05c-2.6,0-4.66,0-10.38-2.82L4.72,95.59C1.8,93.9,0,90.75,0,87.38V35.42c0-3.38,1.8-6.54,4.72-8.21 l45.07-26c2.85-1.61,6.64-1.61,9.47,0l45.02,26.01c2.91,1.68,4.72,4.82,4.72,8.21v51.96c0,3.37-1.81,6.51-4.72,8.21l-45.02,26 c-1.44,0.83-3.08,1.26-4.74,1.26L54.52,122.88L54.52,122.88z M54.52,122.88L54.52,122.88L54.52,122.88L54.52,122.88z"/></g></svg>
Before Width: | Height: | Size: 1.7 KiB |
@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 98.18" style="enable-background:new 0 0 122.88 98.18" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;} .st0{fill:#1668dc;} .st1{fill:#FFFFFF;}</style><g><path class="st0" d="M3.42,0h116.05c1.88,0,3.41,1.54,3.41,3.41v91.36c0,1.88-1.54,3.41-3.41,3.41l-116.05,0 C1.54,98.18,0,96.65,0,94.77L0,3.41C0,1.53,1.54,0,3.42,0L3.42,0L3.42,0z M25.89,8.19c2.05,0,3.72,1.67,3.72,3.72 c0,2.05-1.67,3.72-3.72,3.72c-2.05,0-3.72-1.67-3.72-3.72C22.17,9.85,23.83,8.19,25.89,8.19L25.89,8.19z M103.07,7.69l2.52,2.77 l2.52-2.77l1.97,1.79l-2.69,2.96l2.69,2.96l-1.97,1.79l-2.52-2.77l-2.52,2.77l-1.97-1.79l2.69-2.96l-2.69-2.96L103.07,7.69 L103.07,7.69z M14.52,8.19c2.05,0,3.72,1.67,3.72,3.72c0,2.05-1.67,3.72-3.72,3.72c-2.05,0-3.72-1.67-3.72-3.72 C10.79,9.85,12.46,8.19,14.52,8.19L14.52,8.19z M37.26,8.19c2.05,0,3.72,1.67,3.72,3.72c0,2.05-1.67,3.72-3.72,3.72 c-2.05,0-3.72-1.67-3.72-3.72C33.54,9.85,35.21,8.19,37.26,8.19L37.26,8.19z M14.05,22.75h93.33c1.77,0,3.22,1.49,3.22,3.22v59.2 c0,1.73-1.49,3.22-3.22,3.22l-93.33,0c-1.73,0-3.22-1.45-3.22-3.22v-59.2C10.84,24.2,12.29,22.75,14.05,22.75L14.05,22.75 L14.05,22.75z"/></g></svg>
Before Width: | Height: | Size: 1.3 KiB |
File diff suppressed because one or more lines are too long
Before Width: | Height: | Size: 6.2 KiB |
10
frontend/public/css/antd.dark.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
10
frontend/public/css/antd.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
@@ -1,113 +1,112 @@
{
"target_missing": "Please enter a threshold to proceed",
"rule_test_fired": "Test notification sent successfully",
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
"button_testrule": "Test Notification",
"label_channel_select": "Notification Channels",
"placeholder_channel_select": "select one or more channels",
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
"preview_chart_threshold_label": "Threshold",
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
"button_yes": "Yes",
"button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared",
"alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Step 3 - Alert Configuration",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with",
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
"rule_created": "Rule created successfully",
"rule_edited": "Rule edited successfully",
"expression_missing": "expression is missing in {{where}}",
"metricname_missing": "metric name is missing in {{where}}",
"condition_required": "at least one metric condition is required",
"alertname_required": "alert name is required",
"promql_required": "promql expression is required when query format is set to PromQL",
"chquery_required": "query is required when query format is set to ClickHouse",
"button_savechanges": "Save Rule",
"button_createrule": "Create Rule",
"button_returntorules": "Return to rules",
"button_cancelchanges": "Cancel",
"button_discard": "Discard",
"text_condition1": "Send a notification when the metric is",
"text_condition2": "the threshold",
"text_condition3": "during the last",
"option_5min": "5 mins",
"option_10min": "10 mins",
"option_15min": "15 mins",
"option_60min": "60 mins",
"option_4hours": "4 hours",
"option_24hours": "24 hours",
"field_threshold": "Alert Threshold",
"option_allthetimes": "all the times",
"option_atleastonce": "at least once",
"option_onaverage": "on average",
"option_intotal": "in total",
"option_above": "above",
"option_below": "below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
"button_formula": "Formula",
"tab_qb": "Query Builder",
"tab_promql": "PromQL",
"tab_chquery": "ClickHouse Query",
"title_confirm": "Confirm",
"button_ok": "Yes",
"button_cancel": "No",
"field_promql_expr": "PromQL Expression",
"field_alert_name": "Alert Name",
"field_alert_desc": "Alert Description",
"field_labels": "Labels",
"field_severity": "Severity",
"option_critical": "Critical",
"option_error": "Error",
"option_warning": "Warning",
"option_info": "Info",
"user_guide_headline": "Steps to create an Alert",
"user_guide_qb_step1": "Step 1 - Define the metric",
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
"user_guide_qb_step2b": "Enter the Alert threshold",
"user_guide_qb_step3": "Step 3 -Alert Configuration",
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_pql_step1": "Step 1 - Define the metric",
"user_guide_pql_step1a": "Write a PromQL query for the metric",
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_pql_step2b": "Enter the Alert threshold",
"user_guide_pql_step3": "Step 3 -Alert Configuration",
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_ch_step1": "Step 1 - Define the metric",
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_ch_step2b": "Enter the Alert threshold",
"user_guide_ch_step3": "Step 3 -Alert Configuration",
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts",
"choose_alert_type": "Choose a type for the alert:",
"metric_based_alert": "Metric based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
"log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert",
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
"exceptions_based_alert": "Exceptions-based Alert",
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.",
"field_unit": "Threshold unit"
}
"target_missing": "Please enter a threshold to proceed",
"rule_test_fired": "Test notification sent successfully",
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
"button_testrule": "Test Notification",
"label_channel_select": "Notification Channels",
"placeholder_channel_select": "select one or more channels",
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
"preview_chart_threshold_label": "Threshold",
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
"button_yes": "Yes",
"button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared",
"alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Step 3 - Alert Configuration",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with",
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
"rule_created": "Rule created successfully",
"rule_edited": "Rule edited successfully",
"expression_missing": "expression is missing in {{where}}",
"metricname_missing": "metric name is missing in {{where}}",
"condition_required": "at least one metric condition is required",
"alertname_required": "alert name is required",
"promql_required": "promql expression is required when query format is set to PromQL",
"chquery_required": "query is required when query format is set to ClickHouse",
"button_savechanges": "Save Rule",
"button_createrule": "Create Rule",
"button_returntorules": "Return to rules",
"button_cancelchanges": "Cancel",
"button_discard": "Discard",
"text_condition1": "Send a notification when the metric is",
"text_condition2": "the threshold",
"text_condition3": "during the last",
"option_5min": "5 mins",
"option_10min": "10 mins",
"option_15min": "15 mins",
"option_60min": "60 mins",
"option_4hours": "4 hours",
"option_24hours": "24 hours",
"field_threshold": "Alert Threshold",
"option_allthetimes": "all the times",
"option_atleastonce": "at least once",
"option_onaverage": "on average",
"option_intotal": "in total",
"option_above": "above",
"option_below": "below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
"button_formula": "Formula",
"tab_qb": "Query Builder",
"tab_promql": "PromQL",
"tab_chquery": "ClickHouse Query",
"title_confirm": "Confirm",
"button_ok": "Yes",
"button_cancel": "No",
"field_promql_expr": "PromQL Expression",
"field_alert_name": "Alert Name",
"field_alert_desc": "Alert Description",
"field_labels": "Labels",
"field_severity": "Severity",
"option_critical": "Critical",
"option_error": "Error",
"option_warning": "Warning",
"option_info": "Info",
"user_guide_headline": "Steps to create an Alert",
"user_guide_qb_step1": "Step 1 - Define the metric",
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
"user_guide_qb_step2b": "Enter the Alert threshold",
"user_guide_qb_step3": "Step 3 -Alert Configuration",
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_pql_step1": "Step 1 - Define the metric",
"user_guide_pql_step1a": "Write a PromQL query for the metric",
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_pql_step2b": "Enter the Alert threshold",
"user_guide_pql_step3": "Step 3 -Alert Configuration",
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_ch_step1": "Step 1 - Define the metric",
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_ch_step2b": "Enter the Alert threshold",
"user_guide_ch_step3": "Step 3 -Alert Configuration",
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts",
"choose_alert_type": "Choose a type for the alert:",
"metric_based_alert": "Metric based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
"log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert",
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
"exceptions_based_alert": "Exceptions-based Alert",
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data."
}
@@ -1,3 +0,0 @@
{
"name_of_the_view": "Name of the view"
}
@@ -1 +0,0 @@
{ "fetching_log_lines": "Fetching log lines" }
@@ -5,9 +5,5 @@
"my_settings": "My Settings",
"overview_metrics": "Overview Metrics",
"dbcall_metrics": "Database Calls",
"external_metrics": "External Calls",
"pipeline": "Pipeline",
"pipelines": "Pipelines",
"archives": "Archives",
"logs_to_metrics": "Logs To Metrics"
"external_metrics": "External Calls"
}
@@ -8,7 +8,7 @@
"label_orgname": "Organization Name",
"placeholder_orgname": "Your Company",
"prompt_keepme_posted": "Keep me updated on new SigNoz features",
"prompt_anonymise": "Anonymise my usage data. We collect data to measure product usage",
"prompt_anonymise": "Anonymise my usage date. We collect data to measure product usage",
"failed_confirm_password": "Passwords don’t match. Please try again",
"unexpected_error": "Something went wrong",
"failed_to_initiate_login": "Signup completed but failed to initiate login",
@@ -1,37 +0,0 @@
{
"SIGN_UP": "SigNoz | Sign Up",
"LOGIN": "SigNoz | Login",
"GET_STARTED": "SigNoz | Get Started",
"SERVICE_METRICS": "SigNoz | Service Metrics",
"SERVICE_MAP": "SigNoz | Service Map",
"TRACE": "SigNoz | Trace",
"TRACE_DETAIL": "SigNoz | Trace Detail",
"TRACES_EXPLORER": "SigNoz | Traces Explorer",
"SETTINGS": "SigNoz | Settings",
"USAGE_EXPLORER": "SigNoz | Usage Explorer",
"APPLICATION": "SigNoz | Home",
"ALL_DASHBOARD": "SigNoz | All Dashboards",
"DASHBOARD": "SigNoz | Dashboard",
"DASHBOARD_WIDGET": "SigNoz | Dashboard Widget",
"EDIT_ALERTS": "SigNoz | Edit Alerts",
"LIST_ALL_ALERT": "SigNoz | All Alerts",
"ALERTS_NEW": "SigNoz | New Alert",
"ALL_CHANNELS": "SigNoz | All Channels",
"CHANNELS_NEW": "SigNoz | New Channel",
"CHANNELS_EDIT": "SigNoz | Edit Channel",
"ALL_ERROR": "SigNoz | All Errors",
"ERROR_DETAIL": "SigNoz | Error Detail",
"VERSION": "SigNoz | Version",
"MY_SETTINGS": "SigNoz | My Settings",
"ORG_SETTINGS": "SigNoz | Organization Settings",
"SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong",
"UN_AUTHORIZED": "SigNoz | Unauthorized",
"NOT_FOUND": "SigNoz | Page Not Found",
"LOGS": "SigNoz | Logs",
"LOGS_EXPLORER": "SigNoz | Logs Explorer",
"LIVE_LOGS": "SigNoz | Live Logs",
"HOME_PAGE": "Open source Observability Platform | SigNoz",
"PASSWORD_RESET": "SigNoz | Password Reset",
"LIST_LICENSES": "SigNoz | List of Licenses",
"DEFAULT": "Open source Observability Platform | SigNoz"
}
|
||||
{
|
||||
"options_menu": {
|
||||
"options": "Options",
|
||||
"format": "Format",
|
||||
"raw": "Raw",
|
||||
"default": "Default",
|
||||
"column": "Column",
|
||||
"maxLines": "Max lines per Row",
|
||||
"addColumn": "Add a column"
|
||||
}
|
||||
}
|
||||
@@ -12,8 +12,6 @@
"routes": {
"general": "General",
"alert_channels": "Alert Channels",
"all_errors": "All Exceptions",
"index_fields": "Index Fields",
"pipelines": "Pipelines"
"all_errors": "All Exceptions"
}
}
@@ -1,113 +1,112 @@
{
"target_missing": "Please enter a threshold to proceed",
"rule_test_fired": "Test notification sent successfully",
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
"button_testrule": "Test Notification",
"label_channel_select": "Notification Channels",
"placeholder_channel_select": "select one or more channels",
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
"preview_chart_threshold_label": "Threshold",
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
"button_yes": "Yes",
"button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared",
"alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Step 3 - Alert Configuration",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with",
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
"rule_created": "Rule created successfully",
"rule_edited": "Rule edited successfully",
"expression_missing": "expression is missing in {{where}}",
"metricname_missing": "metric name is missing in {{where}}",
"condition_required": "at least one metric condition is required",
"alertname_required": "alert name is required",
"promql_required": "promql expression is required when query format is set to PromQL",
"chquery_required": "query is required when query format is set to ClickHouse",
"button_savechanges": "Save Rule",
"button_createrule": "Create Rule",
"button_returntorules": "Return to rules",
"button_cancelchanges": "Cancel",
"button_discard": "Discard",
"text_condition1": "Send a notification when the metric is",
"text_condition2": "the threshold",
"text_condition3": "during the last",
"option_5min": "5 mins",
"option_10min": "10 mins",
"option_15min": "15 mins",
"option_60min": "60 mins",
"option_4hours": "4 hours",
"option_24hours": "24 hours",
"field_threshold": "Alert Threshold",
"option_allthetimes": "all the times",
"option_atleastonce": "at least once",
"option_onaverage": "on average",
"option_intotal": "in total",
"option_above": "above",
"option_below": "below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
"button_formula": "Formula",
"tab_qb": "Query Builder",
"tab_promql": "PromQL",
"tab_chquery": "ClickHouse Query",
"title_confirm": "Confirm",
"button_ok": "Yes",
"button_cancel": "No",
"field_promql_expr": "PromQL Expression",
"field_alert_name": "Alert Name",
"field_alert_desc": "Alert Description",
"field_labels": "Labels",
"field_severity": "Severity",
"option_critical": "Critical",
"option_error": "Error",
"option_warning": "Warning",
"option_info": "Info",
"user_guide_headline": "Steps to create an Alert",
"user_guide_qb_step1": "Step 1 - Define the metric",
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
"user_guide_qb_step2b": "Enter the Alert threshold",
"user_guide_qb_step3": "Step 3 -Alert Configuration",
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_pql_step1": "Step 1 - Define the metric",
"user_guide_pql_step1a": "Write a PromQL query for the metric",
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_pql_step2b": "Enter the Alert threshold",
"user_guide_pql_step3": "Step 3 -Alert Configuration",
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_ch_step1": "Step 1 - Define the metric",
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_ch_step2b": "Enter the Alert threshold",
"user_guide_ch_step3": "Step 3 -Alert Configuration",
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts",
"choose_alert_type": "Choose a type for the alert:",
"metric_based_alert": "Metric based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
"log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert",
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
"exceptions_based_alert": "Exceptions-based Alert",
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.",
"field_unit": "Threshold unit"
}
"target_missing": "Please enter a threshold to proceed",
"rule_test_fired": "Test notification sent successfully",
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
"button_testrule": "Test Notification",
"label_channel_select": "Notification Channels",
"placeholder_channel_select": "select one or more channels",
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
"preview_chart_threshold_label": "Threshold",
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
"button_yes": "Yes",
"button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared",
"alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Step 3 - Alert Configuration",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with",
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
"rule_created": "Rule created successfully",
"rule_edited": "Rule edited successfully",
"expression_missing": "expression is missing in {{where}}",
"metricname_missing": "metric name is missing in {{where}}",
"condition_required": "at least one metric condition is required",
"alertname_required": "alert name is required",
"promql_required": "promql expression is required when query format is set to PromQL",
"chquery_required": "query is required when query format is set to ClickHouse",
"button_savechanges": "Save Rule",
"button_createrule": "Create Rule",
"button_returntorules": "Return to rules",
"button_cancelchanges": "Cancel",
"button_discard": "Discard",
"text_condition1": "Send a notification when the metric is",
"text_condition2": "the threshold",
"text_condition3": "during the last",
"option_5min": "5 mins",
"option_10min": "10 mins",
"option_15min": "15 mins",
"option_60min": "60 mins",
"option_4hours": "4 hours",
"option_24hours": "24 hours",
"field_threshold": "Alert Threshold",
"option_allthetimes": "all the times",
"option_atleastonce": "at least once",
"option_onaverage": "on average",
"option_intotal": "in total",
"option_above": "above",
"option_below": "below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
"button_formula": "Formula",
"tab_qb": "Query Builder",
"tab_promql": "PromQL",
"tab_chquery": "ClickHouse Query",
"title_confirm": "Confirm",
"button_ok": "Yes",
"button_cancel": "No",
"field_promql_expr": "PromQL Expression",
"field_alert_name": "Alert Name",
"field_alert_desc": "Alert Description",
"field_labels": "Labels",
"field_severity": "Severity",
"option_critical": "Critical",
"option_error": "Error",
"option_warning": "Warning",
"option_info": "Info",
"user_guide_headline": "Steps to create an Alert",
"user_guide_qb_step1": "Step 1 - Define the metric",
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
"user_guide_qb_step2b": "Enter the Alert threshold",
"user_guide_qb_step3": "Step 3 -Alert Configuration",
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_pql_step1": "Step 1 - Define the metric",
"user_guide_pql_step1a": "Write a PromQL query for the metric",
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_pql_step2b": "Enter the Alert threshold",
"user_guide_pql_step3": "Step 3 -Alert Configuration",
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_ch_step1": "Step 1 - Define the metric",
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_ch_step2b": "Enter the Alert threshold",
"user_guide_ch_step3": "Step 3 -Alert Configuration",
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts",
"choose_alert_type": "Choose a type for the alert:",
"metric_based_alert": "Metric based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
"log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert",
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
"exceptions_based_alert": "Exceptions-based Alert",
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data."
}
@@ -20,9 +20,6 @@
  "field_slack_recipient": "Recipient",
  "field_slack_title": "Title",
  "field_slack_description": "Description",
  "field_opsgenie_api_key": "API Key",
  "field_opsgenie_description": "Description",
  "placeholder_opsgenie_description": "Description",
  "field_webhook_username": "Username (optional)",
  "field_webhook_password": "Password (optional)",
  "field_pager_routing_key": "Routing Key",
@@ -34,12 +31,8 @@
  "field_pager_class": "Class",
  "field_pager_client": "Client",
  "field_pager_client_url": "Client URL",
  "field_opsgenie_message": "Message",
  "field_opsgenie_priority": "Priority",
  "placeholder_slack_description": "Description",
  "placeholder_pager_description": "Description",
  "placeholder_opsgenie_message": "Message",
  "placeholder_opsgenie_priority": "Priority",
  "help_pager_client": "Shows up as the event source in PagerDuty",
  "help_pager_client_url": "Shows up as the event source link in PagerDuty",
  "help_pager_class": "The class/type of the event",
@@ -50,9 +43,6 @@
  "help_webhook_username": "Leave empty for bearer auth or when authentication is not necessary.",
  "help_webhook_password": "Specify a password or bearer token",
  "help_pager_description": "Shows up as the description in PagerDuty",
  "help_opsgenie_message": "Shows up as the message in Opsgenie",
  "help_opsgenie_priority": "Priority of the incident",
  "help_opsgenie_description": "Shows up as the description in Opsgenie",
  "channel_creation_done": "Successfully created the channel",
  "channel_creation_failed": "An unexpected error occurred while creating this channel",
  "channel_edit_done": "Channel edited successfully",
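The two webhook help texts above encode an auth convention: a username plus a password means HTTP basic auth, while a password with the username left empty is sent as a bearer token. A sketch of that convention, assuming a Node-style `Buffer` for base64; this illustrates the rule the help texts describe, not the repo's actual sender code:

// Illustrative only: builds an Authorization header the way the
// help_webhook_username / help_webhook_password texts above describe.
function webhookAuthHeader(
	username?: string,
	password?: string,
): string | undefined {
	if (username && password) {
		// Both fields set: HTTP basic auth.
		return `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}`;
	}
	if (password) {
		// Username left empty: treat the password field as a bearer token.
		return `Bearer ${password}`;
	}
	return undefined; // authentication not necessary
}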
@@ -1,3 +0,0 @@
{
  "name_of_the_view": "Name of the view"
}
@@ -1 +0,0 @@
{ "fetching_log_lines": "Fetching log lines" }
@@ -1,44 +0,0 @@
{
  "delete": "Delete",
  "filter": "Filter",
  "update": "Update",
  "create": "Create",
  "reorder": "Reorder",
  "cancel": "Cancel",
  "reorder_pipeline": "Do you want to reorder the pipeline?",
  "reorder_pipeline_description": "Logs are processed sequentially in processors and pipelines. Reordering them may change how the data is processed.",
  "delete_pipeline": "Do you want to delete the pipeline?",
  "delete_pipeline_description": "Logs are processed sequentially in processors and pipelines. Deleting a pipeline may change the content of data processed by other pipelines & processors.",
  "add_new_pipeline": "Add a New Pipeline",
  "new_pipeline": "New Pipeline",
  "enter_edit_mode": "Enter Edit Mode",
  "save_configuration": "Save Configuration",
  "edit_pipeline": "Edit Pipeline",
  "create_pipeline": "Create New Pipeline",
  "add_new_processor": "Add Processor",
  "edit_processor": "Edit Processor",
  "create_processor": "Create New Processor",
  "processor_type": "Select Processor Type",
  "reorder_processor": "Do you want to reorder the processor?",
  "reorder_processor_description": "Logs are processed sequentially in processors. Reordering them may change how the data is processed.",
  "delete_processor": "Do you want to delete the processor?",
  "delete_processor_description": "Logs are processed sequentially in processors. Deleting a processor may change the content of data processed by other processors.",
  "search_pipeline_placeholder": "Filter Pipelines",
  "pipeline_name_placeholder": "Name",
  "pipeline_tags_placeholder": "Tags",
  "pipeline_description_placeholder": "Enter a description for your pipeline",
  "processor_name_placeholder": "Name",
  "processor_regex_placeholder": "Regex",
  "processor_parsefrom_placeholder": "Parse From",
  "processor_parseto_placeholder": "Parse To",
  "processor_onerror_placeholder": "On Error",
  "processor_pattern_placeholder": "Pattern",
  "processor_field_placeholder": "Field",
  "processor_value_placeholder": "Value",
  "processor_description_placeholder": "example rule: %{word:first}",
  "processor_trace_id_placeholder": "Trace ID Parse From",
  "processor_span_id_placeholder": "Span ID Parse From",
  "processor_trace_flags_placeholder": "Trace Flags Parse From",
  "processor_from_placeholder": "From",
  "processor_to_placeholder": "To"
}
@@ -5,9 +5,5 @@
  "my_settings": "My Settings",
  "overview_metrics": "Overview Metrics",
  "dbcall_metrics": "Database Calls",
  "external_metrics": "External Calls",
  "pipeline": "Pipeline",
  "pipelines": "Pipelines",
  "archives": "Archives",
  "logs_to_metrics": "Logs To Metrics"
  "external_metrics": "External Calls"
}
@@ -8,7 +8,7 @@
  "label_orgname": "Organization Name",
  "placeholder_orgname": "Your Company",
  "prompt_keepme_posted": "Keep me updated on new SigNoz features",
  "prompt_anonymise": "Anonymise my usage data. We collect data to measure product usage",
  "prompt_anonymise": "Anonymise my usage date. We collect data to measure product usage",
  "failed_confirm_password": "Passwords don’t match. Please try again",
  "unexpected_error": "Something went wrong",
  "failed_to_initiate_login": "Signup completed but failed to initiate login",
@@ -1,37 +0,0 @@
{
  "SIGN_UP": "SigNoz | Sign Up",
  "LOGIN": "SigNoz | Login",
  "SERVICE_METRICS": "SigNoz | Service Metrics",
  "SERVICE_MAP": "SigNoz | Service Map",
  "GET_STARTED": "SigNoz | Get Started",
  "TRACE": "SigNoz | Trace",
  "TRACE_DETAIL": "SigNoz | Trace Detail",
  "TRACES_EXPLORER": "SigNoz | Traces Explorer",
  "SETTINGS": "SigNoz | Settings",
  "USAGE_EXPLORER": "SigNoz | Usage Explorer",
  "APPLICATION": "SigNoz | Home",
  "ALL_DASHBOARD": "SigNoz | All Dashboards",
  "DASHBOARD": "SigNoz | Dashboard",
  "DASHBOARD_WIDGET": "SigNoz | Dashboard Widget",
  "EDIT_ALERTS": "SigNoz | Edit Alerts",
  "LIST_ALL_ALERT": "SigNoz | All Alerts",
  "ALERTS_NEW": "SigNoz | New Alert",
  "ALL_CHANNELS": "SigNoz | All Channels",
  "CHANNELS_NEW": "SigNoz | New Channel",
  "CHANNELS_EDIT": "SigNoz | Edit Channel",
  "ALL_ERROR": "SigNoz | All Errors",
  "ERROR_DETAIL": "SigNoz | Error Detail",
  "VERSION": "SigNoz | Version",
  "MY_SETTINGS": "SigNoz | My Settings",
  "ORG_SETTINGS": "SigNoz | Organization Settings",
  "SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong",
  "UN_AUTHORIZED": "SigNoz | Unauthorized",
  "NOT_FOUND": "SigNoz | Page Not Found",
  "LOGS": "SigNoz | Logs",
  "LOGS_EXPLORER": "SigNoz | Logs Explorer",
  "LIVE_LOGS": "SigNoz | Live Logs",
  "HOME_PAGE": "Open source Observability Platform | SigNoz",
  "PASSWORD_RESET": "SigNoz | Password Reset",
  "LIST_LICENSES": "SigNoz | List of Licenses",
  "DEFAULT": "Open source Observability Platform | SigNoz"
}
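The deleted file above maps route names to browser tab titles. A minimal sketch of how such keys are typically applied, again via react-i18next; the hook and the `titles` namespace are illustrative assumptions, not code from this diff:

import { useEffect } from 'react';
import { useTranslation } from 'react-i18next';

// Illustrative hook: sets the tab title from one of the keys above,
// e.g. useDocumentTitle('TRACES_EXPLORER') -> "SigNoz | Traces Explorer".
function useDocumentTitle(titleKey: string): void {
	const { t } = useTranslation('titles'); // namespace name is an assumption
	useEffect(() => {
		document.title = t(titleKey);
	}, [t, titleKey]);
}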
@@ -1,11 +0,0 @@
{
  "options_menu": {
    "options": "Options",
    "format": "Format",
    "raw": "Raw",
    "default": "Default",
    "column": "Column",
    "maxLines": "Max lines per Row",
    "addColumn": "Add a column"
  }
}
@@ -12,8 +12,6 @@
  "routes": {
    "general": "General",
    "alert_channels": "Alert Channels",
    "all_errors": "All Exceptions",
    "index_fields": "Index Fields",
    "pipelines": "Pipelines"
    "all_errors": "All Exceptions"
  }
}
@@ -1,13 +1,13 @@
/* eslint-disable react-hooks/exhaustive-deps */
import { notification } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import loginApi from 'api/user/login';
import { Logout } from 'api/utils';
import Spinner from 'components/Spinner';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';
import { useNotifications } from 'hooks/useNotifications';
import history from 'lib/history';
import { ReactChild, useEffect, useMemo } from 'react';
import React, { useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useDispatch, useSelector } from 'react-redux';
import { matchPath, Redirect, useLocation } from 'react-router-dom';
@@ -47,8 +47,6 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {

	const dispatch = useDispatch<Dispatch<AppActions>>();

	const { notifications } = useNotifications();

	const currentRoute = mapRoutes.get('current');

	const navigateToLoginIfNotLoggedIn = (isLoggedIn = isLoggedInState): void => {
@@ -108,7 +106,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
		} else {
			Logout();

			notifications.error({
			notification.error({
				message: response.error || t('something_went_wrong'),
			});
		}
@@ -161,7 +159,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
}

interface PrivateRouteProps {
	children: ReactChild;
	children: React.ReactChild;
}

export default PrivateRoute;
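The hunks above revert PrivateRoute from the `useNotifications` hook back to antd's static `notification` API. The hook itself is not part of this diff; a minimal sketch of what such a hook and the `NotificationProvider` imported in the App file below could look like, assuming antd v5's `notification.useNotification`; all names here are reconstructions, not the repo's code:

import { notification } from 'antd';
import type { NotificationInstance } from 'antd/es/notification/interface';
import { createContext, ReactNode, useContext } from 'react';

// Hypothetical reconstruction of hooks/useNotifications (not shown in this diff).
const NotificationContext = createContext<NotificationInstance | null>(null);

export function NotificationProvider({
	children,
}: {
	children: ReactNode;
}): JSX.Element {
	// antd returns a notification instance plus a context-holder element that
	// must be rendered inside the tree for theming to apply.
	const [api, contextHolder] = notification.useNotification();
	return (
		<NotificationContext.Provider value={api}>
			{contextHolder}
			{children}
		</NotificationContext.Provider>
	);
}

export function useNotifications(): { notifications: NotificationInstance } {
	const notifications = useContext(NotificationContext);
	if (!notifications) {
		throw new Error('useNotifications must be used within NotificationProvider');
	}
	return { notifications };
}

The static `notification.error` call the revert restores works without any provider, which is why the hook and its context can be dropped wholesale here.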
@@ -1,107 +1,37 @@
import { ConfigProvider } from 'antd';
import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import { FeatureKeys } from 'constants/features';
import ROUTES from 'constants/routes';
import AppLayout from 'container/AppLayout';
import { useThemeConfig } from 'hooks/useDarkMode';
import useGetFeatureFlag from 'hooks/useGetFeatureFlag';
import { NotificationProvider } from 'hooks/useNotifications';
import { ResourceProvider } from 'hooks/useResourceAttribute';
import history from 'lib/history';
import { QueryBuilderProvider } from 'providers/QueryBuilder';
import { Suspense, useState } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import React, { Suspense } from 'react';
import { Route, Router, Switch } from 'react-router-dom';
import { Dispatch } from 'redux';
import { AppState } from 'store/reducers';
import AppActions from 'types/actions';
import { UPDATE_FEATURE_FLAG_RESPONSE } from 'types/actions/app';
import AppReducer from 'types/reducer/app';

import PrivateRoute from './Private';
import defaultRoutes from './routes';
import routes from './routes';

function App(): JSX.Element {
	const themeConfig = useThemeConfig();
	const [routes, setRoutes] = useState(defaultRoutes);
	const { isLoggedIn: isLoggedInState, user } = useSelector<
		AppState,
		AppReducer
	>((state) => state.app);

	const dispatch = useDispatch<Dispatch<AppActions>>();

	const { hostname } = window.location;

	const featureResponse = useGetFeatureFlag((allFlags) => {
		const isOnboardingEnabled =
			allFlags.find((flag) => flag.name === FeatureKeys.ONBOARDING)?.active ||
			false;

		const isChatSupportEnabled =
			allFlags.find((flag) => flag.name === FeatureKeys.CHAT_SUPPORT)?.active ||
			false;

		dispatch({
			type: UPDATE_FEATURE_FLAG_RESPONSE,
			payload: {
				featureFlag: allFlags,
				refetch: featureResponse.refetch,
			},
		});

		if (
			!isOnboardingEnabled ||
			!(hostname && hostname.endsWith('signoz.cloud'))
		) {
			const newRoutes = routes.filter(
				(route) => route?.path !== ROUTES.GET_STARTED,
			);

			setRoutes(newRoutes);
		}

		if (isLoggedInState && isChatSupportEnabled) {
			// eslint-disable-next-line @typescript-eslint/ban-ts-comment
			// @ts-ignore
			window.Intercom('boot', {
				app_id: process.env.INTERCOM_APP_ID,
				email: user?.email || '',
				name: user?.name || '',
			});
		}
	});

	return (
		<ConfigProvider theme={themeConfig}>
			<Router history={history}>
				<NotificationProvider>
					<PrivateRoute>
						<ResourceProvider>
							<QueryBuilderProvider>
								<AppLayout>
									<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
										<Switch>
											{routes.map(({ path, component, exact }) => (
												<Route
													key={`${path}`}
													exact={exact}
													path={path}
													component={component}
												/>
											))}
		<Router history={history}>
			<PrivateRoute>
				<AppLayout>
					<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
						<Switch>
							{routes.map(({ path, component, exact }) => {
								return (
									<Route
										key={`${path}`}
										exact={exact}
										path={path}
										component={component}
									/>
								);
							})}

											<Route path="*" component={NotFound} />
										</Switch>
									</Suspense>
								</AppLayout>
							</QueryBuilderProvider>
						</ResourceProvider>
					</PrivateRoute>
				</NotificationProvider>
			</Router>
		</ConfigProvider>
							<Route path="*" component={NotFound} />
						</Switch>
					</Suspense>
				</AppLayout>
			</PrivateRoute>
		</Router>
	);
}
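The removed App code above wires feature flags into routing: the GET_STARTED route survives only when the onboarding flag is active and the host is a signoz.cloud domain. The same decision as a standalone function, with simplified stand-in types rather than the repo's real ones:

// Standalone sketch of the route-filtering logic in the removed App above.
// FeatureFlag and AppRoute are simplified stand-ins for the repo's types.
interface FeatureFlag {
	name: string;
	active: boolean;
}

interface AppRoute {
	path: string;
	exact: boolean;
}

const GET_STARTED = '/get-started'; // stand-in for ROUTES.GET_STARTED

function filterRoutes(
	routes: AppRoute[],
	flags: FeatureFlag[],
	hostname: string,
): AppRoute[] {
	const isOnboardingEnabled =
		flags.find((flag) => flag.name === 'ONBOARDING')?.active || false;

	// Onboarding is only offered on SigNoz Cloud hosts; drop the route elsewhere.
	if (!isOnboardingEnabled || !hostname.endsWith('signoz.cloud')) {
		return routes.filter((route) => route.path !== GET_STARTED);
	}

	return routes;
}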
Some files were not shown because too many files have changed in this diff.