Compare commits


1 Commit

Author: Srikanth Chekuri
SHA1: c5718b8338
Message: chore: add config files and documentation for SSL certificates monitoring
Date: 2023-03-28 05:46:16 +05:30
2141 changed files with 17380 additions and 127196 deletions

.github/CODEOWNERS
View File

@@ -1,10 +1,7 @@
# CODEOWNERS info: https://help.github.com/en/articles/about-code-owners
# Owners are automatically requested for review for PRs that changes code
# that they own.
/frontend/ @palashgdev @YounixM
/frontend/src/container/MetricsApplication @srikanthccv
/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
* @ankitnayan
/frontend/ @palashgdev @pranshuchittora
/deploy/ @prashant-shahi
/sample-apps/ @prashant-shahi
.github @prashant-shahi
**/query-service/ @srikanthccv
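Worth noting when reading this hunk: CODEOWNERS resolution is last-match-wins, so the catch-all `* @ankitnayan` rule only applies to paths that no later, more specific pattern matches. A minimal sketch of that precedence, using hypothetical owner handles:

```
# Last matching pattern wins, so the catch-all goes first.
*                 @default-owner
# Overrides the catch-all for any query-service directory at any depth.
**/query-service/ @backend-owner
# A single file can carry its own owner.
/frontend/src/container/NewWidget/RightContainer/types.ts @frontend-owner
```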

View File

@@ -1,17 +0,0 @@
### Summary
<!-- ✍️ A clear and concise description...-->
#### Related Issues / PR's
<!-- ✍️ Add the issues being resolved here and related PR's where applicable -->
#### Screenshots
NA
<!-- ✍️ Add screenshots of before and after changes where applicable-->
#### Affected Areas and Manually Tested Areas
<!-- ✍️ Add details of blast radius and dev testing areas where applicable-->

View File

@@ -12,31 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint
run: cd frontend && npm run lint
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
shell: bash
run: |
make build-frontend-amd64
build-frontend-ee:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
uses: actions/checkout@v2
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint
@@ -55,11 +31,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
uses: actions/checkout@v2
- name: Run tests
shell: bash
run: |
@@ -73,11 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
uses: actions/checkout@v2
- name: Build EE query-service image
shell: bash
run: |
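The change repeated across these hunks is action and toolchain pinning: `actions/checkout` switches major versions and a pinned `actions/setup-go` step appears next to it. A minimal sketch of the newer variant of one job, reusing the `make build-frontend-amd64` target that the diff itself invokes:

```yaml
jobs:
  build-frontend:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install dependencies
        run: cd frontend && yarn install
      - name: Run ESLint
        run: cd frontend && npm run lint
      - name: Build frontend docker image
        shell: bash
        run: make build-frontend-amd64
```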

View File

@@ -39,11 +39,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -54,7 +54,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -68,4 +68,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v1
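Both sides of this diff keep CodeQL's three-phase shape (init, autobuild, analyze); only the action major versions move. A minimal sketch of the newer variant, assuming the job defines a `language` matrix as in the hunk:

```yaml
steps:
  - name: Checkout repository
    uses: actions/checkout@v4
  - name: Initialize CodeQL
    uses: github/codeql-action/init@v2
    with:
      languages: ${{ matrix.language }}
  - name: Autobuild
    uses: github/codeql-action/autobuild@v2
  - name: Perform CodeQL Analysis
    uses: github/codeql-action/analyze@v2
```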

View File

@@ -7,7 +7,12 @@ jobs:
lint-commits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2.3.1
with:
# we actually need "github.event.pull_request.commits + 1" commit
fetch-depth: 0
- uses: wagoid/commitlint-github-action@v5
- uses: actions/setup-node@v2.1.0
# or just "yarn" if you depend on "@commitlint/cli" already
- run: yarn add @commitlint/cli
- run: yarn add @commitlint/config-conventional
- run: yarn run commitlint --config ./node_modules/@commitlint/config-conventional/index.js --from HEAD~${{ github.event.pull_request.commits }} --to HEAD
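Both variants check out with `fetch-depth: 0` because commitlint walks `github.event.pull_request.commits` commits back from HEAD, which a shallow clone cannot satisfy. The packaged action folds the manual yarn steps into one; a minimal sketch of that form:

```yaml
lint-commits:
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
      with:
        fetch-depth: 0  # full history, so HEAD~N resolves in the range check
    - uses: wagoid/commitlint-github-action@v5
```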

View File

@@ -12,11 +12,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Codebase
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
repository: signoz/gh-bot
- name: Use Node v16
uses: actions/setup-node@v4
uses: actions/setup-node@v2
with:
node-version: 16
- name: Setup Cache & Install Dependencies

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: 'Dependency Review'
with:
fail-on-severity: high
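The `uses:` line for the review step itself falls outside this hunk. A minimal sketch of how such a step is typically wired, assuming `actions/dependency-review-action` (the action name is inferred from the step name, not shown in this diff):

```yaml
- name: 'Dependency Review'
  uses: actions/dependency-review-action@v3
  with:
    fail-on-severity: high  # fail the check on advisories at or above this severity
```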

View File

@@ -13,7 +13,7 @@ jobs:
DOCKER_TAG: pull-${{ github.event.number }}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Build query-service image
env:
@@ -37,7 +37,7 @@ jobs:
kubectl create ns sample-application
# apply hotrod k8s manifest file
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
# wait for all deployments in sample-application namespace to be READY
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
@@ -69,14 +69,12 @@ jobs:
--restart='OnFailure' -i --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
- name: Get short commit SHA, display tunnel URL and IP Address of the worker node
- name: Get short commit SHA and display tunnel URL
id: get-subdomain
run: |
subdomain="pr-$(git rev-parse --short HEAD)"
echo "URL for tunnelling: https://$subdomain.loca.lt"
echo "subdomain=$subdomain" >> $GITHUB_OUTPUT
worker_ip="$(curl -4 -s ipconfig.io/ip)"
echo "Worker node IP address: $worker_ip"
echo "::set-output name=subdomain::$subdomain"
- name: Start tunnel
env:
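The subdomain step also captures the migration off the deprecated `::set-output` workflow command onto the `$GITHUB_OUTPUT` file. A minimal sketch of the newer form, with a follow-up step consuming the output:

```yaml
- name: Get short commit SHA
  id: get-subdomain
  run: |
    subdomain="pr-$(git rev-parse --short HEAD)"
    echo "subdomain=$subdomain" >> "$GITHUB_OUTPUT"
- name: Display tunnel URL
  run: echo "URL for tunnelling: https://${{ steps.get-subdomain.outputs.subdomain }}.loca.lt"
```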

View File

@@ -9,8 +9,8 @@ jobs:
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
node-version: "16.x"
- name: Install dependencies

View File

@@ -5,7 +5,7 @@ name: VerifyIssue
on:
pull_request:
types: [edited, opened]
types: [edited, synchronize, opened, reopened]
check_run:
jobs:
@@ -14,6 +14,6 @@ jobs:
name: Ensure Pull Request has a linked issue.
steps:
- name: Verify Linked Issue
uses: srikanthccv/verify-linked-issue-action@v0.71
uses: hattan/verify-linked-issue-action@v1.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -14,27 +14,23 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v2
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
- uses: benjlevesque/short-sha@v1.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
@@ -46,11 +42,6 @@ jobs:
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Install cross-compilation tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: Build and push docker image
run: make build-push-query-service
@@ -58,27 +49,23 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v2
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
- uses: benjlevesque/short-sha@v1.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
@@ -90,11 +77,6 @@ jobs:
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Install cross-compilation tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: Build and push docker image
run: make build-push-ee-query-service
@@ -102,7 +84,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install dependencies
working-directory: frontend
run: yarn install
@@ -115,19 +97,19 @@ jobs:
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v2
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
- uses: benjlevesque/short-sha@v1.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
@@ -141,52 +123,3 @@ jobs:
fi
- name: Build and push docker image
run: make build-push-frontend
image-build-and-push-frontend-ee:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-frontend
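Every job in this workflow derives `DOCKER_TAG` the same way: `tj-actions/branch-names` reports whether the ref is a tag, the leading `v` is stripped, and a per-flavor suffix (`-oss`, none, or `-ee`) is appended. The selection step in isolation, annotated, as it appears for the EE frontend image:

```yaml
- name: Set docker tag environment
  run: |
    if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
      tag="${{ steps.branch-name.outputs.tag }}"
      tag="${tag:1}"                              # strip the leading "v", e.g. v0.17.0 -> 0.17.0
      echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
    elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
      echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV  # main publishes the rolling tag
    else
      echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
    fi
```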

View File

@@ -12,12 +12,6 @@ on:
jobs:
update_release_draft:
permissions:
# write permission is required to create a github release
contents: write
# write permission is required for autolabeler
# otherwise, read permission is required at least
pull-requests: write
runs-on: ubuntu-latest
steps:
# (Optional) GitHub Enterprise requires GHE_HOST variable set

View File

@@ -8,15 +8,9 @@ jobs:
remove:
runs-on: ubuntu-latest
steps:
- name: Remove label ok-to-test from PR
uses: buildsville/add-remove-label@v2.0.0
- name: Remove label
uses: buildsville/add-remove-label@v1
with:
label: ok-to-test
type: remove
token: ${{ secrets.GITHUB_TOKEN }}
- name: Remove label testing-deploy from PR
uses: buildsville/add-remove-label@v2.0.0
with:
label: testing-deploy
label: ok-to-test,testing-deploy
type: remove
token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -3,7 +3,7 @@ on:
pull_request:
branches:
- main
- develop
- v*
paths:
- 'frontend/**'
defaults:
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Sonar analysis

View File

@@ -11,25 +11,21 @@ jobs:
environment: staging
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.8
uses: appleboy/ssh-action@v0.1.6
env:
GITHUB_BRANCH: develop
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.SSH_KEY }}
key: ${{ secrets.EC2_SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export OTELCOL_TAG="main"
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
docker system prune --force
docker pull signoz/signoz-otel-collector:main
docker pull signoz/signoz-schema-migrator:main
cd ~/signoz
git status
git add .

View File

@@ -11,14 +11,14 @@ jobs:
if: ${{ github.event.label.name == 'testing-deploy' }}
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.8
uses: appleboy/ssh-action@v0.1.6
env:
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.SSH_KEY }}
key: ${{ secrets.EC2_SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
@@ -26,7 +26,6 @@ jobs:
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export DEV_BUILD="1"
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
docker system prune --force
cd ~/signoz
git status

.gitignore
View File

@@ -1,5 +1,7 @@
node_modules
yarn.lock
package.json
deploy/docker/environment_tiny/common_test
frontend/node_modules
@@ -37,7 +39,7 @@ frontend/src/constants/env.ts
**/locust-scripts/__pycache__/
**/__debug_bin
.env
frontend/*.env
pkg/query-service/signoz.db
pkg/query-service/tests/test-deploy/data/
@@ -53,12 +55,3 @@ ee/query-service/tests/test-deploy/data/
bin/
*/query-service/queries.active
# e2e
e2e/node_modules/
e2e/test-results/
e2e/playwright-report/
e2e/blob-report/
e2e/playwright/.cache/
e2e/.auth

View File

@@ -80,7 +80,7 @@ Before sending us a pull request, please ensure that,
GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
**Note:** Unless your change is small, **please** consider submitting different Pull Request(s):
**Note:** Unless your change is small, **please** consider submitting different Pull Rrequest(s):
* 1⃣ First PR should include the overall structure of the new component:
* Readme, configuration, interfaces or base classes, etc...
@@ -338,7 +338,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
**5.1.1 To install the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```
@@ -361,7 +361,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
**5.1.4 To delete the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
| HOTROD_NAMESPACE=sample-application bash
```

Makefile
View File

@@ -8,7 +8,6 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
DEV_BUILD ?= "" # set to any non-empty value to enable dev build
# Internal variables or constants.
FRONTEND_DIRECTORY ?= frontend
@@ -16,15 +15,15 @@ QUERY_SERVICE_DIRECTORY ?= pkg/query-service
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
GOPATH ?= $(shell go env GOPATH)
LOCAL_GOOS ?= $(shell go env GOOS)
LOCAL_GOARCH ?= $(shell go env GOARCH)
REPONAME ?= signoz
DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
DOCKER_TAG ?= latest
FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
DEV_BUILD ?= ""
# Build-time Go variables
PACKAGE?=go.signoz.io/signoz
@@ -38,22 +37,10 @@ LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildV
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
all: build-push-frontend build-push-query-service
# Steps to build static files of frontend
build-frontend-static:
@echo "------------------"
@echo "--> Building frontend static files"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
rm -rf build && \
CI=1 yarn install && \
yarn build && \
ls -l build
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
build-frontend-amd64: build-frontend-static
build-frontend-amd64:
@echo "------------------"
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@@ -62,60 +49,32 @@ build-frontend-amd64: build-frontend-static
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend(used in push pipeline)
build-push-frontend: build-frontend-static
build-push-frontend:
@echo "------------------"
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
docker buildx build --file Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build static binary of query service
.PHONY: build-query-service-static
build-query-service-static:
@echo "------------------"
@echo "--> Building query-service static binary"
@echo "------------------"
@if [ $(DEV_BUILD) != "" ]; then \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
else \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
fi
.PHONY: build-query-service-static-amd64
build-query-service-static-amd64:
make GOARCH=amd64 build-query-service-static
.PHONY: build-query-service-static-arm64
build-query-service-static-arm64:
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
# Steps to build static binary of query service for all platforms
.PHONY: build-query-service-static-all
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64: build-query-service-static-amd64
build-query-service-amd64:
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service: build-query-service-static-all
build-push-query-service:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
--push --platform linux/arm64,linux/amd64 \
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane \
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
# Step to build EE docker image of query service in amd64 (used in build pipeline)
@@ -123,14 +82,24 @@ build-ee-query-service-amd64:
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64
@if [ $(DEV_BUILD) != "" ]; then \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
else \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
fi
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
build-push-ee-query-service:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--progress plane --push --platform linux/arm64,linux/amd64 \
--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
dev-setup:
mkdir -p /var/lib/signoz
@@ -141,7 +110,7 @@ dev-setup:
@echo "------------------"
run-local:
@docker-compose -f \
@LOCAL_GOOS=$(LOCAL_GOOS) LOCAL_GOARCH=$(LOCAL_GOARCH) docker-compose -f \
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
up --build -d
@@ -167,21 +136,6 @@ clear-swarm-data:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-standalone-ch:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
clear-swarm-ch:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
test:
go test ./pkg/query-service/app/metrics/...
go test ./pkg/query-service/cache/...
go test ./pkg/query-service/app/...
go test ./pkg/query-service/app/querier/...
go test ./pkg/query-service/converter/...
go test ./pkg/query-service/formatter/...
go test ./pkg/query-service/tests/integration/...
go test ./pkg/query-service/rules/...
go test ./pkg/query-service/collectorsimulator/...
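These targets are what the push workflows above drive instead of calling docker directly. A minimal sketch of CI steps exercising the cross-compiled static builds, using only targets and variables defined in this Makefile:

```yaml
- name: Build static binaries for both architectures
  run: |
    make GOARCH=amd64 build-query-service-static
    # arm64 needs the cross C compiler because CGO is enabled
    make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
- name: Build and push multi-arch image
  run: make build-push-query-service
```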

View File

@@ -5,7 +5,7 @@
</p>
<p align="center">
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -14,62 +14,27 @@
<h3 align="center">
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
<a href="https://twitter.com/SigNozHQ"><b>Twitter</b></a>
</h3>
##
SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. Mit SigNoz können Sie Folgendes tun:
SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. SigNoz benutzt verteilte Einzelschritt-Fehlersuchen, um Einblick in deinen Software-Stack zu bekommen.
👉 Visualisieren Sie Metriken, Traces und Logs in einer einzigen Oberfläche.
👉 Du kannst Werte wie die P99-Latenz und die Fehler Häufigkeit von deinen Services, externen API Aufrufen und einzelnen Endpunkten sehen.
👉 Sie können Metriken wie die p99-Latenz, Fehlerquoten für Ihre Dienste, externe API-Aufrufe und individuelle Endpunkte anzeigen.
👉 Du kannst die Ursache des Problems finden, indem du zu dem Einzelschritt gehst, der das Problem verursacht und dir detaillierte Flamegraphs von einzelnen Abfragefehlersuchen anzeigen lassen.
👉 Sie können die Ursache des Problems ermitteln, indem Sie zu den genauen Traces gehen, die das Problem verursachen, und detaillierte Flammenbilder einzelner Anfragetraces anzeigen.
👉 Führen Sie Aggregationen auf Trace-Daten durch, um geschäftsrelevante Metriken zu erhalten.
👉 Filtern und Abfragen von Logs, Erstellen von Dashboards und Benachrichtigungen basierend auf Attributen in den Logs.
👉 Automatische Aufzeichnung von Ausnahmen in Python, Java, Ruby und Javascript.
👉 Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
##
### Anwendung Metriken
![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)
### Verteiltes Tracing
<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
### Log Verwaltung
<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
### Infrastruktur Überwachung
<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
### Exceptions Monitoring
![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-4188d024-3ac9-4799-be95-f5ea9c45436f.png)
### Alarme
<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
👉 Erstelle Aggregate auf Basis von Fehlersuche Daten, um geschäftsrelevante Metriken zu erhalten.
![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Werde Teil unserer Slack Community
@@ -77,22 +42,20 @@ Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
## Funktionen:
- Einheitliche Benutzeroberfläche für Metriken, Traces und Logs. Keine Notwendigkeit, zwischen Prometheus und Jaeger zu wechseln, um Probleme zu debuggen oder ein separates Log-Tool wie Elastic neben Ihrer Metriken- und Traces-Stack zu verwenden.
- Überblick über Anwendungsmetriken wie RPS, Latenzzeiten des 50tes/90tes/99tes Perzentils und Fehlerquoten.
- Langsamste Endpunkte in Ihrer Anwendung.
- Zeigen Sie genaue Anfragetraces an, um Probleme in nachgelagerten Diensten, langsamen Datenbankabfragen oder Aufrufen von Drittanbieterdiensten wie Zahlungsgateways zu identifizieren.
- Filtern Sie Traces nach Dienstname, Operation, Latenz, Fehler, Tags/Annotationen.
- Führen Sie Aggregationen auf Trace-Daten (Ereignisse/Spans) durch, um geschäftsrelevante Metriken zu erhalten. Beispielsweise können Sie die Fehlerquote und die 99tes Perzentillatenz für `customer_type: gold` oder `deployment_version: v2` oder `external_call: paypal` erhalten.
- Native Unterstützung für OpenTelemetry-Logs, erweiterten Log-Abfrage-Builder und automatische Log-Sammlung aus dem Kubernetes-Cluster.
- Blitzschnelle Log-Analytik ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
- End-to-End-Sichtbarkeit der Infrastrukturleistung, Aufnahme von Metriken aus allen Arten von Host-Umgebungen.
- Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
- Übersichtsmetriken deiner Anwendung wie RPS, 50tes/90tes/99tes Quantil Latenzen und Fehler Häufigkeiten.
- Übersicht der langsamsten Endpunkte deiner Anwendung.
- Sieh dir die genaue Einzelschritt-Fehlersuche deiner Abfrage an, um Fehler in nachgelagerten Diensten, langsamen Datenbank Abfragen und Aufrufen von Drittanbieter Diensten wie Zahlungsportalen, etc. zu finden.
- Filtere Einzelschritt-Fehlersuchen nach Dienstname, Latenz, Fehler, Stichworten/ Anmerkungen.
- Führe Aggregate auf Basis von Einzelschritt-Fehlersuche Daten (Ereignisse/Abstände) aus, um geschäftsrelevante Metriken zu erhalten. Du kannst dir z. B. die Fehlerrate und 99tes Quantil Latenz von `customer_type: gold`, `deployment_version: v2` oder `external_call: paypal` ausgeben lassen.
- Einheitliche Benutzeroberfläche für Metriken und Einzelschritt-Fehlersuchen. Du musst nicht zwischen Prometheus und Jaeger hin und her wechseln, um Fehler zu beheben.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
## Wieso SigNoz?
@@ -102,46 +65,45 @@ Wir wollten eine selbst gehostete, Open Source Variante von Lösungen wie DataDo
Open Source gibt dir außerdem die totale Kontrolle über deine Konfiguration, Stichprobenentnahme und Betriebszeit. Du kannst des Weiteren neue Module auf Basis von SigNoz bauen, die erweiterte, geschäftsspezifische Funktionen anbieten.
### Languages supported:
### Unterstützte Programmiersprachen:
Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als Bibliothek, mit der Sie Ihre Anwendungen instrumentieren können. Daher wird jedes von OpenTelemetry unterstützte Framework und jede Sprache auch von SignNoz unterstützt. Einige der wichtigsten unterstützten Sprachen sind:
Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als die Software Library, die du nutzen kannst um deine Anwendungen auszuführen. Jedes Framework und jede Sprache die von OpenTelemetry unterstützt wird, wird auch von SigNoz unterstützt. Einige der unterstützten, größeren Programmiersprachen sind:
- Java
- Python
- NodeJS
- Go
- PHP
- .NET
- Ruby
- Elixir
- Rust
Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
## Erste Schritte mit SigNoz
### Bereitstellung mit Docker
Bitte folge den [hier](https://signoz.io/docs/install/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.
Bitte folge den [hier](https://signoz.io/docs/deployment/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.
Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/install/troubleshooting/) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.
Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/deployment/troubleshooting) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.
<p>&nbsp </p>
### Deploy in Kubernetes using Helm
### Bereitstellung mit Kubernetes und Helm
Bitte folge den [hier](https://signoz.io/docs/deployment/helm_chart) aufgelisteten Schritten, um deine Anwendung mit Helm Charts bereitzustellen.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
## Vergleiche mit bekannten Tools
## Vergleiche mit anderen Lösungen
### SigNoz vs Prometheus
### SigNoz vs. Prometheus
Prometheus ist gut, falls du dich nur für Metriken interessierst. Wenn du eine nahtlose Integration von Metriken und Einzelschritt-Fehlersuchen haben möchtest, ist die Kombination aus Prometheus und Jaeger nicht das Richtige für dich.
@@ -149,79 +111,49 @@ Unser Ziel ist es, eine integrierte Benutzeroberfläche aus Metriken und Einzels
<p>&nbsp </p>
### SigNoz vs Jaeger
### SigNoz vs. Jaeger
Jaeger kümmert sich nur um verteilte Einzelschritt-Fehlersuche. SigNoz erstellt sowohl Metriken als auch Einzelschritt-Fehlersuche, daneben haben wir auch Protokoll Verwaltung auf unserem Plan.
Außerdem hat SigNoz noch mehr spezielle Funktionen im Vergleich zu Jaeger:
- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an.
- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag `customer_type=premium`, was hingegen mit SigNoz leicht umsetzbar ist.
<p>&nbsp </p>
### SigNoz vs Elastic
- Die Verwaltung von SigNoz-Protokollen basiert auf 'ClickHouse', einem spaltenbasierten OLAP-Datenspeicher, der aggregierte Protokollanalyseabfragen wesentlich effizienter macht.
- 50 % geringerer Ressourcenbedarf im Vergleich zu Elastic während der Aufnahme.
Wir haben Benchmarks veröffentlicht, die Elastic mit SignNoz vergleichen. Schauen Sie es sich [hier](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
<p>&nbsp </p>
### SigNoz vs Loki
- SigNoz unterstützt Aggregationen von Daten mit hoher Kardinalität über ein großes Volumen, Loki hingegen nicht.
- SigNoz unterstützt Indizes über Daten mit hoher Kardinalität und hat keine Beschränkungen hinsichtlich der Anzahl der Indizes, während Loki maximale Streams erreicht, wenn ein paar Indizes hinzugefügt werden.
- Das Durchsuchen großer Datenmengen ist in Loki im Vergleich zu SigNoz schwierig und langsam.
Wir haben Benchmarks veröffentlicht, die Loki mit SigNoz vergleichen. Schauen Sie es sich [hier](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an
- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag - customer_type='premium', was hingegen mit SigNoz leicht umsetzbar ist.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
## Zum Projekt beitragen
Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md), durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem #contributing Kanal in unserer [slack community](https://signoz.io/slack)
### Unsere Projektbetreuer
Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md) durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://signoz.io/slack).
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
## Dokumentation
Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Gemeinschaft
## Community
Werde Teil der [slack community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
Werde Teil der [Slack Community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
Falls du irgendwelche Ideen, Fragen oder Feedback hast, kannst du sie gerne über unsere [Github Discussions](https://github.com/SigNoz/signoz/discussions) mit uns teilen.
Wie immer, Dank an unsere großartigen Mitwirkenden!
Wie immer, danke an unsere großartigen Unterstützer!
<a href="https://github.com/signoz/signoz/graphs/contributors">
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>

View File

@@ -5,7 +5,7 @@
</p>
<p align="center">
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -70,6 +70,7 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Join our Slack community
@@ -77,6 +78,7 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
## Features:
@@ -87,12 +89,13 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
- Filter traces by service name, operation, latency, error, tags/annotations.
- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s cluster
- Lightning quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
- Lightening quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
- End-to-End visibility into infrastructure performance, ingest metrics from all kinds of host environments
- Easy to set alerts with DIY query builder
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
## Why SigNoz?
@@ -121,14 +124,15 @@ You can find the complete list of languages here - https://opentelemetry.io/docs
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
## Getting Started
### Deploy using Docker
Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using docker
Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using docker
The [troubleshooting instructions](https://signoz.io/docs/install/troubleshooting/) may be helpful if you face any issues.
The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshooting) may be helpful if you face any issues.
<p>&nbsp </p>
@@ -139,6 +143,7 @@ Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_cha
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
## Comparisons to Familiar Tools
@@ -180,6 +185,7 @@ We have published benchmarks comparing Loki with SigNoz. Check it out [here](htt
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
## Contributing
@@ -199,16 +205,14 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
- [Dhawal Sanghvi](https://github.com/dhawal1248)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
## Documentation
@@ -216,6 +220,7 @@ You can find docs at https://signoz.io/docs/. If you need any clarification or f
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Community

View File

@@ -84,9 +84,9 @@ Você pode encontrar a lista completa de linguagens aqui - https://opentelemetry
### Implantar usando Docker
Siga as etapas listadas [aqui](https://signoz.io/docs/install/docker/) para instalar usando o Docker.
Siga as etapas listadas [aqui](https://signoz.io/docs/deployment/docker/) para instalar usando o Docker.
Esse [guia para solução de problemas](https://signoz.io/docs/install/troubleshooting/) pode ser útil se você enfrentar quaisquer problemas.
Esse [guia para solução de problemas](https://signoz.io/docs/deployment/troubleshooting) pode ser útil se você enfrentar quaisquer problemas.
<p>&nbsp </p>

View File

@@ -1,225 +1,170 @@
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
<p align="center">
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
<p align="center">你的应用,并可排查已部署应用的问题,这是一个可替代 DataDog、NewRelic 的开源方案</p>
<p align="center">你的应用,并可排查已部署应用的问题,这是一个开源的可替代DataDog、NewRelic方案</p>
</p>
<p align="center">
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>
<h3 align="center">
<a href="https://signoz.io/docs"><b>文档</b></a>
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>中文ReadMe</b></a>
<a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>德文ReadMe</b></a>
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>葡萄牙语ReadMe</b></a>
<a href="https://signoz.io/slack"><b>Slack 社区</b></a>
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
##
SigNoz 帮助开发人员监控应用并排查已部署应用的问题。你可以使用 SigNoz 实现如下能力:
SigNoz帮助开发人员监控应用并排查已部署应用的问题。SigNoz使用分布式追踪来增加软件技术栈的可见性。
👉 在同一块面板上,可视化 Metrics, Traces 和 Logs 内容
👉 你能看到一些性能指标服务、外部api调用、每个终端(endpoint)的p99延迟和错误率
👉 你可以关注服务的 p99 延迟和错误率, 包括外部 API 调用和个别的端点
👉 通过准确的追踪来确定是什么引起了问题,并且可以看到每个独立请求的帧图(framegraph),这样你就能找到根本原因
👉 你可以找到问题的根因,通过提取相关问题的 traces 日志、单独查看请求 traces 的火焰图详情
👉 聚合trace数据来获得业务相关指标
👉 执行 trace 数据聚合,以获取业务相关的 metrics
👉 对日志过滤和查询,通过日志的属性建立看板和告警
👉 通过 Python、Java、Ruby 和 Javascript 自动记录异常
👉 轻松的自定义查询和设置告警
### 应用 Metrics 展示
![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)
### 分布式追踪
<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
### 日志管理
<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
### 基础设施监控
<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
### 异常监控
![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-4188d024-3ac9-4799-be95-f5ea9c45436f.png)
### 告警
<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
![screenzy-1644432902955](https://user-images.githubusercontent.com/504541/153270713-1b2156e6-ec03-42de-975b-3c02b8ec1836.png)
<br />
![screenzy-1644432986784](https://user-images.githubusercontent.com/504541/153270725-0efb73b3-06ed-4207-bf13-9b7e2e17c4b8.png)
<br />
![screenzy-1647005040573](https://user-images.githubusercontent.com/504541/157875938-a3d57904-ea6d-4278-b929-bd1408d7f94c.png)
<br /><br />
## 加入我们 Slack 社区
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
来 [Slack](https://signoz.io/slack) 和我们打招呼吧 👋
## 加入我们的Slack社区
来[Slack](https://signoz.io/slack) 跟我们打声招呼👋
<br /><br />
## 特性:
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
- 为 metrics, traces and logs 制定统一的 UI。 无需切换 Prometheus 到 Jaeger 去查找问题,也无需使用想 Elastic 这样的日志工具分开你的 metrics 和 traces
## 功能:
- 默认统计应用的 metrics 数据,像 RPS (每秒请求数) 50th/90th/99th 的分位数延迟数据,还有相关的错误率
- 找到应用中最慢的端点
- 查看准确的请求跟踪数据,找到下游服务的问题了,比如 DB 慢查询,或者调用第三方的支付网关等
- 通过 服务名、操作方式、延迟、错误、标签/注释 过滤 traces 数据
- 通过聚合 trace 数据而获得业务相关的 metrics。 比如你可以通过 `customer_type: gold` 或者 `deployment_version: v2` 或者 `external_call: paypal` 获取错误率和 P99 延迟数据
- 原生支持 OpenTelemetry 日志,高级日志查询,自动收集 k8s 相关日志
- 快如闪电的日志分析 ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
- 可视化点到点的基础设施性能,提取有所有类型机器的 metrics 数据
- 轻易自定义告警查询
- 应用概览指标(metrics)如RPS, p50/p90/p99延迟率分位值错误率等。
- 应用中最慢的终端(endpoint)
- 查看特定请求的trace数据来分析下游服务问题、慢数据库查询问题 及调用第三方服务如支付网关的问题
- 通过服务名称、操作、延迟、错误、标签来过滤traces。
- 聚合trace数据(events/spans)来得到业务相关指标。比如,你可以通过过滤条件`customer_type: gold` or `deployment_version: v2` or `external_call: paypal` 来获取指定业务的错误率和p99延迟
- 为metrics和trace提供统一的UI。排查问题不需要在Prometheus和Jaeger之间切换。
<br /><br />
## 为什么使用 SigNoz?
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
作为开发者, 我们发现 SaaS 厂商对一些大家想要的小功能都是闭源的,这种行为真的让人有点恼火。 闭源厂商还会在月底给你一张没有明细的巨额账单。
## 为何选择SigNoz
我们想做一个自托管并且可开源的工具,像 DataDog 和 NewRelic 那样, 为那些担心数据隐私和安全的公司提供第三方服务
作为开发人员,我们发现依赖闭源的SaaS厂商提供的每个小功能有些麻烦。闭源厂商通常会给你一份巨额月付账单,但不提供足够的透明度,你不知道你为哪些功能付费。
作为开源的项目,你完全可以自己掌控你的配置、样本和更新。你同样可以基于 SigNoz 拓展特定的业务模块。
我们想做一个自服务的开源版本的工具,类似于DataDog和NewRelic,用于那些对客户数据流入第三方有隐私和安全担忧的厂商。
### 支持的编程语言:
开源也让你对配置、采样和正常运行时间有完整的控制,你可以在SigNoz基础上构建模块来满足特定的商业需求。
我们支持 [OpenTelemetry](https://opentelemetry.io)。作为一个观测你应用的库文件。所以任何 OpenTelemetry 支持的框架和语言,对于 SigNoz 也同样支持。 一些主要支持的语言如下:
### 语言支持
我们支持[OpenTelemetry](https://opentelemetry.io)库,你可以使用它来装备应用。也就是说,SigNoz支持任何支持OpenTelemetry库的框架和语言。 主要支持语言包括:
- Java
- Python
- NodeJS
- Go
- PHP
- .NET
- Ruby
- Elixir
- Rust
你可以在这里找到全部支持的语言列表 - https://opentelemetry.io/docs/
你可以在这个文档里找到完整的语言列表 - https://opentelemetry.io/docs/
<br /><br />
## 让我们开始吧
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
### 使用 Docker 部署
## 入门
请一步步跟随 [这里](https://signoz.io/docs/install/docker/) 通过 docker 来安装。
这个 [排障说明书](https://signoz.io/docs/install/troubleshooting/) 可以帮助你解决碰到的问题。
### 使用Docker部署
请按照[这里](https://signoz.io/docs/deployment/docker/)列出的步骤使用Docker来安装
如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/deployment/troubleshooting)会对你有帮助。
<p>&nbsp </p>
### 使用 Helm 在 Kubernetes 部署
请一步步跟随 [这里](https://signoz.io/docs/deployment/helm_chart) 通过 helm 来安装
### 使用Helm在Kubernetes上部署
请跟着[这里](https://signoz.io/docs/deployment/helm_chart)的步骤使用helm charts安装
<br /><br />
## 比较相似的工具
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
## 与其他方案的比较
### SigNoz vs Prometheus
Prometheus 是一个针对 metrics 监控的强大工具。但是如果你想无缝地切换 metrics 和 traces 查询,你当前大概率需要在 Prometheus 和 Jaeger 之间切换
如果你只是需要监控指标(metrics),那Prometheus是不错的;但如果你想无缝地在metrics和traces之间切换,那目前把Prometheus & Jaeger串起来的体验并不好
我们的目标是提供一个客户观测 metrics 和 traces 的整合 UI。就像 SaaS 供应商 DataDog,它提供很多 jaeger 缺失的功能,比如针对 traces 的过滤功能和聚合功能。
我们的目标是为metrics和traces提供统一的UI - 类似于Datadog这样的Saas厂提供的方案。并且能够对trace进行过滤和聚合,这是目前Jaeger缺失的功能。
<p>&nbsp </p>
### SigNoz vs Jaeger
Jaeger 仅仅是一个分布式追踪系统。 但是 SigNoz 可以提供 metrics, traces 和 logs 所有的观测
Jaeger只做分布式追踪(distributed tracing),SigNoz则支持metrics、traces、logs,即可视化的三大支柱
而且, SigNoz 相较于 Jaeger 拥有更对的高级功能:
并且SigNoz有一些Jaeger没有的高级功能
- Jaegar UI 不能提供任何基于 traces 的 metrics 查询和过滤。
- Jaeger 不能针对过滤的 traces 做聚合。 比如, p99 延迟的请求有个标签是 customer_type='premium'。 而这些在 SigNoz 可以轻松做到。
<p>&nbsp </p>
### SigNoz vs Elastic
- SigNoz 的日志管理是基于 ClickHouse 实现的,可以使日志的聚合更加高效,因为它是基于 OLAP 的数据仓储。
- 与 Elastic 相比,可以节省 50% 的资源成本
我们已经公布了 Elastic 和 SigNoz 的性能对比。 请点击 [这里](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
<p>&nbsp </p>
### SigNoz vs Loki
- SigNoz 支持大容量高基数的聚合,但是 loki 是不支持的。
- SigNoz 支持索引的高基数查询,并且对索引没有数量限制,而 Loki 会在添加部分索引后到达最大上限。
- 相较于 SigNozLoki 在搜索大量数据下既困难又缓慢。
我们已经发布了基准测试对比 Loki 和 SigNoz 性能。请点击 [这里](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
- Jaegar UI无法在traces或过滤的traces上展示metrics。
- Jaeger不能对过滤的traces做聚合操作。例如拥有tag为customer_type='premium'的所有请求的p99延迟。而这个功能在SigNoz这儿是很容易实现。
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
## 贡献
我们 ❤️ 你的贡献,无论大小。 请先阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 再开始给 SigNoz 做贡献。
如果你不知道如何开始? 只需要在 [slack 社区](https://signoz.io/slack) 通过 `#contributing` 频道联系我们
我们 ❤️ 任何贡献无论大小。 请阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 然后开始给Signoz做贡献
### 项目维护人员
还不清楚怎么开始? 只需在[slack社区](https://signoz.io/slack)的`#contributing`频道里ping我们。
#### 后端
### Project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
#### 前端
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
#### 运维开发
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
## 文档
你可以通过 https://signoz.io/docs/ 找到相关文档。如果你需要阐述问题或者发现一些确实的事件, 通过标签 `documentation` 提交 Github 问题。或者通过 slack 社区频道
文档在这里:https://signoz.io/docs/. 如果你觉得有任何不清楚或者有文档缺失请在Github里发一个问题并使用标签 `documentation` 或者在社区stack频道里告诉我们
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## 社区
加入 [slack 社区](https://signoz.io/slack)了解更多关于分布式追踪、可观测性系统。或者与 SigNoz 其他用户和贡献者交流。
加入[slack community](https://signoz.io/slack)了解更多关于分布式追踪、可观察性(observability),以及SigNoz。同时与其他用户和贡献者一起交流。
如果你有任何想法、问题或者任何反馈, 请通过 [Github Discussions](https://github.com/SigNoz/signoz/discussions) 分享。
如果你有任何想法、问题或者反馈,请在[Github Discussions](https://github.com/SigNoz/signoz/discussions)分享给我们
不管怎么样,感谢这个项目的所有贡献者!
最后,感谢我们这些优秀的贡献者们。
<a href="https://github.com/signoz/signoz/graphs/contributors">
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>

View File

@@ -58,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in Si
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
```
To generate load:
@@ -66,7 +66,7 @@ To generate load:
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```
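The differing form fields reflect a rename in Locust itself: older releases used `locust_count` and `hatch_rate`, which newer versions replaced with `user_count` and `spawn_rate`.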
To stop load:

View File

@@ -7,21 +7,9 @@
</default>
<s3>
<type>s3</type>
<!-- For S3 cold storage,
if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
For GCS cold storage,
endpoint should be https://storage.googleapis.com/<bucket-name>/data/
-->
<endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
<access_key_id>ACCESS-KEY-ID</access_key_id>
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
<!-- In case of S3, uncomment the below configuration in case you want to read
AWS credentials from the Environment variables if they exist. -->
<!-- <use_environment_credentials>true</use_environment_credentials> -->
<!-- In case of GCS, uncomment the below configuration, since GCS does
not support batch deletion and result in error messages in logs. -->
<!-- <support_batch_delete>false</support_batch_delete> -->
</s3>
</disks>
<policies>

View File

@@ -1,7 +1,7 @@
version: "3.9"
x-clickhouse-defaults: &clickhouse-defaults
image: clickhouse/clickhouse-server:23.11.1-alpine
image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true
deploy:
restart_policy:
@@ -16,14 +16,7 @@ x-clickhouse-defaults: &clickhouse-defaults
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8123/ping"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
@@ -33,17 +26,15 @@ x-clickhouse-defaults: &clickhouse-defaults
soft: 262144
hard: 262144
x-db-depend: &db-depend
x-clickhouse-depend: &clickhouse-depend
depends_on:
- clickhouse
- otel-collector-migrator
# - clickhouse-2
# - clickhouse-3
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
image: bitnami/zookeeper:3.7.0
hostname: zookeeper-1
user: root
ports:
@@ -133,7 +124,7 @@ services:
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.23.4
image: signoz/alertmanager:0.23.0-0.2
volumes:
- ./data/alertmanager:/data
command:
@@ -146,12 +137,8 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.36.0
command:
[
"-config=/root/config/prometheus.yml",
"--prefer-delta=true"
]
image: signoz/query-service:0.17.0
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
@@ -169,24 +156,17 @@ services:
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
deploy:
restart_policy:
condition: on-failure
<<: *db-depend
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.36.0
image: signoz/frontend:0.17.0
deploy:
restart_policy:
condition: on-failure
@@ -199,17 +179,11 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.88.4
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
image: signoz/signoz-otel-collector:0.66.6
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
@@ -217,8 +191,8 @@ services:
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
@@ -231,31 +205,11 @@ services:
mode: global
restart_policy:
condition: on-failure
depends_on:
- clickhouse
- otel-collector-migrator
- query-service
otel-collector-migrator:
image: signoz/signoz-schema-migrator:0.88.4
deploy:
restart_policy:
condition: on-failure
delay: 5s
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
- clickhouse
# - clickhouse-2
# - clickhouse-3
<<: *clickhouse-depend
otel-collector-metrics:
image: signoz/signoz-otel-collector:0.88.4
command:
[
"--config=/etc/otel-collector-metrics-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
image: signoz/signoz-otel-collector:0.66.6
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
# ports:
@@ -266,24 +220,11 @@ services:
deploy:
restart_policy:
condition: on-failure
<<: *db-depend
logspout:
image: "gliderlabs/logspout:v3.2.14"
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
deploy:
mode: global
restart_policy:
condition: on-failure
<<: *clickhouse-depend
hotrod:
image: jaegertracing/example-hotrod:1.30
command: [ "all" ]
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
logging:
@@ -292,7 +233,7 @@ services:
max-file: "3"
load-hotrod:
image: "signoz/locust:1.2.3"
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080

View File

@@ -1,21 +1,29 @@
receivers:
tcplog/docker:
listen_address: "0.0.0.0:2255"
filelog/dockercontainers:
include: [ "/var/lib/docker/containers/*/*.log" ]
start_at: end
include_file_path: true
include_file_name: false
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
- type: json_parser
id: parser-docker
output: extract_metadata_from_filepath
timestamp:
parse_from: attributes.time
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: regex_parser
id: extract_metadata_from_filepath
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
parse_from: attributes["log.file.path"]
output: parse_body
- type: move
id: parse_body
from: attributes.log
to: body
output: time
- type: remove
id: time
field: attributes.time
opencensus:
endpoint: 0.0.0.0:55678
otlp/spanmetrics:
@@ -67,7 +75,7 @@ processors:
timeout: 10s
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
timeout: 2s
signozspanmetrics/prometheus:
metrics_exporter: prometheus
@@ -158,6 +166,6 @@ service:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
logs:
receivers: [otlp, tcplog/docker]
receivers: [otlp, filelog/dockercontainers]
processors: [batch]
exporters: [clickhouselogsexporter]

View File

@@ -1 +0,0 @@
server_endpoint: ws://query-service:4320/v1/opamp

View File

@@ -24,16 +24,8 @@ server {
try_files $uri $uri/ /index.html;
}
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
proxy_pass http://query-service:8080;
proxy_http_version 1.1;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
# dont buffer the data send it directly to client.
proxy_buffering off;
proxy_cache off;
location /api/alertmanager {
proxy_pass http://alertmanager:9093/api/v2;
}
location /api {

View File

@@ -0,0 +1,51 @@
modules:
http_2xx:
prober: http
http:
preferred_ip_protocol: ip4
http_post_2xx:
prober: http
http:
method: POST
tcp_connect:
prober: tcp
pop3s_banner:
prober: tcp
tcp:
query_response:
- expect: "^+OK"
tls: true
tls_config:
insecure_skip_verify: false
grpc:
prober: grpc
grpc:
tls: true
preferred_ip_protocol: "ip4"
grpc_plain:
prober: grpc
grpc:
tls: false
service: "service1"
ssh_banner:
prober: tcp
tcp:
query_response:
- expect: "^SSH-2.0-"
- send: "SSH-2.0-blackbox-ssh-check"
irc_banner:
prober: tcp
tcp:
query_response:
- send: "NICK prober"
- send: "USER prober prober prober :prober"
- expect: "PING :([^ ]+)"
send: "PONG ${1}"
- expect: "^:[^ ]+ 001"
icmp:
prober: icmp
icmp_ttl5:
prober: icmp
timeout: 5s
icmp:
ttl: 5

View File

@@ -7,21 +7,9 @@
</default>
<s3>
<type>s3</type>
<!-- For S3 cold storage,
if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
For GCS cold storage,
endpoint should be https://storage.googleapis.com/<bucket-name>/data/
-->
<endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
<access_key_id>ACCESS-KEY-ID</access_key_id>
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
<!-- In case of S3, uncomment the below configuration in case you want to read
AWS credentials from the Environment variables if they exist. -->
<!-- <use_environment_credentials>true</use_environment_credentials> -->
<!-- In case of GCS, uncomment the below configuration, since GCS does
not support batch deletion and result in error messages in logs. -->
<!-- <support_batch_delete>false</support_batch_delete> -->
</s3>
</disks>
<policies>

View File

@@ -1,26 +1,9 @@
version: "2.4"
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
image: clickhouse/clickhouse-server:23.7.3-alpine
container_name: signoz-clickhouse
image: clickhouse/clickhouse-server:22.8.8-alpine
container_name: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
@@ -28,11 +11,8 @@ services:
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
restart: on-failure
logging:
options:
@@ -40,21 +20,14 @@ services:
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8123/ping"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
alertmanager:
container_name: signoz-alertmanager
image: signoz/alertmanager:0.23.4
container_name: alertmanager
image: signoz/alertmanager:0.23.0-0.2
volumes:
- ./data/alertmanager:/data
depends_on:
@@ -65,40 +38,20 @@ services:
- --queryService.url=http://query-service:8085
- --storage.path=/data
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.4}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: signoz-otel-collector
image: signoz/signoz-otel-collector:0.88.4
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
container_name: otel-collector
image: signoz/signoz-otel-collector:0.66.6
command: ["--config=/etc/otel-collector-config.yaml"]
# user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
@@ -111,19 +64,11 @@ services:
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
query-service:
condition: service_healthy
otel-collector-metrics:
container_name: signoz-otel-collector-metrics
image: signoz/signoz-otel-collector:0.88.4
command:
[
"--config=/etc/otel-collector-metrics-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
container_name: otel-collector-metrics
image: signoz/signoz-otel-collector:0.66.6
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
# ports:
@@ -135,19 +80,6 @@ services:
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure
hotrod:
image: jaegertracing/example-hotrod:1.30
@@ -156,12 +88,12 @@ services:
options:
max-size: 50m
max-file: "3"
command: [ "all" ]
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "signoz/locust:1.2.3"
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
environment:

View File

@@ -4,12 +4,12 @@ services:
query-service:
hostname: query-service
build:
context: "../../../"
dockerfile: "./pkg/query-service/Dockerfile"
context: "../../../pkg/query-service"
dockerfile: "./Dockerfile"
args:
LDFLAGS: ""
TARGETPLATFORM: "${GOOS}/${GOARCH}"
container_name: signoz-query-service
TARGETPLATFORM: "${LOCAL_GOOS}/${LOCAL_GOARCH}"
container_name: query-service
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
@@ -22,24 +22,13 @@ services:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
command:
[
"-config=/root/config/prometheus.yml",
"--prefer-delta=true"
]
command: ["-config=/root/config/prometheus.yml"]
ports:
- "6060:6060"
- "8080:8080"
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
@@ -52,9 +41,9 @@ services:
context: "../../../frontend"
dockerfile: "./Dockerfile"
args:
TARGETOS: "${GOOS}"
TARGETPLATFORM: "${GOARCH}"
container_name: signoz-frontend
TARGETOS: "${LOCAL_GOOS}"
TARGETPLATFORM: "${LOCAL_GOARCH}"
container_name: frontend
environment:
- FRONTEND_API_ENDPOINT=http://query-service:8080
restart: on-failure

View File

@@ -2,8 +2,7 @@ version: "2.4"
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
# addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:23.11.1-alpine
image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true
depends_on:
- zookeeper-1
@@ -15,14 +14,7 @@ x-clickhouse-defaults: &clickhouse-defaults
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8123/ping"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
@@ -32,12 +24,10 @@ x-clickhouse-defaults: &clickhouse-defaults
soft: 262144
hard: 262144
x-db-depend: &db-depend
x-clickhouse-depend: &clickhouse-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
@@ -46,8 +36,8 @@ x-db-depend: &db-depend
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
image: bitnami/zookeeper:3.7.0
container_name: zookeeper-1
hostname: zookeeper-1
user: root
ports:
@@ -64,7 +54,7 @@ services:
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
# container_name: zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
@@ -81,7 +71,7 @@ services:
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-3
# container_name: zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
@@ -98,7 +88,7 @@ services:
clickhouse:
<<: *clickhouse-defaults
container_name: signoz-clickhouse
container_name: clickhouse
hostname: clickhouse
ports:
- "9000:9000"
@@ -115,7 +105,7 @@ services:
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
# container_name: clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
@@ -133,7 +123,7 @@ services:
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
# container_name: clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
@@ -149,8 +139,7 @@ services:
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.4}
container_name: signoz-alertmanager
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.0-0.2}
volumes:
- ./data/alertmanager:/data
depends_on:
@@ -161,16 +150,12 @@ services:
- --queryService.url=http://query-service:8085
- --storage.path=/data
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.36.0}
container_name: signoz-query-service
command:
[
"-config=/root/config/prometheus.yml",
"--prefer-delta=true"
]
image: signoz/query-service:${DOCKER_TAG:-0.17.0}
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
@@ -189,22 +174,15 @@ services:
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
<<: *db-depend
<<: *clickhouse-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.36.0}
container_name: signoz-frontend
image: signoz/frontend:${DOCKER_TAG:-0.17.0}
container_name: frontend
restart: on-failure
depends_on:
- alertmanager
@@ -214,34 +192,12 @@ services:
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.4}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.4}
container_name: signoz-otel-collector
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.6}
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
@@ -249,8 +205,8 @@ services:
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
@@ -260,22 +216,11 @@ services:
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
query-service:
condition: service_healthy
<<: *clickhouse-depend
otel-collector-metrics:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.4}
container_name: signoz-otel-collector-metrics
command:
[
"--config=/etc/otel-collector-metrics-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.6}
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
# ports:
@@ -284,18 +229,14 @@ services:
# - "13133:13133" # Health check extension
# - "55679:55679" # zPages extension
restart: on-failure
<<: *db-depend
<<: *clickhouse-depend
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
blackbox:
image: quay.io/prometheus/blackbox-exporter
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure
- ./blackbox.yml:/etc/blackbox_exporter/config.yml
ports:
- '9115:9115'
hotrod:
image: jaegertracing/example-hotrod:1.30
@@ -304,12 +245,12 @@ services:
options:
max-size: 50m
max-file: "3"
command: [ "all" ]
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "signoz/locust:1.2.3"
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
environment:

View File

@@ -1,21 +1,29 @@
receivers:
tcplog/docker:
listen_address: "0.0.0.0:2255"
filelog/dockercontainers:
include: [ "/var/lib/docker/containers/*/*.log" ]
start_at: end
include_file_path: true
include_file_name: false
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
- type: json_parser
id: parser-docker
output: extract_metadata_from_filepath
timestamp:
parse_from: attributes.time
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: regex_parser
id: extract_metadata_from_filepath
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
parse_from: attributes["log.file.path"]
output: parse_body
- type: move
id: parse_body
from: attributes.log
to: body
output: time
- type: remove
id: time
field: attributes.time
opencensus:
endpoint: 0.0.0.0:55678
otlp/spanmetrics:
@@ -96,7 +104,7 @@ processors:
# retry_on_failure: true
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
timeout: 2s
extensions:
@@ -163,6 +171,6 @@ service:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
logs:
receivers: [otlp, tcplog/docker]
receivers: [otlp, filelog/dockercontainers]
processors: [batch]
exporters: [clickhouselogsexporter]

View File

@@ -21,6 +21,22 @@ receivers:
- targets:
- otel-collector:8889
- job_name: 'blackbox'
metrics_path: /probe
params:
module: [http_2xx]
static_configs:
- targets:
- https://signoz.io
- https://google.com
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: endpoint
- target_label: __address__
replacement: blackbox:9115
processors:
batch:
send_batch_size: 10000

View File

@@ -1 +0,0 @@
server_endpoint: ws://query-service:4320/v1/opamp

View File

@@ -0,0 +1,144 @@
# SSL certificate expiry and uptime monitoring using SigNoz
## Run blackbox exporter
Run the blackbox exporter as part of the SigNoz deployment. The default configuration below is sufficient for most use cases.
```yaml
modules:
http_2xx:
prober: http
http:
preferred_ip_protocol: ip4
http_post_2xx:
prober: http
http:
method: POST
tcp_connect:
prober: tcp
pop3s_banner:
prober: tcp
tcp:
query_response:
- expect: "^+OK"
tls: true
tls_config:
insecure_skip_verify: false
grpc:
prober: grpc
grpc:
tls: true
preferred_ip_protocol: "ip4"
grpc_plain:
prober: grpc
grpc:
tls: false
service: "service1"
ssh_banner:
prober: tcp
tcp:
query_response:
- expect: "^SSH-2.0-"
- send: "SSH-2.0-blackbox-ssh-check"
irc_banner:
prober: tcp
tcp:
query_response:
- send: "NICK prober"
- send: "USER prober prober prober :prober"
- expect: "PING :([^ ]+)"
send: "PONG ${1}"
- expect: "^:[^ ]+ 001"
icmp:
prober: icmp
icmp_ttl5:
prober: icmp
timeout: 5s
icmp:
ttl: 5
```
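If you want to try the exporter on its own before wiring it into the SigNoz deployment, a minimal sketch (assuming the config above is saved as `blackbox.yml` in the current directory) is:
```bash
# Run the blackbox exporter with the config above mounted in;
# the probe endpoint is then served on localhost:9115.
docker run -d --name blackbox \
  -p 9115:9115 \
  -v "$(pwd)/blackbox.yml:/etc/blackbox_exporter/config.yml" \
  quay.io/prometheus/blackbox-exporter
```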
## Configure OTel Collector to scrape metrics from blackbox exporter
Add a scrape job for the blackbox exporter in `otel-collector-metrics.yaml`.
Example:
```yaml
- job_name: 'blackbox'
metrics_path: /probe
params:
module: [http_2xx]
static_configs:
- targets:
- https://signoz.io
- https://google.com
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: endpoint
- target_label: __address__
replacement: blackbox:9115
```
The above scrape job scrapes metrics from the `blackbox:9115/probe` endpoint of the blackbox exporter. For each target in the
static config, the receiver sends a request like `blackbox:9115/probe?target=https://example.com&module=http_2xx`, and the
blackbox exporter returns the metrics for that probe.
Note: the `__address__` value is the host:port of the actual blackbox exporter used by the receiver.
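To sanity-check a probe by hand, you can hit the exporter directly (assuming it is reachable on `localhost:9115`, as in the compose file):
```bash
# Returns the probe_* metrics for a single target in Prometheus
# exposition format, including probe_ssl_earliest_cert_expiry.
curl 'http://localhost:9115/probe?target=https://signoz.io&module=http_2xx'
```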
This creates a set of metrics for each target, prefixed with `probe_` and carrying labels such as `endpoint` and `phase`.
We are interested in the `probe_ssl_earliest_cert_expiry` metric. Its value is the earliest expiry time among the certificates
in the chain, expressed as a Unix timestamp. We can use this metric to build a dashboard in SigNoz.
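As a quick sanity check, the expression below (a sketch; the `endpoint` label value must match one of your scrape targets) converts the raw timestamp into days remaining:
```
(probe_ssl_earliest_cert_expiry{endpoint="https://signoz.io"} - time()) / 86400
```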
## Create a dashboard in SigNoz
Create a dashboard in SigNoz to monitor certificate expiry. For each target, it shows the earliest expiry of the certificates
in the chain.
Use a value widget for the expiry, with a dropdown filter that lists the targets so a single target can be selected.
```sql
SELECT DISTINCT JSONExtractString(labels, 'endpoint') AS endpoint
FROM signoz_metrics.time_series_v2
WHERE metric_name = 'probe_ssl_earliest_cert_expiry'
```
The above query returns the list of targets and is used to populate the dropdown filter.
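To verify the query before wiring it into the dashboard variable, you can run it against ClickHouse directly (assuming the container name `signoz-clickhouse` used in the compose setup):
```bash
# Lists the distinct probed endpoints recorded in the time series table.
docker exec -it signoz-clickhouse clickhouse-client --query \
  "SELECT DISTINCT JSONExtractString(labels, 'endpoint') AS endpoint
   FROM signoz_metrics.time_series_v2
   WHERE metric_name = 'probe_ssl_earliest_cert_expiry'"
```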
PromQL Expression for the value widget:
```
probe_ssl_earliest_cert_expiry{endpoint="{{.endpoint}}"} - time()
```
And set the unit to `seconds`.
## Create an alert
Create an alert to notify when a certificate is about to expire. The alert fires when the earliest certificate in the chain
expires in less than 90 days.
PromQL Expression for the alert:
```
probe_ssl_earliest_cert_expiry{endpoint="{{.endpoint}}"} - time() < 7776000
```
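Here `7776000` is 90 days expressed in seconds (90 × 24 × 3600); adjust the threshold as needed, e.g. `2592000` for 30 days.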
## Uptime monitoring
The blackbox exporter can also be used to monitor the uptime of the targets.
PromQL Expression for the alert:
```
probe_success{endpoint="{{.endpoint}}"} == 0
```
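Beyond alerting on a single failed probe, a sketch for tracking uptime as a percentage over a window (same assumed `endpoint` label as above) is:
```
avg_over_time(probe_success{endpoint="https://signoz.io"}[1d]) * 100
```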

View File

@@ -24,16 +24,8 @@ server {
try_files $uri $uri/ /index.html;
}
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
proxy_pass http://query-service:8080;
proxy_http_version 1.1;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
# dont buffer the data send it directly to client.
proxy_buffering off;
proxy_cache off;
location /api/alertmanager {
proxy_pass http://alertmanager:9093/api/v2;
}
location /api {

View File

@@ -36,9 +36,9 @@ is_mac() {
[[ $OSTYPE == darwin* ]]
}
is_arm64(){
[[ `uname -m` == 'arm64' || `uname -m` == 'aarch64' ]]
}
# is_arm64(){
# [[ `uname -m` == 'arm64' ]]
# }
check_os() {
if is_mac; then
@@ -48,16 +48,6 @@ check_os() {
return
fi
if is_arm64; then
arch="arm64"
arch_official="aarch64"
else
arch="amd64"
arch_official="x86_64"
fi
platform=$(uname -s | tr '[:upper:]' '[:lower:]')
os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"
case "$os_name" in
@@ -135,7 +125,7 @@ check_ports_occupied() {
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/install/troubleshooting/"
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "++++++++++++++++++++++++++++++++++++++++"
echo ""
exit 1
@@ -153,7 +143,7 @@ install_docker() {
$apt_cmd install software-properties-common gnupg-agent
curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
$sudo_cmd add-apt-repository \
"deb [arch=$arch] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
"deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
$apt_cmd update
echo "Installing docker"
$apt_cmd install docker-ce docker-ce-cli containerd.io
@@ -188,20 +178,12 @@ install_docker() {
}
compose_version () {
local compose_version
compose_version="$(curl -s https://api.github.com/repos/docker/compose/releases/latest | grep 'tag_name' | cut -d\" -f4)"
echo "${compose_version:-v2.18.1}"
}
install_docker_compose() {
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
if [[ ! -f /usr/bin/docker-compose ]];then
echo "++++++++++++++++++++++++"
echo "Installing docker-compose"
compose_url="https://github.com/docker/compose/releases/download/$(compose_version)/docker-compose-$platform-$arch_official"
echo "Downloading docker-compose from $compose_url"
$sudo_cmd curl -L "$compose_url" -o /usr/local/bin/docker-compose
$sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
$sudo_cmd chmod +x /usr/local/bin/docker-compose
$sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
echo "docker-compose installed!"
@@ -267,7 +249,7 @@ bye() { # Prints a friendly good bye message and exits the script.
echo ""
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"
@@ -295,7 +277,7 @@ request_sudo() {
echo -e "\n\n🙇 We will need sudo access to complete the installation."
if (( $EUID != 0 )); then
sudo_cmd="sudo"
echo -e "Please enter your sudo password, if prompted."
echo -e "Please enter your sudo password, if prompt."
# $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
# if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
# echo "Need sudo privileges to proceed with the installation."
@@ -518,7 +500,7 @@ if [[ $status_code -ne 200 ]]; then
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
echo "or reach us on SigNoz for support https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"
@@ -534,7 +516,7 @@ else
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
echo " By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo " By default, retention period is set to 7 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
echo " To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"

View File

@@ -1,14 +0,0 @@
{
"name": "e2e",
"version": "1.0.0",
"main": "index.js",
"license": "MIT",
"devDependencies": {
"@playwright/test": "^1.22.0",
"@types/node": "^20.9.2"
},
"scripts": {},
"dependencies": {
"dotenv": "8.2.0"
}
}

View File

@@ -1,46 +0,0 @@
import { defineConfig, devices } from "@playwright/test";
import dotenv from "dotenv";
dotenv.config();
export default defineConfig({
testDir: "./tests",
fullyParallel: true,
forbidOnly: !!process.env.CI,
name: "Signoz E2E",
retries: process.env.CI ? 2 : 0,
reporter: process.env.CI ? "github" : "list",
preserveOutput: "always",
updateSnapshots: "all",
quiet: false,
testMatch: ["**/*.spec.ts"],
use: {
trace: "on-first-retry",
baseURL:
process.env.PLAYWRIGHT_TEST_BASE_URL || "https://stagingapp.signoz.io/",
},
projects: [
{ name: "setup", testMatch: /.*\.setup\.ts/ },
{
name: "chromium",
use: {
...devices["Desktop Chrome"],
// Use prepared auth state.
storageState: ".auth/user.json",
},
dependencies: ["setup"],
},
],
});

View File

@@ -1,37 +0,0 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import dotenv from "dotenv";
dotenv.config();
const authFile = ".auth/user.json";
test("E2E Login Test", async ({ page }) => {
await Promise.all([page.goto("/"), page.waitForRequest("**/version")]);
const signup = "Monitor your applications. Find what is causing issues.";
const el = await page.locator(`text=${signup}`);
expect(el).toBeVisible();
await page
.locator("id=loginEmail")
.type(
process.env.PLAYWRIGHT_USERNAME ? process.env.PLAYWRIGHT_USERNAME : ""
);
await page.getByText("Next").click();
await page
.locator('input[id="currentPassword"]')
.fill(
process.env.PLAYWRIGHT_PASSWORD ? process.env.PLAYWRIGHT_PASSWORD : ""
);
await page.locator('button[data-attr="signup"]').click();
await expect(page).toHaveURL(ROUTES.APPLICATION);
await page.context().storageState({ path: authFile });
});

View File

@@ -1,10 +0,0 @@
export const SERVICE_TABLE_HEADERS = {
APPLICATION: "Applicaton",
P99LATENCY: "P99 latency (in ms)",
ERROR_RATE: "Error Rate (% of total)",
OPS_PER_SECOND: "Operations Per Second",
};
export const DATA_TEST_IDS = {
NEW_DASHBOARD_BTN: "create-new-dashboard",
};

View File

@@ -1,40 +0,0 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import { DATA_TEST_IDS, SERVICE_TABLE_HEADERS } from "./contants";
test("Basic Navigation Check across different resources", async ({ page }) => {
// route to services page and check if the page renders fine with BE contract
await Promise.all([
page.goto(ROUTES.APPLICATION),
page.waitForRequest("**/v1/services"),
]);
const p99Latency = page.locator(
`th:has-text("${SERVICE_TABLE_HEADERS.P99LATENCY}")`
);
await expect(p99Latency).toBeVisible();
// route to the new trace explorer page and check if the page renders fine
await page.goto(ROUTES.TRACES_EXPLORER);
await page.waitForLoadState("networkidle");
const listViewTable = await page
.locator('div[role="presentation"]')
.isVisible();
expect(listViewTable).toBeTruthy();
// route to the dashboards page and check if the page renders fine
await Promise.all([
page.goto(ROUTES.ALL_DASHBOARD),
page.waitForRequest("**/v1/dashboards"),
]);
const newDashboardBtn = await page
.locator(`data-testid=${DATA_TEST_IDS.NEW_DASHBOARD_BTN}`)
.isVisible();
expect(newDashboardBtn).toBeTruthy();
});

View File

@@ -1,46 +0,0 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@playwright/test@^1.22.0":
version "1.40.0"
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.40.0.tgz#d06c506977dd7863aa16e07f2136351ecc1be6ed"
integrity sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==
dependencies:
playwright "1.40.0"
"@types/node@^20.9.2":
version "20.9.2"
resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.2.tgz#002815c8e87fe0c9369121c78b52e800fadc0ac6"
integrity sha512-WHZXKFCEyIUJzAwh3NyyTHYSR35SevJ6mZ1nWwJafKtiQbqRTIKSRcw3Ma3acqgsent3RRDqeVwpHntMk+9irg==
dependencies:
undici-types "~5.26.4"
dotenv@8.2.0:
version "8.2.0"
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a"
integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw==
fsevents@2.3.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
playwright-core@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.40.0.tgz#82f61e5504cb3097803b6f8bbd98190dd34bdf14"
integrity sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==
playwright@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.40.0.tgz#2a1824b9fe5c4fe52ed53db9ea68003543a99df0"
integrity sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==
dependencies:
playwright-core "1.40.0"
optionalDependencies:
fsevents "2.3.2"
undici-types@~5.26.4:
version "5.26.5"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==

View File

@@ -1,31 +1,48 @@
FROM golang:1.18-buster AS builder
# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
ARG LD_FLAGS
ARG TARGETPLATFORM
ENV CGO_ENABLED=1
ENV GOPATH=/go
RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2)
# Prepare and enter src directory
WORKDIR /go/src/github.com/signoz/signoz
# Add the sources and proceed with build
ADD . .
RUN cd ee/query-service \
&& go build -tags timetzdata -a -o ./bin/query-service \
-ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" \
&& chmod +x ./bin/query-service
# use a minimal alpine image
FROM alpine:3.18.5
FROM alpine:3.7
# Add Maintainer Info
LABEL maintainer="signoz"
# define arguments that can be passed during build time
ARG TARGETOS TARGETARCH
# add ca-certificates in case you need them
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
# set working directory
WORKDIR /root
# copy the query-service binary
COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service
# copy the binary from builder
COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-service .
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service
# run the binary
ENTRYPOINT ["./query-service"]
CMD ["-config", "/root/config/prometheus.yml"]
CMD ["-config", "../config/prometheus.yml"]
# CMD ["./query-service -config /root/config/prometheus.yml"]
EXPOSE 8080

View File

@@ -2,16 +2,12 @@ package api
import (
"net/http"
"time"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/cache"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
rules "go.signoz.io/signoz/pkg/query-service/rules"
@@ -19,22 +15,11 @@ import (
)
type APIHandlerOptions struct {
DataConnector interfaces.DataConnector
SkipConfig *basemodel.SkipConfig
PreferDelta bool
PreferSpanMetrics bool
MaxIdleConns int
MaxOpenConns int
DialTimeout time.Duration
AppDao dao.ModelDao
RulesManager *rules.Manager
UsageManager *usage.Manager
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
Cache cache.Cache
// Querier Influx Interval
FluxInterval time.Duration
DataConnector interfaces.DataConnector
AppDao dao.ModelDao
RulesManager *rules.Manager
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
}
type APIHandler struct {
@@ -46,20 +31,10 @@ type APIHandler struct {
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
SkipConfig: opts.SkipConfig,
PerferDelta: opts.PreferDelta,
PreferSpanMetrics: opts.PreferSpanMetrics,
MaxIdleConns: opts.MaxIdleConns,
MaxOpenConns: opts.MaxOpenConns,
DialTimeout: opts.DialTimeout,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
Cache: opts.Cache,
FluxInterval: opts.FluxInterval,
})
Reader: opts.DataConnector,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags})
if err != nil {
return nil, err
@@ -84,10 +59,6 @@ func (ah *APIHandler) LM() *license.Manager {
return ah.opts.LicenseManager
}
func (ah *APIHandler) UM() *usage.Manager {
return ah.opts.UsageManager
}
func (ah *APIHandler) AppDao() dao.ModelDao {
return ah.opts.AppDao
}
@@ -156,17 +127,6 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.getPATs)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/pat/{id}", am.OpenAccess(ah.deletePAT)).Methods(http.MethodDelete)
router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.checkout)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.portalSession)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v2/licenses",
am.ViewAccess(ah.listLicensesV2)).
Methods(http.MethodGet)
ah.APIHandler.RegisterRoutes(router, am)
}

View File

@@ -5,23 +5,22 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"github.com/gorilla/mux"
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func parseRequest(r *http.Request, req interface{}) error {
defer r.Body.Close()
requestBody, err := io.ReadAll(r.Body)
requestBody, err := ioutil.ReadAll(r.Body)
if err != nil {
return err
}
@@ -72,7 +71,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
var req *baseauth.RegisterRequest
defer r.Body.Close()
requestBody, err := io.ReadAll(r.Body)
requestBody, err := ioutil.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("received no input in api\n", err)
RespondError(w, model.BadRequest(err), nil)
@@ -108,13 +107,13 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
}
precheckResp := &basemodel.PrecheckResponse{
precheckResp := &model.PrecheckResponse{
SSO: false,
IsUser: false,
}
if domain != nil && domain.SsoEnabled {
// sso is enabled, create user and respond precheck data
// so is enabled, create user and respond precheck data
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
if apierr != nil {
RespondError(w, apierr, nil)

View File

@@ -1,51 +0,0 @@
package api
import (
"github.com/gorilla/mux"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/model"
"net/http"
)
func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
ah.lockUnlockDashboard(w, r, true)
}
func (ah *APIHandler) unlockDashboard(w http.ResponseWriter, r *http.Request) {
ah.lockUnlockDashboard(w, r, false)
}
func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request, lock bool) {
// Locking can only be done by the owner of the dashboard
// or an admin
// - Fetch the dashboard
// - Check if the user is the owner or an admin
// - If yes, lock/unlock the dashboard
// - If no, return 403
// Get the dashboard UUID from the request
uuid := mux.Vars(r)["uuid"]
dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
return
}
user := common.GetUserFromContext(r.Context())
if !auth.IsAdmin(user) && (dashboard.CreateBy != nil && *dashboard.CreateBy != user.Email) {
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: err}, "You are not authorized to lock/unlock this dashboard")
return
}
// Lock/Unlock the dashboard
err = dashboards.LockUnlockDashboard(r.Context(), uuid, lock)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
return
}
ah.Respond(w, "Dashboard updated successfully")
}

View File

@@ -2,23 +2,9 @@ package api
import (
"net/http"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
featureSet, err := ah.FF().GetFeatureFlags()
if err != nil {
ah.HandleError(w, err, http.StatusInternalServerError)
return
}
if ah.opts.PreferSpanMetrics {
for idx := range featureSet {
feature := &featureSet[idx]
if feature.Name == basemodel.UseSpanMetrics {
featureSet[idx].Active = true
}
}
}
featureSet := ah.FF().GetFeatureFlags()
ah.Respond(w, featureSet)
}

View File

@@ -4,45 +4,10 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.uber.org/zap"
"net/http"
)
type tierBreakdown struct {
UnitPrice float64 `json:"unitPrice"`
Quantity float64 `json:"quantity"`
TierStart int64 `json:"tierStart"`
TierEnd int64 `json:"tierEnd"`
TierCost float64 `json:"tierCost"`
}
type usageResponse struct {
Type string `json:"type"`
Unit string `json:"unit"`
Tiers []tierBreakdown `json:"tiers"`
}
type details struct {
Total float64 `json:"total"`
Breakdown []usageResponse `json:"breakdown"`
BaseFee float64 `json:"baseFee"`
BillTotal float64 `json:"billTotal"`
}
type billingDetails struct {
Status string `json:"status"`
Data struct {
BillingPeriodStart int64 `json:"billingPeriodStart"`
BillingPeriodEnd int64 `json:"billingPeriodEnd"`
Details details `json:"details"`
Discount float64 `json:"discount"`
} `json:"data"`
}
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
@@ -52,6 +17,7 @@ func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
}
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
var l model.License
if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
@@ -63,7 +29,8 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
license, apiError := ah.LM().Activate(r.Context(), l.Key)
license, apiError := ah.LM().Activate(ctx, l.Key)
if apiError != nil {
RespondError(w, apiError, nil)
return
@@ -71,186 +38,3 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, license)
}
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
type checkoutResponse struct {
Status string `json:"status"`
Data struct {
RedirectURL string `json:"redirectURL"`
} `json:"data"`
}
hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
// decode response body
var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
ah.Respond(w, resp.Data)
}
func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
licenseKey := r.URL.Query().Get("licenseKey")
if licenseKey == "" {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
billingURL := fmt.Sprintf("%s/usage?licenseKey=%s", constants.LicenseSignozIo, licenseKey)
hClient := &http.Client{}
req, err := http.NewRequest("GET", billingURL, nil)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
billingResp, err := hClient.Do(req)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
// decode response body
var billingResponse billingDetails
if err := json.NewDecoder(billingResp.Body).Decode(&billingResponse); err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
// TODO(srikanthccv):Fetch the current day usage and add it to the response
ah.Respond(w, billingResponse.Data)
}
func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
RespondError(w, apiError, nil)
}
resp := model.Licenses{
TrialStart: -1,
TrialEnd: -1,
OnTrial: false,
WorkSpaceBlock: false,
TrialConvertedToSubscription: false,
GracePeriodEnd: -1,
Licenses: licenses,
}
var currentActiveLicenseKey string
for _, license := range licenses {
if license.IsCurrent {
currentActiveLicenseKey = license.Key
}
}
// For the case when no license is applied i.e community edition
// There will be no trial details or license details
if currentActiveLicenseKey == "" {
ah.Respond(w, resp)
return
}
// Fetch trial details
hClient := &http.Client{}
url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
zap.S().Error("Error while creating request for trial details", err)
// If there is an error in fetching trial details, we will still return the license details
// to avoid blocking the UI
ah.Respond(w, resp)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
trialResp, err := hClient.Do(req)
if err != nil {
zap.S().Error("Error while fetching trial details", err)
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
defer trialResp.Body.Close()
trialRespBody, err := io.ReadAll(trialResp.Body)
if err != nil || trialResp.StatusCode != http.StatusOK {
zap.S().Error("Error while fetching trial details", err)
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
// decode response body
var trialRespData model.SubscriptionServerResp
if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
zap.S().Error("Error while decoding trial details", err)
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
resp.TrialStart = trialRespData.Data.TrialStart
resp.TrialEnd = trialRespData.Data.TrialEnd
resp.OnTrial = trialRespData.Data.OnTrial
resp.WorkSpaceBlock = trialRespData.Data.WorkSpaceBlock
resp.TrialConvertedToSubscription = trialRespData.Data.TrialConvertedToSubscription
resp.GracePeriodEnd = trialRespData.Data.GracePeriodEnd
ah.Respond(w, resp)
}
func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
type checkoutResponse struct {
Status string `json:"status"`
Data struct {
RedirectURL string `json:"redirectURL"`
} `json:"data"`
}
hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
// decode response body
var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
ah.Respond(w, resp.Data)
}

View File

@@ -137,8 +137,8 @@ func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request
var s basemodel.Series
s.QueryName = name
s.Labels = v.Metric.Copy().Map()
for _, p := range v.Floats {
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.F})
for _, p := range v.Points {
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.V})
}
seriesList = append(seriesList, &s)
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
@@ -48,18 +47,8 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
req.CreatedAt = time.Now().Unix()
req.Token = generatePATToken()
// default expiry is 30 days
if req.ExpiresAt == 0 {
req.ExpiresAt = time.Now().AddDate(0, 0, 30).Unix()
}
// max expiry is 1 year
if req.ExpiresAt > time.Now().AddDate(1, 0, 0).Unix() {
req.ExpiresAt = time.Now().AddDate(1, 0, 0).Unix()
}
zap.S().Debugf("Got PAT request: %+v", req)
var apierr basemodel.BaseApiError
if req, apierr = ah.AppDao().CreatePAT(ctx, req); apierr != nil {
if apierr := ah.AppDao().CreatePAT(ctx, &req); apierr != nil {
RespondError(w, apierr, nil)
return
}

View File

@@ -1,8 +1,6 @@
package db
import (
"time"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/jmoiron/sqlx"
@@ -17,16 +15,8 @@ type ClickhouseReader struct {
*basechr.ClickHouseReader
}
func NewDataConnector(
localDB *sqlx.DB,
promConfigPath string,
lm interfaces.FeatureLookup,
maxIdleConns int,
maxOpenConns int,
dialTimeout time.Duration,
cluster string,
) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
func NewDataConnector(localDB *sqlx.DB, promConfigPath string, lm interfaces.FeatureLookup) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm)
return &ClickhouseReader{
conn: ch.GetConn(),
appdb: localDB,

View File

@@ -5,7 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // http profiler
@@ -20,13 +20,8 @@ import (
"github.com/soheilhy/cmux"
"go.signoz.io/signoz/ee/query-service/app/api"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/auth"
baseInterface "go.signoz.io/signoz/pkg/query-service/interfaces"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
@@ -34,11 +29,9 @@ import (
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/app/opamp"
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/cache"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
@@ -54,21 +47,12 @@ import (
const AppDbEngine = "sqlite"
type ServerOptions struct {
PromConfigPath string
SkipTopLvlOpsPath string
HTTPHostPort string
PrivateHostPort string
PromConfigPath string
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
PreferDelta bool
PreferSpanMetrics bool
MaxIdleConns int
MaxOpenConns int
DialTimeout time.Duration
CacheConfigPath string
FluxInterval string
Cluster string
DisableRules bool
RuleRepoURL string
}
// Server runs HTTP api service
@@ -89,11 +73,6 @@ type Server struct {
// feature flags
featureLookup baseint.FeatureLookup
// Usage manager
usageManager *usage.Manager
opampServer *opamp.Server
unavailableChannel chan healthcheck.Status
}
@@ -134,27 +113,11 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
zap.S().Info("Using ClickHouse as datastore ...")
qb := db.NewDataConnector(
localDB,
serverOptions.PromConfigPath,
lm,
serverOptions.MaxIdleConns,
serverOptions.MaxOpenConns,
serverOptions.DialTimeout,
serverOptions.Cluster,
)
qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath, lm)
go qb.Start(readerReady)
reader = qb
} else {
return nil, fmt.Errorf("storage type: %s is not supported in query service", storage)
}
skipConfig := &basemodel.SkipConfig{}
if serverOptions.SkipTopLvlOpsPath != "" {
// read skip config
skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
if err != nil {
return nil, err
}
return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
}
<-readerReady
@@ -163,8 +126,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.RuleRepoURL,
localDB,
reader,
serverOptions.DisableRules,
lm)
serverOptions.DisableRules)
if err != nil {
return nil, err
@@ -176,19 +138,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
// ingestion pipelines manager
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(localDB, "sqlite")
if err != nil {
return nil, err
}
// initiate agent config handler
agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
DB: localDB,
DBEngine: AppDbEngine,
AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
})
if err != nil {
if err := agentConf.Initiate(localDB, AppDbEngine); err != nil {
return nil, err
}
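The newer call site replaces positional arguments (Initiate(localDB, AppDbEngine)) with an options struct, so fields such as AgentFeatures can be added without breaking existing callers. A self-contained sketch of that pattern; ManagerOptions and its fields here are stand-ins, not the real agentConf types:

package main

import "fmt"

type AgentFeature string // stand-in for agentConf.AgentFeature

type ManagerOptions struct {
	DBEngine      string
	AgentFeatures []AgentFeature
}

func Initiate(opts *ManagerOptions) error {
	fmt.Printf("agent config manager: engine=%s features=%d\n",
		opts.DBEngine, len(opts.AgentFeatures))
	return nil
}

func main() {
	// Adding a field later (say, a cache handle) changes no call sites.
	_ = Initiate(&ManagerOptions{
		DBEngine:      "sqlite",
		AgentFeatures: []AgentFeature{"logPipelines"},
	})
}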
@@ -203,39 +154,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
telemetry.GetInstance().SetReader(reader)
telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
var c cache.Cache
if serverOptions.CacheConfigPath != "" {
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
if err != nil {
return nil, err
}
c = cache.NewCache(cacheOpts)
}
fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
if err != nil {
return nil, err
}
apiOpts := api.APIHandlerOptions{
DataConnector: reader,
SkipConfig: skipConfig,
PreferDelta: serverOptions.PreferDelta,
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
MaxIdleConns: serverOptions.MaxIdleConns,
MaxOpenConns: serverOptions.MaxOpenConns,
DialTimeout: serverOptions.DialTimeout,
AppDao: modelDao,
RulesManager: rm,
UsageManager: usageManager,
FeatureFlags: lm,
LicenseManager: lm,
LogsParsingPipelineController: logParsingPipelineController,
Cache: c,
FluxInterval: fluxInterval,
DataConnector: reader,
AppDao: modelDao,
RulesManager: rm,
FeatureFlags: lm,
LicenseManager: lm,
}
apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -249,7 +174,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
ruleManager: rm,
serverOptions: serverOptions,
unavailableChannel: make(chan healthcheck.Status),
usageManager: usageManager,
}
httpServer, err := s.createPublicServer(apiHandler)
@@ -267,10 +191,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
s.privateHTTP = privateServer
s.opampServer = opamp.InitializeServer(
&opAmpModel.AllAgents, agentConfMgr,
)
return s, nil
}
@@ -354,7 +274,7 @@ func loggingMiddleware(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.L().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
zap.S().Info(path, "\ttimeTaken: ", time.Now().Sub(startTime))
})
}
@@ -366,7 +286,7 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.L().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
zap.S().Info(path, "\tprivatePort: true", "\ttimeTaken: ", time.Now().Sub(startTime))
})
}
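Both middleware hunks swap a sugared, printf-style log call for the structured logger with typed fields; note also that time.Since(startTime) is the idiomatic spelling of time.Now().Sub(startTime). A small sketch contrasting the two forms, with an illustrative path and duration:

package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for next.ServeHTTP(w, r)
	elapsed := time.Since(start)

	// Sugared form: arguments are stitched together loosely.
	zap.S().Info("/api/v1/health", "\ttimeTaken: ", elapsed)

	// Structured form: typed fields, cheaper to encode and machine-parseable.
	zap.L().Info("/api/v1/health",
		zap.String("path", "/api/v1/health"),
		zap.Duration("timeTaken", elapsed))
}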
@@ -391,20 +311,20 @@ func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}
func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFrom := "/api/v3/query_range"
func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFrom := "/api/v2/metrics/query_range"
data := map[string]interface{}{}
var postData *v3.QueryRangeParamsV3
var postData *basemodel.QueryRangeParamsV2
if path == pathToExtractBodyFrom && (r.Method == "POST") {
if r.Body != nil {
bodyBytes, err := io.ReadAll(r.Body)
bodyBytes, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, false
}
r.Body.Close() // must close
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &postData)
} else {
@@ -415,34 +335,24 @@ func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface
return nil, false
}
signozMetricsUsed := false
signozLogsUsed := false
dataSources := []string{}
signozMetricNotFound := false
if postData != nil {
signozMetricNotFound = telemetry.GetInstance().CheckSigNozMetricsV2(postData.CompositeMetricQuery)
if postData.CompositeQuery != nil {
data["queryType"] = postData.CompositeQuery.QueryType
data["panelType"] = postData.CompositeQuery.PanelType
signozLogsUsed, signozMetricsUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
if postData.CompositeMetricQuery != nil {
data["queryType"] = postData.CompositeMetricQuery.QueryType
data["panelType"] = postData.CompositeMetricQuery.PanelType
}
data["datasource"] = postData.DataSource
}
if signozMetricsUsed || signozLogsUsed {
if signozMetricsUsed {
dataSources = append(dataSources, "metrics")
telemetry.GetInstance().AddActiveMetricsUser()
}
if signozLogsUsed {
dataSources = append(dataSources, "logs")
telemetry.GetInstance().AddActiveLogsUser()
}
data["dataSources"] = dataSources
userEmail, err := auth.GetEmailFromJwt(r.Context())
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_V3, data, userEmail, true)
}
if signozMetricNotFound {
telemetry.GetInstance().AddActiveMetricsUser()
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
}
return data, true
}
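Both variants of this extractor consume the request body for telemetry and must hand an unread body back to the actual handler; the newer code only trades the deprecated ioutil helpers for their io equivalents. A condensed sketch of the capture-and-restore pattern (peekJSON is a hypothetical helper, not a name from the diff):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/http/httptest"
	"strings"
)

// peekJSON reads the body once, decodes it, then restores it so that
// downstream handlers can read the same bytes again.
func peekJSON(r *http.Request, dst interface{}) error {
	if r.Body == nil {
		return nil
	}
	bodyBytes, err := io.ReadAll(r.Body)
	if err != nil {
		return err
	}
	r.Body.Close() // must close the original reader
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
	return json.Unmarshal(bodyBytes, dst)
}

func main() {
	req := httptest.NewRequest("POST", "/api/v3/query_range", strings.NewReader(`{"start":0}`))
	var payload map[string]interface{}
	if err := peekJSON(req, &payload); err != nil {
		log.Fatal(err)
	}
	rest, _ := io.ReadAll(req.Body) // body is intact for the next reader
	fmt.Println(payload, string(rest))
}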
@@ -462,12 +372,10 @@ func getActiveLogs(path string, r *http.Request) {
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := auth.AttachJwtToContext(r.Context(), r)
r = r.WithContext(ctx)
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
queryRangeV3data, metadataExists := extractQueryRangeV3Data(path, r)
dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
getActiveLogs(path, r)
lrw := NewLoggingResponseWriter(w)
@@ -475,16 +383,13 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if metadataExists {
for key, value := range queryRangeV3data {
for key, value := range dashboardMetadata {
data[key] = value
}
}
if _, ok := telemetry.EnabledPaths()[path]; ok {
userEmail, err := auth.GetEmailFromJwt(r.Context())
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail)
}
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
}
})
@@ -497,7 +402,7 @@ func setTimeoutMiddleware(next http.Handler) http.Handler {
// check if route is not excluded
url := r.URL.Path
if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout)
ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout*time.Second)
defer cancel()
}
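The only change in this hunk is where the time unit lives: one side multiplies a bare numeric constant by time.Second at the call site, the other declares ContextTimeout as a time.Duration up front. A tiny sketch of the difference, with an illustrative 60-second value:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Unit applied at the call site: easy to forget, or to apply twice.
	const contextTimeoutSeconds = 60
	ctx1, cancel1 := context.WithTimeout(context.Background(), contextTimeoutSeconds*time.Second)
	defer cancel1()

	// Unit baked into the constant: every call site gets it right for free.
	const contextTimeout = 60 * time.Second
	ctx2, cancel2 := context.WithTimeout(context.Background(), contextTimeout)
	defer cancel2()

	d1, _ := ctx1.Deadline()
	d2, _ := ctx2.Deadline()
	fmt.Println(d1, d2)
}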
@@ -601,7 +506,7 @@ func (s *Server) Start() error {
go func() {
zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
err := opamp.InitalizeServer(baseconst.OpAmpWsEndpoint, &opAmpModel.AllAgents)
if err != nil {
zap.S().Info("opamp ws server failed to start", err)
s.unavailableChannel <- healthcheck.Unavailable
@@ -624,15 +529,12 @@ func (s *Server) Stop() error {
}
}
s.opampServer.Stop()
opamp.StopServer()
if s.ruleManager != nil {
s.ruleManager.Stop()
}
// stop usage manager
s.usageManager.Stop()
return nil
}
@@ -642,8 +544,7 @@ func makeRulesManager(
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
disableRules bool,
fm baseInterface.FeatureLookup) (*rules.Manager, error) {
disableRules bool) (*rules.Manager, error) {
// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -670,7 +571,6 @@ func makeRulesManager(
Context: context.Background(),
Logger: nil,
DisableRules: disableRules,
FeatureFlags: fm,
}
// create Manager

View File

@@ -9,8 +9,7 @@ const (
)
var LicenseSignozIo = "https://license.signoz.io/api/v1"
var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
func GetOrDefaultEnv(key string, fallback string) string {

View File

@@ -21,6 +21,7 @@ type ModelDao interface {
DB() *sqlx.DB
// auth methods
PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
@@ -33,7 +34,7 @@ type ModelDao interface {
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError)
CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)

View File

@@ -5,60 +5,15 @@ import (
"fmt"
"net/url"
"strings"
"time"
"github.com/google/uuid"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.uber.org/zap"
)
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
// get auth domain from email domain
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
zap.S().Errorf("failed to get domain from email", apierr)
return nil, model.InternalErrorStr("failed to get domain from email")
}
hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
if err != nil {
zap.S().Errorf("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
return nil, model.InternalErrorStr("failed to generate password hash")
}
group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
if apiErr != nil {
zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
return nil, apiErr
}
user := &basemodel.User{
Id: uuid.NewString(),
Name: "",
Email: email,
Password: hash,
CreatedAt: time.Now().Unix(),
ProfilePictureURL: "", // Currently unused
GroupId: group.Id,
OrgId: domain.OrgId,
}
user, apiErr = m.CreateUser(ctx, user, false)
if apiErr != nil {
zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err)
return nil, apiErr
}
return user, nil
}
// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
@@ -69,20 +24,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
return "", model.BadRequestStr("invalid user email received from the auth provider")
}
user := &basemodel.User{}
if userPayload == nil {
newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
user = newUser
if apiErr != nil {
zap.S().Errorf("failed to create user with email received from auth provider: %v", apierr.Error())
return "", apiErr
}
} else {
user = &userPayload.User
}
tokenStore, err := baseauth.GenerateJWTForUser(user)
tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
if err != nil {
zap.S().Errorf("failed to generate token for SSO login user", err)
return "", model.InternalErrorStr("failed to generate token for the user")
@@ -91,7 +33,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
user.Id,
userPayload.User.Id,
tokenStore.RefreshJwt), nil
}
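Both variants assemble the redirect with fmt.Sprintf. JWTs are base64url-encoded and survive that, but net/url makes the escaping explicit and is the safer default. A sketch under that observation; every value below is a placeholder:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("jwt", "<access-jwt>")         // placeholder token
	q.Set("usr", "<user-id>")            // placeholder user id
	q.Set("refreshjwt", "<refresh-jwt>") // placeholder refresh token
	redirect := "https://signoz.example.com/login?" + q.Encode()
	fmt.Println(redirect)
}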
@@ -120,10 +62,10 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base
// PrecheckLogin is called when the login or signup page is loaded
// to check sso login is to be prompted
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) {
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError) {
// assume user is valid unless proven otherwise
resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}
resp := &model.PrecheckResponse{IsUser: true, CanSelfRegister: false}
// check if email is a valid user
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
@@ -134,7 +76,6 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
if userPayload == nil {
resp.IsUser = false
}
ssoAvailable := true
err := m.checkFeature(model.SSO)
if err != nil {
@@ -150,8 +91,6 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
if ssoAvailable {
resp.IsUser = true
// find domain from email
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {

View File

@@ -4,8 +4,8 @@ import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/url"
"fmt"
"strings"
"time"
@@ -29,69 +29,28 @@ type StoredDomain struct {
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
// user domain. The domain is further used to process validity of the response.
// when sending login request to IdP we send relay state as URL (site url)
// with domainId or domainName as query parameter.
// with domainId as query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
var domainNameStr string
var domain *model.OrgDomain
for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
domainIdStr = strings.Replace(v[0], ":", "-", -1)
}
if k == "domainName" && len(v) > 0 {
domainNameStr = v[0]
}
}
if domainIdStr != "" {
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
zap.S().Errorf("failed to parse domainId from relay state", err)
return nil, fmt.Errorf("failed to parse domainId from IdP response")
}
domain, err = m.GetDomain(ctx, domainId)
if (err != nil) || domain == nil {
zap.S().Errorf("failed to find domain from domainId received in IdP response", err.Error())
return nil, fmt.Errorf("invalid credentials")
}
}
if domainNameStr != "" {
domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
domain = domainFromDB
if (err != nil) || domain == nil {
zap.S().Errorf("failed to find domain from domainName received in IdP response", err.Error())
return nil, fmt.Errorf("invalid credentials")
}
}
if domain != nil {
return domain, nil
}
return nil, fmt.Errorf("failed to find domain received in IdP response")
}
// GetDomainByName returns org domain for a given domain name
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
if err == sql.ErrNoRows {
return nil, model.BadRequest(fmt.Errorf("invalid domain name"))
}
return nil, model.InternalError(err)
zap.S().Errorf("failed to parse domain id from relay state", err)
return nil, fmt.Errorf("failed to parse response from IdP response")
}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
domain, err := m.GetDomain(ctx, domainId)
if (err != nil) || domain == nil {
zap.S().Errorf("failed to find domain received in IdP response", err.Error())
return nil, fmt.Errorf("invalid credentials")
}
return domain, nil
}
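The richer variant accepts either a domainId or a domainName query parameter on the relay-state URL, and normalises ':' to '-' before parsing the id as a UUID. A compact sketch of that parsing; the URL and id are hypothetical:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	relayState, err := url.Parse("https://signoz.example.com/login?domainId=8f0a:4c21:9d3e") // hypothetical
	if err != nil {
		panic(err)
	}
	var domainID, domainName string
	for k, v := range relayState.Query() {
		if k == "domainId" && len(v) > 0 {
			domainID = strings.ReplaceAll(v[0], ":", "-") // same normalisation as the hunk
		}
		if k == "domainName" && len(v) > 0 {
			domainName = v[0]
		}
	}
	fmt.Printf("domainId=%q domainName=%q\n", domainID, domainName)
}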
@@ -110,7 +69,7 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomai
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
return domain, model.InternalError(err)
}
return domain, nil
}
@@ -247,7 +206,7 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
return domain, model.InternalError(err)
}
return domain, nil
}

View File

@@ -3,15 +3,14 @@ package sqlite
import (
"context"
"fmt"
"strconv"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) {
result, err := m.DB().ExecContext(ctx,
func (m *modelDao) CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError {
_, err := m.DB().ExecContext(ctx,
"INSERT INTO personal_access_tokens (user_id, token, name, created_at, expires_at) VALUES ($1, $2, $3, $4, $5)",
p.UserID,
p.Token,
@@ -20,15 +19,9 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem
p.ExpiresAt)
if err != nil {
zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
return model.InternalError(fmt.Errorf("PAT insertion failed"))
}
id, err := result.LastInsertId()
if err != nil {
zap.S().Errorf("Failed to get last inserted id, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
}
p.Id = strconv.Itoa(int(id))
return p, nil
return nil
}
func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
@@ -97,7 +90,7 @@ func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.U
u.org_id,
u.group_id
FROM users u, personal_access_tokens p
WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`
WHERE u.id = p.user_id and p.token=?;`
if err := m.DB().Select(&users, query, token); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
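Two PAT behaviours differ between the sides of this file: CreatePAT either fires the insert and forgets, or captures LastInsertId so the caller gets the stored row back, and GetUserByPAT either accepts any token or filters on expires_at against strftime('%s', 'now'). A runnable sketch of both, assuming the mattn/go-sqlite3 driver and an in-memory database:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE personal_access_tokens (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		token TEXT,
		expires_at INTEGER)`); err != nil {
		panic(err)
	}

	// Create-and-return: capture the auto-increment id so the caller
	// receives the stored row, as the richer CreatePAT does.
	res, err := db.Exec(`INSERT INTO personal_access_tokens (token, expires_at)
		VALUES (?, strftime('%s', 'now', '+1 day'))`, "tok_123")
	if err != nil {
		panic(err)
	}
	id, _ := res.LastInsertId()
	fmt.Println("created PAT id:", id)

	// Expiry filter matching the stricter GetUserByPAT query: only tokens
	// whose expires_at is still in the future resolve to a user.
	var n int
	err = db.QueryRow(`SELECT count(*) FROM personal_access_tokens
		WHERE token = ? AND expires_at >= strftime('%s', 'now')`, "tok_123").Scan(&n)
	if err != nil {
		panic(err)
	}
	fmt.Println("valid tokens:", n)
}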

View File

@@ -6,13 +6,13 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.uber.org/zap"
)
var C *Client
@@ -51,7 +51,7 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
}
httpBody, err := io.ReadAll(httpResponse.Body)
httpBody, err := ioutil.ReadAll(httpResponse.Body)
if err != nil {
zap.S().Errorf("failed to read activation response from license.signoz.io", err)
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
@@ -91,7 +91,7 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
}
body, err := io.ReadAll(response.Body)
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
}

View File

@@ -2,7 +2,6 @@ package license
import (
"context"
"database/sql"
"fmt"
"time"
@@ -10,7 +9,6 @@ import (
"go.signoz.io/signoz/ee/query-service/license/sqlite"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
@@ -127,79 +125,3 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,
return nil
}
func (r *Repo) CreateFeature(req *basemodel.Feature) *basemodel.ApiError {
_, err := r.db.Exec(
`INSERT INTO feature_status (name, active, usage, usage_limit, route)
VALUES (?, ?, ?, ?, ?);`,
req.Name, req.Active, req.Usage, req.UsageLimit, req.Route)
if err != nil {
return &basemodel.ApiError{Typ: basemodel.ErrorInternal, Err: err}
}
return nil
}
func (r *Repo) GetFeature(featureName string) (basemodel.Feature, error) {
var feature basemodel.Feature
err := r.db.Get(&feature,
`SELECT * FROM feature_status WHERE name = ?;`, featureName)
if err != nil {
return feature, err
}
if feature.Name == "" {
return feature, basemodel.ErrFeatureUnavailable{Key: featureName}
}
return feature, nil
}
func (r *Repo) GetAllFeatures() ([]basemodel.Feature, error) {
var feature []basemodel.Feature
err := r.db.Select(&feature,
`SELECT * FROM feature_status;`)
if err != nil {
return feature, err
}
return feature, nil
}
func (r *Repo) UpdateFeature(req basemodel.Feature) error {
_, err := r.db.Exec(
`UPDATE feature_status SET active = ?, usage = ?, usage_limit = ?, route = ? WHERE name = ?;`,
req.Active, req.Usage, req.UsageLimit, req.Route, req.Name)
if err != nil {
return err
}
return nil
}
func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
// get each feature by name; create it if missing, otherwise update it.
for _, feature := range req {
currentFeature, err := r.GetFeature(feature.Name)
if err != nil && err == sql.ErrNoRows {
err := r.CreateFeature(&feature)
if err != nil {
return err
}
continue
} else if err != nil {
return err
}
feature.Usage = currentFeature.Usage
if feature.Usage >= feature.UsageLimit && feature.UsageLimit != -1 {
feature.Active = false
}
err = r.UpdateFeature(feature)
if err != nil {
return err
}
}
return nil
}

View File

@@ -10,7 +10,6 @@ import (
"sync"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
@@ -97,11 +96,6 @@ func (lm *Manager) SetActive(l *model.License) {
lm.activeFeatures = l.FeatureSet
// set default features
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.S().Panicf("Couldn't activate features: %v", err)
}
if !lm.validatorRunning {
// we want to make sure only one validator runs,
// we already have lock() so good to go
@@ -112,7 +106,9 @@ func (lm *Manager) SetActive(l *model.License) {
}
func setDefaultFeatures(lm *Manager) {
lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...)
for k, v := range baseconstants.DEFAULT_FEATURE_SET {
lm.activeFeatures[k] = v
}
}
// LoadActiveLicense loads the most recent active license
@@ -127,13 +123,8 @@ func (lm *Manager) LoadActiveLicense() error {
} else {
zap.S().Info("No active license found, defaulting to basic plan")
// if no active license is found, we default to basic(free) plan with all default features
lm.activeFeatures = model.BasicPlan
lm.activeFeatures = basemodel.BasicPlan
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.S().Error("Couldn't initialize features: ", err)
return err
}
}
return nil
@@ -204,7 +195,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
zap.S().Errorf("License validation completed with error", reterr)
atomic.AddUint64(&lm.failedAttempts, 1)
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
map[string]interface{}{"err": reterr.Error()}, "")
map[string]interface{}{"err": reterr.Error()})
} else {
zap.S().Info("License validation completed with no errors")
}
@@ -260,11 +251,8 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
defer func() {
if errResponse != nil {
userEmail, err := auth.GetEmailFromJwt(ctx)
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
map[string]interface{}{"err": errResponse.Err.Error()}, userEmail)
}
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
map[string]interface{}{"err": errResponse.Err.Error()})
}
}()
@@ -303,31 +291,18 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
// CheckFeature will be internally used by backend routines
// for feature gating
func (lm *Manager) CheckFeature(featureKey string) error {
feature, err := lm.repo.GetFeature(featureKey)
if err != nil {
return err
}
if feature.Active {
return nil
if value, ok := lm.activeFeatures[featureKey]; ok {
if value {
return nil
}
return basemodel.ErrFeatureUnavailable{Key: featureKey}
}
return basemodel.ErrFeatureUnavailable{Key: featureKey}
}
// GetFeatureFlags returns current active features
func (lm *Manager) GetFeatureFlags() (basemodel.FeatureSet, error) {
return lm.repo.GetAllFeatures()
}
func (lm *Manager) InitFeatures(features basemodel.FeatureSet) error {
return lm.repo.InitFeatures(features)
}
func (lm *Manager) UpdateFeatureFlag(feature basemodel.Feature) error {
return lm.repo.UpdateFeature(feature)
}
func (lm *Manager) GetFeatureFlag(key string) (basemodel.Feature, error) {
return lm.repo.GetFeature(key)
func (lm *Manager) GetFeatureFlags() basemodel.FeatureSet {
return lm.activeFeatures
}
// GetRepo return the license repo
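The two sides model features differently: one as a slice of Feature structs with Active flags and usage limits persisted in a feature_status table, the other as an in-memory map of name to enabled flag. A small sketch of the map-shaped variant; the type and feature names are stand-ins:

package main

import "fmt"

type FeatureSet map[string]bool

type ErrFeatureUnavailable struct{ Key string }

func (e ErrFeatureUnavailable) Error() string {
	return "feature unavailable: " + e.Key
}

// Defaults are merged key by key, mirroring the map-based setDefaultFeatures.
func setDefaults(active, defaults FeatureSet) {
	for k, v := range defaults {
		active[k] = v
	}
}

// Check is a pure map lookup, mirroring the map-based CheckFeature variant.
func (fs FeatureSet) Check(key string) error {
	if enabled, ok := fs[key]; ok && enabled {
		return nil
	}
	return ErrFeatureUnavailable{Key: key}
}

func main() {
	active := FeatureSet{"SSO": true}
	setDefaults(active, FeatureSet{"DISABLE_UPSELL": false}) // hypothetical defaults
	fmt.Println(active.Check("SSO"), active.Check("SMART_TRACE_DETAIL"))
}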

View File

@@ -2,7 +2,6 @@ package sqlite
import (
"fmt"
"github.com/jmoiron/sqlx"
)
@@ -34,19 +33,5 @@ func InitDB(db *sqlx.DB) error {
if err != nil {
return fmt.Errorf("Error in creating licenses table: %s", err.Error())
}
table_schema = `CREATE TABLE IF NOT EXISTS feature_status (
name TEXT PRIMARY KEY,
active bool,
usage INTEGER DEFAULT 0,
usage_limit INTEGER DEFAULT 0,
route TEXT
);`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("Error in creating feature_status table: %s", err.Error())
}
return nil
}

View File

@@ -3,113 +3,43 @@ package main
import (
"context"
"flag"
"log"
"os"
"os/signal"
"strconv"
"syscall"
"time"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.signoz.io/signoz/ee/query-service/app"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/constants"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/version"
"google.golang.org/grpc"
zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
func initZapLog() *zap.Logger {
config := zap.NewDevelopmentConfig()
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
defer stop()
config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
consoleEncoder := zapcore.NewConsoleEncoder(config.EncoderConfig)
defaultLogLevel := zapcore.DebugLevel
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
config.EncoderConfig.TimeKey = "timestamp"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
res := resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String("query-service"),
)
core := zapcore.NewTee(
zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
)
if enableQueryServiceLogOTLPExport == true {
conn, err := grpc.DialContext(ctx, constants.OTLPTarget, grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(time.Second*30))
if err != nil {
log.Println("failed to connect to otlp collector to export query service logs with error:", err)
} else {
logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
if err != nil {
logExportBatchSizeInt = 1000
}
ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
BatchSize: logExportBatchSizeInt,
ResourceSchema: semconv.SchemaURL,
Resource: res,
}))
core = zapcore.NewTee(
zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
zapcore.NewCore(otlpEncoder, zapcore.NewMultiWriteSyncer(ws), defaultLogLevel),
)
}
}
logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))
logger, _ := config.Build()
return logger
}
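The longer initZapLog tees two cores together: a console core that is always on, plus an OTLP syncer attached only when the export flag is set. A minimal sketch of the tee pattern, with io.Discard standing in for the zap_otlp syncer since wiring a real OTLP connection is out of scope here:

package main

import (
	"io"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	exportEnabled := false // assumption: driven by the new CLI flag

	cfg := zap.NewDevelopmentConfig()
	cfg.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
	consoleEncoder := zapcore.NewConsoleEncoder(cfg.EncoderConfig)

	// Console core is always on; a second sink is teed in conditionally.
	core := zapcore.NewTee(
		zapcore.NewCore(consoleEncoder, zapcore.AddSync(os.Stdout), zapcore.DebugLevel),
	)
	if exportEnabled {
		core = zapcore.NewTee(
			zapcore.NewCore(consoleEncoder, zapcore.AddSync(os.Stdout), zapcore.DebugLevel),
			zapcore.NewCore(consoleEncoder, zapcore.AddSync(io.Discard), zapcore.DebugLevel), // stand-in for zap_otlp
		)
	}
	logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))
	defer logger.Sync()
	logger.Info("logger initialised")
}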
func main() {
var promConfigPath, skipTopLvlOpsPath string
var promConfigPath string
// disables rule execution but allows change to the rule definition
var disableRules bool
// the url used to build link in the alert messages in slack and other systems
var ruleRepoURL string
var cluster string
var cacheConfigPath, fluxInterval string
var enableQueryServiceLogOTLPExport bool
var preferDelta bool
var preferSpanMetrics bool
var maxIdleConns int
var maxOpenConns int
var dialTimeout time.Duration
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)")
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
flag.Parse()
loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
loggerMgr := initZapLog()
zap.ReplaceGlobals(loggerMgr)
defer loggerMgr.Sync() // flushes buffer, if any
@@ -117,20 +47,11 @@ func main() {
version.PrintVersion()
serverOptions := &app.ServerOptions{
HTTPHostPort: baseconst.HTTPHostPort,
PromConfigPath: promConfigPath,
SkipTopLvlOpsPath: skipTopLvlOpsPath,
PreferDelta: preferDelta,
PreferSpanMetrics: preferSpanMetrics,
PrivateHostPort: baseconst.PrivateHostPort,
DisableRules: disableRules,
RuleRepoURL: ruleRepoURL,
MaxIdleConns: maxIdleConns,
MaxOpenConns: maxOpenConns,
DialTimeout: dialTimeout,
CacheConfigPath: cacheConfigPath,
FluxInterval: fluxInterval,
Cluster: cluster,
HTTPHostPort: baseconst.HTTPHostPort,
PromConfigPath: promConfigPath,
PrivateHostPort: baseconst.PrivateHostPort,
DisableRules: disableRules,
RuleRepoURL: ruleRepoURL,
}
// Read the jwt secret key
@@ -164,7 +85,6 @@ func main() {
logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
case <-signalsChannel:
logger.Fatal("Received OS Interrupt Signal ... ")
server.Stop()
}
}
}

View File

@@ -4,9 +4,18 @@ import (
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
// PrecheckResponse contains login precheck response
type PrecheckResponse struct {
SSO bool `json:"sso"`
SsoUrl string `json:"ssoUrl"`
CanSelfRegister bool `json:"canSelfRegister"`
IsUser bool `json:"isUser"`
SsoError string `json:"ssoError"`
}
// GettableInvitation overrides base object and adds precheck into
// response
type GettableInvitation struct {
*basemodel.InvitationResponseObject
Precheck *basemodel.PrecheckResponse `json:"precheck"`
Precheck *PrecheckResponse `json:"precheck"`
}

View File

@@ -2,7 +2,6 @@ package model
import (
"fmt"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
@@ -62,6 +61,7 @@ func InternalError(err error) *ApiError {
}
}
// InternalErrorStr returns an ApiError object of internal type for string input
func InternalErrorStr(s string) *ApiError {
return &ApiError{
@@ -69,7 +69,6 @@ func InternalErrorStr(s string) *ApiError {
Err: fmt.Errorf(s),
}
}
var (
ErrorNone basemodel.ErrorType = ""
ErrorTimeout basemodel.ErrorType = "timeout"

View File

@@ -89,18 +89,3 @@ func (l *License) ParseFeatures() {
l.FeatureSet = BasicPlan
}
}
type Licenses struct {
TrialStart int64 `json:"trialStart"`
TrialEnd int64 `json:"trialEnd"`
OnTrial bool `json:"onTrial"`
WorkSpaceBlock bool `json:"workSpaceBlock"`
TrialConvertedToSubscription bool `json:"trialConvertedToSubscription"`
GracePeriodEnd int64 `json:"gracePeriodEnd"`
Licenses []License `json:"licenses"`
}
type SubscriptionServerResp struct {
Status string `json:"status"`
Data Licenses `json:"data"`
}

View File

@@ -6,5 +6,5 @@ type PAT struct {
Token string `json:"token" db:"token"`
Name string `json:"name" db:"name"`
CreatedAt int64 `json:"createdAt" db:"created_at"`
ExpiresAt int64 `json:"expiresAt" db:"expires_at"`
ExpiresAt int64 `json:"expiresAt" db:"expires_at"` // unused as of now
}

View File

@@ -9,287 +9,23 @@ const Basic = "BASIC_PLAN"
const Pro = "PRO_PLAN"
const Enterprise = "ENTERPRISE_PLAN"
const DisableUpsell = "DISABLE_UPSELL"
const Onboarding = "ONBOARDING"
const ChatSupport = "CHAT_SUPPORT"
var BasicPlan = basemodel.FeatureSet{
basemodel.Feature{
Name: SSO,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.OSS,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: DisableUpsell,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.SmartTraceDetail,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.CustomMetricsFunction,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderPanels,
Active: true,
Usage: 0,
UsageLimit: 20,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderAlerts,
Active: true,
Usage: 0,
UsageLimit: 10,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelSlack,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelWebhook,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelPagerduty,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelOpsgenie,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelMsTeams,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
Basic: true,
SSO: false,
DisableUpsell: false,
}
var ProPlan = basemodel.FeatureSet{
basemodel.Feature{
Name: SSO,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.OSS,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.SmartTraceDetail,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.CustomMetricsFunction,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderPanels,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderAlerts,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelSlack,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelWebhook,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelPagerduty,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelOpsgenie,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelMsTeams,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
Pro: true,
SSO: true,
basemodel.SmartTraceDetail: true,
basemodel.CustomMetricsFunction: true,
}
var EnterprisePlan = basemodel.FeatureSet{
basemodel.Feature{
Name: SSO,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.OSS,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.SmartTraceDetail,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.CustomMetricsFunction,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderPanels,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.QueryBuilderAlerts,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelSlack,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelWebhook,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelPagerduty,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelOpsgenie,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AlertChannelMsTeams,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: Onboarding,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: ChatSupport,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
Enterprise: true,
SSO: true,
basemodel.SmartTraceDetail: true,
basemodel.CustomMetricsFunction: true,
}

View File

@@ -9,7 +9,6 @@ import (
"time"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/go-co-op/gocron"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
@@ -29,6 +28,9 @@ const (
)
var (
// send usage every 24 hours
uploadFrequency = 24 * time.Hour
locker = stateUnlocked
)
@@ -37,7 +39,12 @@ type Manager struct {
licenseRepo *license.Repo
scheduler *gocron.Scheduler
// end the usage routine; this is important to stop usage reporting
// gracefully and protect against inconsistent updates
done chan struct{}
// terminated waits for the UsageExporter goroutine to end
terminated chan struct{}
}
func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
@@ -46,7 +53,6 @@ func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn c
// repository: repo,
clickhouseConn: clickhouseConn,
licenseRepo: licenseRepo,
scheduler: gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every day at 00:00 UTC
}
return m, nil
}
@@ -58,30 +64,37 @@ func (lm *Manager) Start() error {
return fmt.Errorf("usage exporter is locked")
}
_, err := lm.scheduler.Do(func() { lm.UploadUsage() })
if err != nil {
return err
}
// upload usage once when starting the service
lm.UploadUsage()
lm.scheduler.StartAsync()
go lm.UsageExporter(context.Background())
return nil
}
func (lm *Manager) UploadUsage() {
ctx := context.Background()
func (lm *Manager) UsageExporter(ctx context.Context) {
defer close(lm.terminated)
uploadTicker := time.NewTicker(uploadFrequency)
defer uploadTicker.Stop()
for {
select {
case <-lm.done:
return
case <-uploadTicker.C:
lm.UploadUsage(ctx)
}
}
}
func (lm *Manager) UploadUsage(ctx context.Context) error {
// check if license is present or not
license, err := lm.licenseRepo.GetActiveLicense(ctx)
license, err := lm.licenseRepo.GetActiveLicense(context.Background())
if err != nil {
zap.S().Errorf("failed to get active license: %v", zap.Error(err))
return
return fmt.Errorf("failed to get active license")
}
if license == nil {
// we will not start the usage reporting if license is not present.
zap.S().Info("no license present, skipping usage reporting")
return
return nil
}
usages := []model.UsageDB{}
@@ -107,8 +120,7 @@ func (lm *Manager) UploadUsage() {
dbusages := []model.UsageDB{}
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
zap.S().Errorf("failed to get usage from clickhouse: %v", zap.Error(err))
return
return err
}
for _, u := range dbusages {
u.Type = db
@@ -118,7 +130,7 @@ func (lm *Manager) UploadUsage() {
if len(usages) <= 0 {
zap.S().Info("no snapshots to upload, skipping.")
return
return nil
}
zap.S().Info("uploading usage data")
@@ -127,15 +139,13 @@ func (lm *Manager) UploadUsage() {
for _, usage := range usages {
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
if err != nil {
zap.S().Errorf("error while decrypting usage data: %v", zap.Error(err))
return
return err
}
usageData := model.Usage{}
err = json.Unmarshal(usageDataBytes, &usageData)
if err != nil {
zap.S().Errorf("error while unmarshalling usage data: %v", zap.Error(err))
return
return err
}
usageData.CollectorID = usage.CollectorID
@@ -150,16 +160,20 @@ func (lm *Manager) UploadUsage() {
LicenseKey: key,
Usage: usagesPayload,
}
lm.UploadUsageWithExponentalBackOff(ctx, payload)
err = lm.UploadUsageWithExponentalBackOff(ctx, payload)
if err != nil {
return err
}
return nil
}
func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) {
func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) error {
for i := 1; i <= MaxRetries; i++ {
apiErr := licenseserver.SendUsage(ctx, payload)
if apiErr != nil && i == MaxRetries {
zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
// not returning error here since it is captured in the failed count
return
return nil
} else if apiErr != nil {
// sleeping for exponential backoff
sleepDuration := RetryInterval * time.Duration(i)
@@ -169,14 +183,11 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
break
}
}
return nil
}
func (lm *Manager) Stop() {
lm.scheduler.Stop()
zap.S().Debug("sending usage data before shutting down")
// send usage before shutting down
lm.UploadUsage()
close(lm.done)
atomic.StoreUint32(&locker, stateUnlocked)
<-lm.terminated
}

View File

@@ -1,7 +1,7 @@
{
"presets": [
"@babel/preset-env",
["@babel/preset-react", { "runtime": "automatic" }],
"@babel/preset-react",
"@babel/preset-typescript"
],
"plugins": [

View File

@@ -1,3 +1,5 @@
node_modules
.vscode
build
.env
.git

View File

@@ -16,7 +16,6 @@ module.exports = {
'plugin:sonarjs/recommended',
'plugin:import/errors',
'plugin:import/warnings',
'plugin:react/jsx-runtime',
],
parser: '@typescript-eslint/parser',
parserOptions: {
@@ -86,7 +85,6 @@ module.exports = {
},
],
'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
'no-plusplus': 'off',
'jsx-a11y/label-has-associated-control': [
'error',
{
@@ -110,6 +108,7 @@ module.exports = {
// eslint rules need to remove
'@typescript-eslint/no-shadow': 'off',
'import/no-cycle': 'off',
'prettier/prettier': [
'error',
{},

View File

@@ -2,19 +2,3 @@
. "$(dirname "$0")/_/husky.sh"
cd frontend && yarn run commitlint --edit $1
branch="$(git rev-parse --abbrev-ref HEAD)"
color_red="$(tput setaf 1)"
bold="$(tput bold)"
reset="$(tput sgr0)"
if [ "$branch" = "main" ]; then
echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
exit 1
fi
if [ "$branch" = "develop" ]; then
echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
exit 1
fi

View File

@@ -1,6 +0,0 @@
# Ignore artifacts:
build
coverage
# Ignore all MD files:
**/*.md

View File

@@ -1,2 +1 @@
network-timeout 600000
save-prefix ""

View File

@@ -1,56 +0,0 @@
# **Frontend Guidelines**
Embrace the spirit of collaboration and contribute to the success of our open-source project by adhering to these frontend development guidelines with precision and passion.
### React and Components
- Strive to create small and modular components, ensuring they are divided into individual pieces for improved maintainability and reusability.
- Avoid passing inline objects or functions as props to React components, as they are recreated with each render cycle.
- Utilize careful memoization of functions and variables, balancing optimization efforts to prevent potential performance issues. [When to useMemo and useCallback](https://kentcdodds.com/blog/usememo-and-usecallback) by Kent C. Dodds is quite helpful for this scenario.
- Minimize the use of inline functions whenever possible to enhance code readability and improve overall comprehension.
- Employ the appropriate usage of useMemo and useCallback hooks for effective memoization of values and functions.
- Determine the appropriate placement of components:
- Pages should contain an aggregation of all components and containers.
- Commonly used components should reside in the 'components' directory.
- Parent components responsible for data manipulation should be placed in the 'container' directory.
- Strategically decide where to store data, either in global state or local components:
- Begin by storing data in local components and gradually transition to global state as necessary.
- Avoid importing default namespace `React` as the project is using `v18` and `import React from 'react'` is not needed anymore.
- When a function requires more than three arguments (except when memoized), encapsulate them within an object to enhance readability and reduce potential parameter complexity.
### API and Services
- Avoid incorporating business logic within API/Service files to maintain flexibility for consumers to handle it according to their specific needs.
- Employ the use of the useQuery hook for fetching data and the useMutation hook for updating data, ensuring a consistent and efficient approach.
- Utilize the useQueryClient hook when updating the cache, facilitating smooth and effective management of data within the application.
**Note -** In our project, we are utilizing React Query v3. To gain a comprehensive understanding of its features and implementation, we recommend referring to the [official documentation](https://tanstack.com/query/v3/docs/react/overview) as a valuable resource.
### Styling
- Refrain from using inline styling within React components to maintain separation of concerns and promote a more maintainable codebase.
- Opt for using the rem unit instead of px values to ensure better scalability and responsiveness across different devices and screen sizes.
### Linting and Setup
- It is crucial to refrain from disabling ESLint and TypeScript errors within the project. If there is a specific rule that needs to be disabled, provide a clear and justified explanation for doing so. Maintaining the integrity of the linting and type-checking processes ensures code quality and consistency throughout the codebase.
- In our project, we rely on several essential ESLint plugins, namely:
- [plugin:@typescript-eslint](https://typescript-eslint.io/rules/)
- [airbnb styleguide](https://github.com/airbnb/javascript)
- [plugin:sonarjs](https://github.com/SonarSource/eslint-plugin-sonarjs)
To ensure compliance with our coding standards and best practices, we encourage you to refer to the documentation of these plugins. Familiarizing yourself with the ESLint rules they provide will help maintain code quality and consistency throughout the project.
### Naming Conventions
- Ensure that component names are written in Capital Case, while the folder names should be in lowercase.
- Keep all other elements, such as variables, functions, and file names, in lowercase.
### Miscellaneous
- Ensure that functions are modularized and follow the Single Responsibility Principle (SRP). The function's name should accurately convey its purpose and functionality.
- Semantic division of functions into smaller units should be prioritized for improved readability and maintainability.
- Aim to keep functions concise and avoid exceeding a maximum length of 40 lines to enhance code understandability and ease of maintenance.
- Eliminate the use of hard-coded strings or enums, favoring a more flexible and maintainable approach.
- Strive to internationalize all strings within the codebase to support localization and improve accessibility for users across different languages.
- Minimize the usage of multiple if statements or switch cases within a function. Consider creating a mapper and separating logic into multiple functions for better code organization.

View File

@@ -1,17 +1,36 @@
FROM nginx:1.25.2-alpine
# Builder stage
FROM node:16.15.0-slim as builder
# Add Maintainer Info
LABEL maintainer="signoz"
# Set working directory
ARG TARGETOS=linux
ARG TARGETARCH
WORKDIR /frontend
# Copy the package.json and .yarnrc files before installing dependencies
COPY package.json ./
COPY .yarnrc ./
# Install the dependencies
RUN CI=1 yarn install
COPY . .
# Build the project and copy the files
RUN yarn build
FROM nginx:1.18-alpine
COPY conf/default.conf /etc/nginx/conf.d/default.conf
# Remove default nginx index page
RUN rm -rf /usr/share/nginx/html/*
# Copy custom nginx config and static files
COPY conf/default.conf /etc/nginx/conf.d/default.conf
COPY build /usr/share/nginx/html
# Copy the build output from the builder stage
COPY --from=builder /frontend/build /usr/share/nginx/html
EXPOSE 3301

View File

@@ -1,7 +0,0 @@
NODE_ENV="development"
BUNDLE_ANALYSER="true"
FRONTEND_API_ENDPOINT="http://localhost:3301/"
INTERCOM_APP_ID="intercom-app-id"
PLAYWRIGHT_TEST_BASE_URL="http://localhost:3301"
CI="1"

View File

@@ -7,7 +7,7 @@ const config: Config.InitialOptions = {
moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
modulePathIgnorePatterns: ['dist'],
moduleNameMapper: {
'\\.(css|less|scss)$': '<rootDir>/__mocks__/cssMock.ts',
'\\.(css|less)$': '<rootDir>/__mocks__/cssMock.ts',
},
globals: {
extensionsToTreatAsEsm: ['.ts'],
@@ -21,9 +21,7 @@ const config: Config.InitialOptions = {
'^.+\\.(ts|tsx)?$': 'ts-jest',
'^.+\\.(js|jsx)$': 'babel-jest',
},
transformIgnorePatterns: [
'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios)/)',
],
transformIgnorePatterns: ['node_modules/(?!(lodash-es)/)'],
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
testPathIgnorePatterns: ['/node_modules/', '/public/'],
moduleDirectories: ['node_modules', 'src'],

View File

@@ -1,29 +1,5 @@
/* eslint-disable @typescript-eslint/explicit-function-return-type */
/* eslint-disable object-shorthand */
/* eslint-disable func-names */
/**
* Adds custom matchers from the react testing library to all tests
*/
import '@testing-library/jest-dom';
import 'jest-styled-components';
import { server } from './src/mocks-server/server';
// Establish API mocking before all tests.
// Mock window.matchMedia
window.matchMedia =
window.matchMedia ||
function (): any {
return {
matches: false,
addListener: function () {},
removeListener: function () {},
};
};
beforeAll(() => server.listen());
afterEach(() => server.resetHandlers());
afterAll(() => server.close());

View File

@@ -29,43 +29,37 @@
"dependencies": {
"@ant-design/colors": "6.0.0",
"@ant-design/icons": "4.8.0",
"@dnd-kit/core": "6.1.0",
"@dnd-kit/modifiers": "7.0.0",
"@dnd-kit/sortable": "8.0.0",
"@grafana/data": "^9.5.2",
"@mdx-js/loader": "2.3.0",
"@mdx-js/react": "2.3.0",
"@grafana/data": "^8.4.3",
"@monaco-editor/react": "^4.3.1",
"@uiw/react-md-editor": "3.23.5",
"@xstate/react": "^3.0.0",
"ansi-to-html": "0.7.2",
"antd": "5.11.0",
"antd-table-saveas-excel": "2.2.1",
"axios": "1.6.2",
"antd": "5.0.5",
"axios": "^0.21.0",
"babel-eslint": "^10.1.0",
"babel-jest": "^29.6.4",
"babel-loader": "9.1.3",
"babel-jest": "^26.6.0",
"babel-loader": "8.1.0",
"babel-plugin-named-asset-import": "^0.3.7",
"babel-preset-minify": "^0.5.1",
"babel-preset-react-app": "^10.0.1",
"babel-preset-react-app": "^10.0.0",
"chart.js": "3.9.1",
"chartjs-adapter-date-fns": "^2.0.0",
"chartjs-plugin-annotation": "^1.4.0",
"classnames": "2.3.2",
"color": "^4.2.1",
"color-alpha": "1.1.3",
"cross-env": "^7.0.3",
"css-loader": "5.0.0",
"css-minimizer-webpack-plugin": "5.0.1",
"css-loader": "4.3.0",
"css-minimizer-webpack-plugin": "^3.2.0",
"d3": "^6.2.0",
"d3-flame-graph": "^3.1.1",
"d3-tip": "^0.9.1",
"dayjs": "^1.10.7",
"dompurify": "3.0.0",
"dotenv": "8.2.0",
"event-source-polyfill": "1.0.31",
"eventemitter3": "5.0.1",
"file-loader": "6.1.1",
"flat": "^5.0.2",
"fontfaceobserver": "2.3.0",
"history": "4.10.1",
"html-webpack-plugin": "5.5.0",
"html-webpack-plugin": "5.1.0",
"i18next": "^21.6.12",
"i18next-browser-languagedetector": "^6.1.3",
"i18next-http-backend": "^1.3.2",
@@ -74,43 +68,33 @@
"less": "^4.1.2",
"less-loader": "^10.2.0",
"lodash-es": "^4.17.21",
"lucide-react": "0.288.0",
"mini-css-extract-plugin": "2.4.5",
"papaparse": "5.4.1",
"react": "18.2.0",
"react-addons-update": "15.6.3",
"react-dnd": "16.0.1",
"react-dnd-html5-backend": "16.0.1",
"react-dom": "18.2.0",
"react-drag-listview": "2.0.0",
"react-error-boundary": "4.0.11",
"react-force-graph": "^1.43.0",
"react-full-screen": "1.1.1",
"react-force-graph": "^1.41.0",
"react-graph-vis": "^1.0.5",
"react-grid-layout": "^1.3.4",
"react-helmet-async": "1.3.0",
"react-i18next": "^11.16.1",
"react-markdown": "8.0.7",
"react-query": "3.39.3",
"react-intersection-observer": "9.4.1",
"react-query": "^3.34.19",
"react-redux": "^7.2.2",
"react-router-dom": "^5.2.0",
"react-syntax-highlighter": "15.5.0",
"react-use": "^17.3.2",
"react-virtuoso": "4.0.3",
"redux": "^4.0.5",
"redux-thunk": "^2.3.0",
"stream": "^0.0.2",
"style-loader": "1.3.0",
"styled-components": "^5.3.11",
"styled-components": "^5.2.1",
"terser-webpack-plugin": "^5.2.5",
"timestamp-nano": "^1.0.0",
"ts-node": "^10.2.1",
"tsconfig-paths-webpack-plugin": "^3.5.1",
"typescript": "^4.0.5",
"uplot": "1.6.26",
"uuid": "^8.3.2",
"web-vitals": "^0.2.4",
"webpack": "5.88.2",
"webpack-dev-server": "^4.15.1",
"webpack": "^5.23.0",
"webpack-dev-server": "^4.3.1",
"xstate": "^4.31.0"
},
"browserslist": {
@@ -126,13 +110,13 @@
]
},
"devDependencies": {
"@babel/core": "^7.22.11",
"@babel/plugin-proposal-class-properties": "^7.18.6",
"@babel/core": "^7.12.3",
"@babel/plugin-proposal-class-properties": "^7.12.13",
"@babel/plugin-syntax-jsx": "^7.12.13",
"@babel/preset-env": "^7.22.14",
"@babel/preset-env": "^7.12.17",
"@babel/preset-react": "^7.12.13",
"@babel/preset-typescript": "^7.21.4",
"@commitlint/cli": "^16.3.0",
"@babel/preset-typescript": "^7.12.17",
"@commitlint/cli": "^16.2.4",
"@commitlint/config-conventional": "^16.2.4",
"@jest/globals": "^27.5.1",
"@playwright/test": "^1.22.0",
@@ -142,42 +126,42 @@
"@types/color": "^3.0.3",
"@types/compression-webpack-plugin": "^9.0.0",
"@types/copy-webpack-plugin": "^8.0.1",
"@types/d3": "^6.2.0",
"@types/d3-tip": "^3.5.5",
"@types/dompurify": "^2.4.0",
"@types/event-source-polyfill": "^1.0.0",
"@types/flat": "^5.0.2",
"@types/fontfaceobserver": "2.1.0",
"@types/jest": "^27.5.1",
"@types/lodash-es": "^4.17.4",
"@types/mini-css-extract-plugin": "^2.5.1",
"@types/node": "^16.10.3",
"@types/papaparse": "5.3.7",
"@types/react": "18.0.26",
"@types/react-addons-update": "0.14.21",
"@types/react-dom": "18.0.10",
"@types/react-grid-layout": "^1.1.2",
"@types/react-helmet-async": "1.0.3",
"@types/react-redux": "^7.1.11",
"@types/react-resizable": "3.0.3",
"@types/react-router-dom": "^5.1.6",
"@types/react-syntax-highlighter": "15.5.7",
"@types/redux-mock-store": "1.0.4",
"@types/styled-components": "^5.1.4",
"@types/uuid": "^8.3.1",
"@types/vis": "^4.21.21",
"@types/webpack": "^5.28.0",
"@types/webpack-dev-server": "^4.7.2",
"@typescript-eslint/eslint-plugin": "^4.33.0",
"@typescript-eslint/parser": "^4.33.0",
"@types/webpack-dev-server": "^4.3.0",
"@typescript-eslint/eslint-plugin": "^4.28.2",
"@typescript-eslint/parser": "^4.28.2",
"@welldone-software/why-did-you-render": "6.2.1",
"autoprefixer": "^9.0.0",
"babel-plugin-styled-components": "^1.12.0",
"compression-webpack-plugin": "9.0.0",
"copy-webpack-plugin": "^8.1.0",
"critters-webpack-plugin": "^3.0.1",
"eslint": "^7.32.0",
"eslint": "^7.30.0",
"eslint-config-airbnb": "^19.0.4",
"eslint-config-airbnb-typescript": "^16.1.4",
"eslint-config-prettier": "^8.3.0",
"eslint-config-standard": "^16.0.3",
"eslint-plugin-import": "^2.28.1",
"eslint-plugin-jest": "^26.9.0",
"eslint-plugin-import": "^2.25.4",
"eslint-plugin-jest": "^26.1.2",
"eslint-plugin-jsx-a11y": "^6.5.1",
"eslint-plugin-node": "^11.1.0",
"eslint-plugin-prettier": "^4.0.0",
@@ -188,22 +172,18 @@
"eslint-plugin-sonarjs": "^0.12.0",
"husky": "^7.0.4",
"is-ci": "^3.0.1",
"jest-playwright-preset": "^1.7.2",
"jest-playwright-preset": "^1.7.0",
"jest-styled-components": "^7.0.8",
"lint-staged": "^12.5.0",
"msw": "1.3.2",
"less-plugin-npm-import": "^2.1.0",
"lint-staged": "^12.3.7",
"portfinder-sync": "^0.0.2",
"prettier": "2.2.1",
"raw-loader": "4.0.2",
"react-hooks-testing-library": "0.6.0",
"react-hot-loader": "^4.13.0",
"react-resizable": "3.0.4",
"redux-mock-store": "1.5.4",
"sass": "1.66.1",
"sass-loader": "13.3.2",
"ts-jest": "^27.1.5",
"ts-jest": "^27.1.4",
"ts-node": "^10.2.1",
"typescript-plugin-css-modules": "5.0.1",
"typescript-plugin-css-modules": "^3.4.0",
"webpack-bundle-analyzer": "^4.5.0",
"webpack-cli": "^4.9.2"
},
@@ -214,9 +194,6 @@
},
"resolutions": {
"@types/react": "18.0.26",
"@types/react-dom": "18.0.10",
"debug": "4.3.4",
"semver": "7.5.4",
"xml2js": "0.5.0"
"@types/react-dom": "18.0.10"
}
}
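
The "resolutions" block above is Yarn's override mechanism: every copy of a named package anywhere in the dependency tree is forced to the listed version, typically to pull in a patched release of a transitive dependency. A minimal sketch of the pattern ("some-lib" is a hypothetical direct dependency; the semver pin is the one visible in this diff):

{
  "dependencies": {
    "some-lib": "^2.0.0"
  },
  "resolutions": {
    "semver": "7.5.4"
  }
}

With this in place, Yarn installs semver 7.5.4 even where some-lib declares an older range, so a single outdated transitive version cannot linger in the lockfile.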

Binary file not shown. Before: 43 KiB

Binary file not shown. Before: 51 KiB

@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 103.53" style="enable-background:new 0 0 122.88 103.53" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;} .st0{fill:#1668dc;} .st1{fill:#FFFFFF;}</style><g><path class="st0" d="M5.47,0h111.93c3.01,0,5.47,2.46,5.47,5.47v92.58c0,3.01-2.46,5.47-5.47,5.47H5.47 c-3.01,0-5.47-2.46-5.47-5.47V5.47C0,2.46,2.46,0,5.47,0L5.47,0z M31.84,38.55l17.79,18.42l2.14,2.13l-2.12,2.16L31.68,80.31 l-5.07-5l15.85-16.15L26.81,43.6L31.84,38.55L31.84,38.55z M94.1,79.41H54.69v-6.84H94.1V79.41L94.1,79.41z M38.19,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S35,9.83,38.19,9.83L38.19,9.83z M18.95,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S15.75,9.83,18.95,9.83L18.95,9.83z M7.49,5.41 h107.91c1.15,0,2.09,0.94,2.09,2.09v18.32H5.4V7.5C5.4,6.35,6.34,5.41,7.49,5.41L7.49,5.41z"/></g></svg>

Before: 1.0 KiB

@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 88.17" style="enable-background:new 0 0 122.88 88.17" xml:space="preserve"><style type="text/css">.st0{fill:#0091E2;}</style><g><path class="st0" d="M121.68,33.34c-0.34-0.28-3.42-2.62-10.03-2.62c-1.71,0-3.48,0.17-5.19,0.46c-1.25-8.72-8.49-12.94-8.78-13.16 l-1.77-1.03l-1.14,1.65c-1.42,2.22-2.51,4.73-3.13,7.29c-1.2,4.96-0.46,9.63,2.05,13.62c-3.02,1.71-7.92,2.11-8.95,2.17l-80.93,0 c-2.11,0-3.82,1.71-3.82,3.82c-0.11,7.07,1.08,14.13,3.53,20.8c2.79,7.29,6.95,12.71,12.31,16.01c6.04,3.7,15.9,5.81,27.01,5.81 c5.01,0,10.03-0.46,14.99-1.37c6.9-1.25,13.51-3.65,19.6-7.12c5.02-2.91,9.52-6.61,13.34-10.94c6.44-7.24,10.26-15.33,13.05-22.51 c0.4,0,0.74,0,1.14,0c7.01,0,11.34-2.79,13.73-5.19c1.6-1.48,2.79-3.31,3.65-5.36l0.51-1.48L121.68,33.34L121.68,33.34z M71.59,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C70.68,38.98,71.08,39.38,71.59,39.38L71.59,39.38z M56.49,11.63h10.83c0.51,0,0.97-0.4,0.97-0.97V0.97c0-0.51-0.46-0.97-0.97-0.97 L56.49,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C55.52,11.17,55.97,11.63,56.49,11.63L56.49,11.63z M56.49,25.53h10.83 c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.46-0.97-0.97-0.97H56.49c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C55.52,25.08,55.97,25.53,56.49,25.53L56.49,25.53z M41.5,25.53h10.83c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97 l0,0H41.5c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C40.53,25.08,40.93,25.53,41.5,25.53L41.5,25.53z M26.28,25.53h10.83 c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0H26.28c-0.51,0-0.97,0.4-0.97,0.97v9.69 C25.37,25.08,25.77,25.53,26.28,25.53L26.28,25.53z M56.49,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97 l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C55.52,38.98,55.97,39.38,56.49,39.38L56.49,39.38L56.49,39.38z M41.5,39.38 h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C40.53,38.98,40.93,39.38,41.5,39.38L41.5,39.38L41.5,39.38z M26.28,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69 c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97v9.69C25.37,38.98,25.77,39.38,26.28,39.38L26.28,39.38z M11.35,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0 v9.69C10.44,38.98,10.84,39.38,11.35,39.38L11.35,39.38L11.35,39.38z"/></g></svg>

Before: 2.5 KiB

Binary file not shown. Before: 3.7 KiB

Binary file not shown. Before: 48 KiB

Binary file not shown. Before: 20 KiB

Binary file not shown. Before: 957 B

Binary file not shown. Before: 4.3 KiB

File diff suppressed because one or more lines are too long. Before: 5.9 KiB

@@ -1 +0,0 @@
<svg id="Layer_1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 80 80" width="2500" height="2500"><style>.st0{fill:#f3bd19}.st1{fill:#231f20}.st2{fill:#3ebeb0}.st3{fill:#37a595}.st4{fill:none}</style><path class="st0" d="M41.1 41.9H15.6V12.5h7.7c9.9 0 17.8 8 17.8 17.8v11.6z"/><path class="st1" d="M41.1 67.5c-14.1 0-25.6-11.4-25.6-25.6h25.6v25.6z"/><path class="st2" d="M41.1 41.9h23.3v25.6H41.1z"/><path class="st3" d="M41.1 41.9h5.4v25.6h-5.4z"/><path class="st4" d="M0 0h80v80H0z"/></svg>

Before: 494 B

@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 109 122.88" style="enable-background:new 0 0 109 122.88" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;fill:#689F63;}</style><g><path class="st0" d="M68.43,87.08c-19.7,0-23.83-9.04-23.83-16.63c0-0.72,0.58-1.3,1.3-1.3h5.82c0.64,0,1.18,0.47,1.28,1.1 c0.88,5.93,3.49,8.92,15.41,8.92c9.49,0,13.52-2.14,13.52-7.18c0-2.9-1.15-5.05-15.89-6.49c-12.33-1.22-19.95-3.93-19.95-13.8 c0-9.08,7.66-14.49,20.5-14.49c14.42,0,21.56,5,22.46,15.76c0.03,0.37-0.1,0.73-0.35,1c-0.25,0.26-0.6,0.42-0.96,0.42H81.9 c-0.61,0-1.14-0.43-1.26-1.01c-1.41-6.23-4.81-8.23-14.07-8.23c-10.36,0-11.56,3.61-11.56,6.31c0,3.28,1.42,4.24,15.4,6.09 c13.84,1.84,20.41,4.43,20.41,14.16c0,9.81-8.18,15.43-22.45,15.43L68.43,87.08L68.43,87.08z M54.52,122.88 c-1.65,0-3.28-0.43-4.72-1.26l-15.03-8.9c-2.25-1.26-1.15-1.7-0.41-1.96c2.99-1.05,3.6-1.28,6.8-3.1c0.34-0.19,0.78-0.12,1.12,0.08 l11.55,6.85c0.42,0.23,1.01,0.23,1.4,0l45.03-25.99c0.42-0.24,0.69-0.72,0.69-1.22V35.43c0-0.52-0.27-0.98-0.7-1.24L55.23,8.22 c-0.42-0.25-0.97-0.25-1.39,0l-45,25.97c-0.44,0.25-0.71,0.73-0.71,1.23v51.96c0,0.5,0.27,0.97,0.7,1.21l12.33,7.12 c6.69,3.35,10.79-0.6,10.79-4.56V39.86c0-0.73,0.57-1.3,1.31-1.3l5.7,0c0.71,0,1.3,0.56,1.3,1.3v51.31 c0,8.93-4.87,14.05-13.33,14.05c-2.6,0-4.66,0-10.38-2.82L4.72,95.59C1.8,93.9,0,90.75,0,87.38V35.42c0-3.38,1.8-6.54,4.72-8.21 l45.07-26c2.85-1.61,6.64-1.61,9.47,0l45.02,26.01c2.91,1.68,4.72,4.82,4.72,8.21v51.96c0,3.37-1.81,6.51-4.72,8.21l-45.02,26 c-1.44,0.83-3.08,1.26-4.74,1.26L54.52,122.88L54.52,122.88z M54.52,122.88L54.52,122.88L54.52,122.88L54.52,122.88z"/></g></svg>

Before: 1.7 KiB

@@ -1 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 98.18" style="enable-background:new 0 0 122.88 98.18" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;} .st0{fill:#1668dc;} .st1{fill:#FFFFFF;}</style><g><path class="st0" d="M3.42,0h116.05c1.88,0,3.41,1.54,3.41,3.41v91.36c0,1.88-1.54,3.41-3.41,3.41l-116.05,0 C1.54,98.18,0,96.65,0,94.77L0,3.41C0,1.53,1.54,0,3.42,0L3.42,0L3.42,0z M25.89,8.19c2.05,0,3.72,1.67,3.72,3.72 c0,2.05-1.67,3.72-3.72,3.72c-2.05,0-3.72-1.67-3.72-3.72C22.17,9.85,23.83,8.19,25.89,8.19L25.89,8.19z M103.07,7.69l2.52,2.77 l2.52-2.77l1.97,1.79l-2.69,2.96l2.69,2.96l-1.97,1.79l-2.52-2.77l-2.52,2.77l-1.97-1.79l2.69-2.96l-2.69-2.96L103.07,7.69 L103.07,7.69z M14.52,8.19c2.05,0,3.72,1.67,3.72,3.72c0,2.05-1.67,3.72-3.72,3.72c-2.05,0-3.72-1.67-3.72-3.72 C10.79,9.85,12.46,8.19,14.52,8.19L14.52,8.19z M37.26,8.19c2.05,0,3.72,1.67,3.72,3.72c0,2.05-1.67,3.72-3.72,3.72 c-2.05,0-3.72-1.67-3.72-3.72C33.54,9.85,35.21,8.19,37.26,8.19L37.26,8.19z M14.05,22.75h93.33c1.77,0,3.22,1.49,3.22,3.22v59.2 c0,1.73-1.49,3.22-3.22,3.22l-93.33,0c-1.73,0-3.22-1.45-3.22-3.22v-59.2C10.84,24.2,12.29,22.75,14.05,22.75L14.05,22.75 L14.05,22.75z"/></g></svg>

Before: 1.3 KiB

File diff suppressed because one or more lines are too long. Before: 6.2 KiB

Binary file not shown. Before: 45 KiB

@@ -1,114 +1,112 @@
{
"target_missing": "Please enter a threshold to proceed",
"rule_test_fired": "Test notification sent successfully",
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
"button_testrule": "Test Notification",
"label_channel_select": "Notification Channels",
"placeholder_channel_select": "select one or more channels",
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
"preview_chart_threshold_label": "Threshold",
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
"button_yes": "Yes",
"button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared",
"alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Step 3 - Alert Configuration",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with",
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
"rule_created": "Rule created successfully",
"rule_edited": "Rule edited successfully",
"expression_missing": "expression is missing in {{where}}",
"metricname_missing": "metric name is missing in {{where}}",
"condition_required": "at least one metric condition is required",
"alertname_required": "alert name is required",
"promql_required": "promql expression is required when query format is set to PromQL",
"chquery_required": "query is required when query format is set to ClickHouse",
"button_savechanges": "Save Rule",
"button_createrule": "Create Rule",
"button_returntorules": "Return to rules",
"button_cancelchanges": "Cancel",
"button_discard": "Discard",
"text_condition1": "Send a notification when",
"text_condition2": "the threshold",
"text_condition3": "during the last",
"option_5min": "5 mins",
"option_10min": "10 mins",
"option_15min": "15 mins",
"option_60min": "60 mins",
"option_4hours": "4 hours",
"option_24hours": "24 hours",
"field_threshold": "Alert Threshold",
"option_allthetimes": "all the times",
"option_atleastonce": "at least once",
"option_onaverage": "on average",
"option_intotal": "in total",
"option_above": "above",
"option_below": "below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
"button_formula": "Formula",
"tab_qb": "Query Builder",
"tab_promql": "PromQL",
"tab_chquery": "ClickHouse Query",
"title_confirm": "Confirm",
"button_ok": "Yes",
"button_cancel": "No",
"field_promql_expr": "PromQL Expression",
"field_alert_name": "Alert Name",
"field_alert_desc": "Alert Description",
"field_labels": "Labels",
"field_severity": "Severity",
"option_critical": "Critical",
"option_error": "Error",
"option_warning": "Warning",
"option_info": "Info",
"user_guide_headline": "Steps to create an Alert",
"user_guide_qb_step1": "Step 1 - Define the metric",
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
"user_guide_qb_step2b": "Enter the Alert threshold",
"user_guide_qb_step3": "Step 3 -Alert Configuration",
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_pql_step1": "Step 1 - Define the metric",
"user_guide_pql_step1a": "Write a PromQL query for the metric",
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_pql_step2b": "Enter the Alert threshold",
"user_guide_pql_step3": "Step 3 -Alert Configuration",
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_ch_step1": "Step 1 - Define the metric",
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_ch_step2b": "Enter the Alert threshold",
"user_guide_ch_step3": "Step 3 -Alert Configuration",
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts",
"choose_alert_type": "Choose a type for the alert:",
"metric_based_alert": "Metric based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
"log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert",
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
"exceptions_based_alert": "Exceptions-based Alert",
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.",
"field_unit": "Threshold unit",
"selected_query_placeholder": "Select query"
"target_missing": "Please enter a threshold to proceed",
"rule_test_fired": "Test notification sent successfully",
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
"button_testrule": "Test Notification",
"label_channel_select": "Notification Channels",
"placeholder_channel_select": "select one or more channels",
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
"preview_chart_threshold_label": "Threshold",
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
"button_yes": "Yes",
"button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared",
"alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Step 3 - Alert Configuration",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with",
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
"rule_created": "Rule created successfully",
"rule_edited": "Rule edited successfully",
"expression_missing": "expression is missing in {{where}}",
"metricname_missing": "metric name is missing in {{where}}",
"condition_required": "at least one metric condition is required",
"alertname_required": "alert name is required",
"promql_required": "promql expression is required when query format is set to PromQL",
"chquery_required": "query is required when query format is set to ClickHouse",
"button_savechanges": "Save Rule",
"button_createrule": "Create Rule",
"button_returntorules": "Return to rules",
"button_cancelchanges": "Cancel",
"button_discard": "Discard",
"text_condition1": "Send a notification when the metric is",
"text_condition2": "the threshold",
"text_condition3": "during the last",
"option_5min": "5 mins",
"option_10min": "10 mins",
"option_15min": "15 mins",
"option_60min": "60 mins",
"option_4hours": "4 hours",
"option_24hours": "24 hours",
"field_threshold": "Alert Threshold",
"option_allthetimes": "all the times",
"option_atleastonce": "at least once",
"option_onaverage": "on average",
"option_intotal": "in total",
"option_above": "above",
"option_below": "below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
"button_formula": "Formula",
"tab_qb": "Query Builder",
"tab_promql": "PromQL",
"tab_chquery": "ClickHouse Query",
"title_confirm": "Confirm",
"button_ok": "Yes",
"button_cancel": "No",
"field_promql_expr": "PromQL Expression",
"field_alert_name": "Alert Name",
"field_alert_desc": "Alert Description",
"field_labels": "Labels",
"field_severity": "Severity",
"option_critical": "Critical",
"option_error": "Error",
"option_warning": "Warning",
"option_info": "Info",
"user_guide_headline": "Steps to create an Alert",
"user_guide_qb_step1": "Step 1 - Define the metric",
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
"user_guide_qb_step2b": "Enter the Alert threshold",
"user_guide_qb_step3": "Step 3 -Alert Configuration",
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_pql_step1": "Step 1 - Define the metric",
"user_guide_pql_step1a": "Write a PromQL query for the metric",
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_pql_step2b": "Enter the Alert threshold",
"user_guide_pql_step3": "Step 3 -Alert Configuration",
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_ch_step1": "Step 1 - Define the metric",
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_ch_step2b": "Enter the Alert threshold",
"user_guide_ch_step3": "Step 3 -Alert Configuration",
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts",
"choose_alert_type": "Choose a type for the alert:",
"metric_based_alert": "Metric based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
"log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert",
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
"exceptions_based_alert": "Exceptions-based Alert",
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data."
}
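
Locale files like the one above are consumed at runtime by react-i18next (pinned in the dependencies earlier in this diff). Plain keys are looked up with t(), "{{where}}"-style placeholders are filled through interpolation, and numbered tags such as "<0>this tutorial</0>" bind to elements supplied to the Trans component. A minimal TypeScript sketch, assuming a hypothetical "alerts" namespace for this file and an illustrative tutorial URL:

import { Trans, useTranslation } from 'react-i18next';

function AlertFormHints(): JSX.Element {
  // 'alerts' is an assumed namespace name; the real registration lives in the i18n setup.
  const { t } = useTranslation('alerts');

  return (
    <div>
      {/* Plain lookup of a static message. */}
      <p>{t('alertname_required')}</p>
      {/* "{{where}}" in "expression_missing" is supplied via interpolation. */}
      <p>{t('expression_missing', { where: 'Query A' })}</p>
      {/* "<0>this tutorial</0>" in "user_guide_ch_step1a" binds to components[0]; the href is illustrative. */}
      <Trans
        t={t}
        i18nKey="user_guide_ch_step1a"
        components={[<a key="0" href="https://example.com/ch-tutorial">this tutorial</a>]}
      />
    </div>
  );
}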

Some files were not shown because too many files have changed in this diff.