Compare commits
256 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 58b0c08d71 | |
| | dd9cbcee33 | |
| | 897728cc71 | |
| | bdf78cbf2c | |
| | 90566360ae | |
| | 0a6fa0ee85 | |
| | 73c2137cd7 | |
| | dbe68c064c | |
| | ba7427f280 | |
| | 6dbc11991b | |
| | eeae71163c | |
| | a25e7a64ce | |
| | d0e272b679 | |
| | 47e6e00a64 | |
| | 282c47def8 | |
| | 9d3fc493a3 | |
| | b2afb9aabc | |
| | a733adad2c | |
| | cc18cc9087 | |
| | ecb2ed8ac8 | |
| | 31931e5a6c | |
| | 31848c488d | |
| | bdcc997672 | |
| | d68334b2ca | |
| | 3ebded66ea | |
| | 2ed24df250 | |
| | 5a34ce2221 | |
| | aae6a1adf1 | |
| | bef83d30cc | |
| | 1ebf3dbf65 | |
| | f57808bdb4 | |
| | 6bdcd4f5bb | |
| | d726ad9ca6 | |
| | 4ed3295b80 | |
| | 72dc4d62ce | |
| | 186f4dca71 | |
| | e4f2219f8c | |
| | fe9a6c2448 | |
| | 5c2a875211 | |
| | 6dab77409d | |
| | 0f811af34e | |
| | bdbcbb5f6c | |
| | ae91d7e8a9 | |
| | 64927acd97 | |
| | 9dae957c8f | |
| | afbcde5edc | |
| | b8c3fd1cbf | |
| | 93cf5dfa46 | |
| | d2c28a47c2 | |
| | 9c68c6af93 | |
| | 3771f85c7d | |
| | b39e0465b0 | |
| | bc97ea8fc0 | |
| | 1e980c3886 | |
| | 5ec52f03ad | |
| | 4aab923e40 | |
| | 17b0ee5434 | |
| | 08c3c4c51c | |
| | 5f802e0e20 | |
| | 63e663a92d | |
| | d21ab7b82d | |
| | 84b876170d | |
| | 88d8dba90e | |
| | d7d0d70aa5 | |
| | 671b441ec9 | |
| | 729c7fce7b | |
| | 224ec8d0d9 | |
| | 7eed865660 | |
| | 241121ebec | |
| | 15af158a9c | |
| | 2f02aeb031 | |
| | 3603e497a6 | |
| | 070d32a0ef | |
| | 0b36da714f | |
| | ce0ac1e3af | |
| | bcb5256de0 | |
| | fdca72b9b2 | |
| | 7f64dfd023 | |
| | 8871d53ae0 | |
| | 2313ec3f9a | |
| | 56208c9b06 | |
| | 84e281271c | |
| | 43e4f637d1 | |
| | c156b9c403 | |
| | 9885572842 | |
| | 4803fd9c8e | |
| | c2fe35388e | |
| | ba5e3dcfd3 | |
| | 9c8c31d912 | |
| | 469254e9fc | |
| | 1f2ec0d728 | |
| | ff1fc83b66 | |
| | 0a5eff2255 | |
| | 24e84bac2a | |
| | db00a78a4e | |
| | 4d2e8b0ea5 | |
| | 4f12f8c85c | |
| | fabab345cb | |
| | 00355b3383 | |
| | c16ae790d4 | |
| | c6d57a7a53 | |
| | d8775c91d7 | |
| | 7b315c6766 | |
| | 676fe892a5 | |
| | 15260e0e14 | |
| | ce7be6e7cd | |
| | 99d38860cb | |
| | 1f4f281965 | |
| | 4aa4bf9ea2 | |
| | 052eb25cff | |
| | ce14638a63 | |
| | b3dfd567e0 | |
| | fa142707dc | |
| | 5ae4e05c96 | |
| | b7d52b8fba | |
| | 660391c360 | |
| | 1c90e62189 | |
| | cfeb631a6e | |
| | 8a0bcf6cd9 | |
| | 0c06c5ee0e | |
| | f3610ffe55 | |
| | d150cfa46c | |
| | 4fc4ab0611 | |
| | b107902c31 | |
| | 2d83afd0c4 | |
| | e641577e1c | |
| | 3e4b56e012 | |
| | 697fd1d1bf | |
| | 21dbdb57da | |
| | 3406bcaa5f | |
| | de0fd64a5e | |
| | c27c026e25 | |
| | 0a4bc7e181 | |
| | b6cfe9d08e | |
| | b5b9f20b1f | |
| | 25c6106bd6 | |
| | d5877337ec | |
| | 51e0972219 | |
| | 38c0bcf4ea | |
| | d863c2781a | |
| | 642c6c5920 | |
| | f92e4798ce | |
| | 5d080f5564 | |
| | eb9a8e3a97 | |
| | 4a13c524a3 | |
| | 7c3edec3e6 | |
| | 199d6b6213 | |
| | 3d46abc1e9 | |
| | e6496ee67b | |
| | fa6d5a7404 | |
| | bd6153225f | |
| | bcceaf7937 | |
| | 4a287fd112 | |
| | 8ec9cb2222 | |
| | d3094e10bf | |
| | 973ef56c09 | |
| | 26db6b5fcc | |
| | 6e2afe1c78 | |
| | 0bcd9d8d98 | |
| | be01bc9b82 | |
| | 5a2ad9492c | |
| | 747677d4b0 | |
| | e7f49cf360 | |
| | 3ba519457a | |
| | 8d6646afed | |
| | a4cfb44953 | |
| | c77ad88f90 | |
| | 914be6e4cf | |
| | 2e9e29eb38 | |
| | bbed3fda22 | |
| | cbaf9b009c | |
| | 8471dc0c1b | |
| | 49175b3784 | |
| | 961dc7e814 | |
| | 1315b43aad | |
| | 9e6d918d6a | |
| | 5b5b19dd99 | |
| | 4b8bd2e335 | |
| | 7d2883df11 | |
| | cb4e465a10 | |
| | b1ee56b2f2 | |
| | 98dfcead5b | |
| | 3cc4fb9c30 | |
| | 83cb099aa6 | |
| | c480b3c563 | |
| | f084637f84 | |
| | 9fd8d12cc0 | |
| | 22f9069a29 | |
| | 42269a7c78 | |
| | 2c62a1c0f0 | |
| | b3729e0b6c | |
| | 696a6adc32 | |
| | d964b66bcc | |
| | 4a4ad7a3da | |
| | 03ef3d3bcd | |
| | d2913a2831 | |
| | 4ca3f1f945 | |
| | f2074f01e8 | |
| | ffd5621f09 | |
| | 429e3bbd0d | |
| | 3f37fe4d60 | |
| | ec3fed05bb | |
| | 31583b73d8 | |
| | 02ba0eda9a | |
| | 7185f2fa24 | |
| | ceb59e8bb5 | |
| | f063a82133 | |
| | 072c137f26 | |
| | 358fc3a217 | |
| | 60d869ddbe | |
| | 286d46edbe | |
| | b66ce81eb6 | |
| | 60bb82ea9d | |
| | e3987206de | |
| | b8f8d59d40 | |
| | b2fc4776b7 | |
| | dd0047da07 | |
| | d3c67bad5b | |
| | ff3b414645 | |
| | 104256dcb5 | |
| | 38d89fc34a | |
| | a2d67f1222 | |
| | 8e360e001f | |
| | de3928c51f | |
| | 228fb66251 | |
| | 12c14f71ba | |
| | 80de9efa0e | |
| | 3890e06d29 | |
| | a34dbc4942 | |
| | 4b591fabf7 | |
| | cc978153f9 | |
| | 9ba0b84a91 | |
| | ac06b02d52 | |
| | 9c173c8eb3 | |
| | d0b21fce01 | |
| | 07ffd13159 | |
| | 1926998e3c | |
| | eb397babcd | |
| | a0643aaf4e | |
| | 169185ff89 | |
| | 7feee26f85 | |
| | ce72b1e7a0 | |
| | e06f020162 | |
| | 574088ad54 | |
| | 6f48030ab9 | |
| | ea3a5e20d9 | |
| | b4833eeb0e | |
| | ce67005d66 | |
| | 80c0b5621d | |
| | b21a2707d3 | |
| | 4fa5ff9319 | |
| | 53528f1045 | |
| | 9522bbf33b | |
| | f149258de2 | |
| | d6c4df8b4b | |
| | 31443dabe7 | |
.github/CODEOWNERS (vendored): 3 changes

@@ -2,5 +2,6 @@
 # Owners are automatically requested for review for PRs that changes code
 # that they own.
 * @ankitnayan
-/frontend/ @palash-signoz @pranshuchittora
+/frontend/ @palashgdev @pranshuchittora
 /deploy/ @prashant-shahi
+/pkg/query-service/ @srikanthccv @makeavish @nityanandagohain
.github/workflows/build.yaml (vendored): 47 changes

@@ -1,40 +1,15 @@
 name: build-pipeline

 on:
   pull_request:
     branches:
       - develop
       - main
-      - v*
-    paths:
-      - "pkg/**"
-      - "frontend/**"
+      - release/v*

 jobs:
-  get_filters:
-    runs-on: ubuntu-latest
-    # Set job outputs to values from filter step
-    outputs:
-      frontend: ${{ steps.filter.outputs.frontend }}
-      query-service: ${{ steps.filter.outputs.query-service }}
-      flattener: ${{ steps.filter.outputs.flattener }}
-    steps:
-      # For pull requests it's not necessary to checkout the code
-      - uses: dorny/paths-filter@v2
-        id: filter
-        with:
-          filters: |
-            frontend:
-              - 'frontend/**'
-            query-service:
-              - 'pkg/query-service/**'
-            flattener:
-              - 'pkg/processors/flattener/**'

   build-frontend:
     runs-on: ubuntu-latest
-    needs:
-      - get_filters
-    if: ${{ needs.get_filters.outputs.frontend == 'true' }}
     steps:
       - name: Checkout code
         uses: actions/checkout@v2
@@ -42,6 +17,8 @@ jobs:
         run: cd frontend && yarn install
       - name: Run ESLint
         run: cd frontend && npm run lint
       - name: Run Jest
         run: cd frontend && npm run jest
+      - name: TSC
+        run: yarn tsc
+        working-directory: ./frontend
@@ -52,9 +29,6 @@ jobs:

   build-query-service:
     runs-on: ubuntu-latest
-    needs:
-      - get_filters
-    if: ${{ needs.get_filters.outputs.query-service == 'true' }}
     steps:
       - name: Checkout code
         uses: actions/checkout@v2
@@ -62,16 +36,3 @@ jobs:
         shell: bash
         run: |
           make build-query-service-amd64
-
-  build-flattener:
-    runs-on: ubuntu-latest
-    needs:
-      - get_filters
-    if: ${{ needs.get_filters.outputs.flattener == 'true' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Build flattener docker image
-        shell: bash
-        run: |
-          make build-flattener-amd64
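The workflow above drives the image builds through the repository Makefile, so the CI step can be reproduced locally. A minimal sketch, assuming Docker is installed and the repo root is the working directory; the `REPONAME` and `DOCKER_TAG` overrides are illustrative (both have defaults in the Makefile):

```bash
# Reproduce the CI build step locally; variable values are illustrative.
REPONAME=mylocaluser DOCKER_TAG=dev make build-query-service-amd64
```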
.github/workflows/codeball.yml (vendored, new file): 17 changes

@@ -0,0 +1,17 @@
+name: Codeball
+on: [pull_request]
+
+jobs:
+  codeball_job:
+    runs-on: ubuntu-latest
+    name: Codeball
+    steps:
+      # Run Codeball on all new Pull Requests 🚀
+      # For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
+      - name: Codeball
+        uses: sturdy-dev/codeball-action@v2
+        with:
+          approvePullRequests: "true"
+          labelPullRequestsWhenApproved: "true"
+          labelPullRequestsWhenReviewNeeded: "false"
+          failJobsWhenReviewNeeded: "false"
.github/workflows/create-issue-on-pr-merge.yml (vendored, new file): 27 changes

@@ -0,0 +1,27 @@
+on:
+  pull_request_target:
+    types:
+      - closed
+
+env:
+  GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
+  PR_NUMBER: ${{ github.event.number }}
+jobs:
+  create_issue_on_merge:
+    if: github.event.pull_request.merged == true
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Codebase
+        uses: actions/checkout@v2
+        with:
+          repository: signoz/gh-bot
+      - name: Use Node v16
+        uses: actions/setup-node@v2
+        with:
+          node-version: 16
+      - name: Setup Cache & Install Dependencies
+        uses: bahmutov/npm-install@v1
+        with:
+          install-command: yarn --frozen-lockfile
+      - name: Comment on PR
+        run: node create-issue.js
.github/workflows/playwright.yaml (vendored, new file): 24 changes

@@ -0,0 +1,24 @@
+name: Playwright Tests
+on: [pull_request]
+
+jobs:
+  playwright:
+    defaults:
+      run:
+        working-directory: frontend
+    timeout-minutes: 60
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-node@v2
+        with:
+          node-version: "16.x"
+      - name: Install dependencies
+        run: CI=1 yarn install
+      - name: Install Playwright
+        run: npx playwright install --with-deps
+      - name: Run Playwright tests
+        run: yarn playwright
+        env:
+          # This might depend on your test-runner/language binding
+          PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}
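The same suite can be pointed at a local SigNoz UI instead of the secret-configured URL. A sketch using the workflow's own commands; the localhost:3301 address assumes the frontend is running on its default port:

```bash
# Install dependencies and run the Playwright suite against a local UI.
cd frontend
CI=1 yarn install
npx playwright install --with-deps
PLAYWRIGHT_TEST_BASE_URL=http://localhost:3301 yarn playwright
```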
.gitignore (vendored): 5 changes

@@ -15,6 +15,7 @@ frontend/build
 frontend/.vscode
 frontend/.yarnclean
 frontend/.temp_cache
+frontend/test-results

 # misc
 .DS_Store
@@ -27,10 +28,6 @@ frontend/npm-debug.log*
 frontend/yarn-debug.log*
 frontend/yarn-error.log*
 frontend/src/constants/env.ts
-frontend/cypress/**/*.mp4
-
-# env file for cypress
-frontend/cypress.env.json

 .idea

CONTRIBUTING.md

@@ -106,32 +106,70 @@ Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts)
 - [k3d](https://k3d.io/#installation)
 - [minikube](https://minikube.sigs.k8s.io/docs/start/)
 - create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
-- run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart
-- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
+- run `make dev-install` to install SigNoz chart with `my-release` release name in `platform` namespace.
+- run `kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
+
+**To install HotROD sample app:**
+
+```bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
+  | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
+```

 **To load data with HotROD sample app:**

-```sh
+```bash
 kubectl create ns sample-application

 kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml

 kubectl -n sample-application run strzal --image=djbingham/curl \
-  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
-  'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
+ --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
+ 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
 ```

 **To stop the load generation:**

-```sh
+```bash
 kubectl -n sample-application run strzal --image=djbingham/curl \
-  --restart='OnFailure' -i --tty --rm --command -- curl \
-  http://locust-master:8089/stop
+ --restart='OnFailure' -i --tty --rm --command -- curl \
+ http://locust-master:8089/stop
 ```

+**To delete HotROD sample app:**
+
+```bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
+  | HOTROD_NAMESPACE=sample-application bash
+```
+
+---
+
+## General Instructions
+
+**Before making any significant changes, please open an issue**. Each issue
+should describe the following:
+
+* Requirement - what kind of use case are you trying to solve?
+* Proposal - what do you suggest to solve the problem or improve the existing
+  situation?
+* Any open questions to address
+
+Discussing your proposed changes ahead of time will make the contribution
+process smooth for everyone. Once the approach is agreed upon, make your changes
+and open a pull request(s). Unless your change is small, Please consider submitting different PRs:
+
+* First PR should include the overall structure of the new component:
+  * Readme, configuration, interfaces or base classes etc...
+  * This PR is usually trivial to review, so the size limit does not apply to
+    it.
+* Second PR should include the concrete implementation of the component. If the
+  size of this PR is larger than the recommended size consider splitting it in
+  multiple PRs.
+* If there are multiple sub-component then ideally each one should be implemented as
+  a separate pull request.
+* Last PR should include changes to any user facing documentation. And should include
+  end to end tests if applicable. The component must be enabled
+  only after sufficient testing, and there is enough confidence in the
+  stability and quality of the component.
+
+You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).
+
+- If you find any bugs, please create an issue
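Before running the port-forward described above, it can help to confirm the chart is fully up. A sketch, using the release namespace from the instructions:

```bash
# Wait until every pod in the platform namespace reports Ready.
kubectl -n platform get pods
kubectl -n platform wait --for=condition=Ready pods --all --timeout=300s
```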
Makefile: 25 changes

@@ -10,7 +10,6 @@ BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)

 # Internal variables or constants.
 FRONTEND_DIRECTORY ?= frontend
-FLATTENER_DIRECTORY ?= pkg/processors/flattener
 QUERY_SERVICE_DIRECTORY ?= pkg/query-service
 STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
 SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
@@ -20,7 +19,6 @@ DOCKER_TAG ?= latest

 FRONTEND_DOCKER_IMAGE ?= frontend
 QUERY_SERVICE_DOCKER_IMAGE ?= query-service
-FLATTERNER_DOCKER_IMAGE ?= flattener-processor

 # Build-time Go variables
 PACKAGE?=go.signoz.io/query-service
@@ -31,7 +29,7 @@ gitBranch=${PACKAGE}/version.gitBranch

 LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"

-all: build-push-frontend build-push-query-service build-push-flattener
+all: build-push-frontend build-push-query-service
 # Steps to build and push docker image of frontend
 .PHONY: build-frontend-amd64 build-push-frontend
 # Step to build docker image of frontend in amd64 (used in build pipeline)
@@ -73,27 +71,6 @@ build-push-query-service:
 		--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) \
 		--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .

-# Steps to build and push docker image of flattener
-.PHONY: build-flattener-amd64 build-push-flattener
-# Step to build docker image of flattener in amd64 (used in build pipeline)
-build-flattener-amd64:
-	@echo "------------------"
-	@echo "--> Building flattener docker image for amd64"
-	@echo "------------------"
-	@cd $(FLATTENER_DIRECTORY) && \
-	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) \
-	--build-arg TARGETPLATFORM="linux/amd64" .
-
-# Step to build and push docker image of flattener in amd64 (used in push pipeline)
-build-push-flattener:
-	@echo "------------------"
-	@echo "--> Building and pushing flattener docker image"
-	@echo "------------------"
-	@cd $(FLATTENER_DIRECTORY) && \
-	docker buildx build --file Dockerfile --progress plane \
-	--no-cache --push --platform linux/arm64,linux/amd64 \
-	--tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .

 dev-setup:
 	mkdir -p /var/lib/signoz
 	sqlite3 /var/lib/signoz/signoz.db "VACUUM";
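The remaining `build-push-*` targets publish multi-arch (linux/arm64 and linux/amd64) images through docker buildx, which needs an active buildx builder. A sketch; the builder name is illustrative:

```bash
# One-time setup: create and select a multi-platform buildx builder,
# then build and push both architectures (builder name is illustrative).
docker buildx create --name signoz-builder --use
REPONAME=mydockerhubuser make build-push-query-service
```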
SECURITY.md (new file): 18 changes

@@ -0,0 +1,18 @@
+# Security Policy
+
+SigNoz is looking forward to working with security researchers across the world to keep SigNoz and our users safe. If you have found an issue in our systems/applications, please reach out to us.
+
+## Supported Versions
+We always recommend using the latest version of SigNoz to ensure you get all security updates
+
+## Reporting a Vulnerability
+
+If you believe you have found a security vulnerability within SigNoz, please let us know right away. We'll try and fix the problem as soon as possible.
+
+**Do not report vulnerabilities using public GitHub issues**. Instead, email <security@signoz.io> with a detailed account of the issue. Please submit one issue per email, this helps us triage vulnerabilities.
+
+Once we've received your email we'll keep you updated as we fix the vulnerability.
+
+## Thanks
+
+Thank you for keeping SigNoz and our users safe. 🙇
File diff suppressed because it is too large.
deploy/docker-swarm/clickhouse-setup/clickhouse-storage.xml (new file): 28 changes

@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<clickhouse>
+    <storage_configuration>
+        <disks>
+            <default>
+                <keep_free_space_bytes>10485760</keep_free_space_bytes>
+            </default>
+            <s3>
+                <type>s3</type>
+                <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
+                <access_key_id>ACCESS-KEY-ID</access_key_id>
+                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
+            </s3>
+        </disks>
+        <policies>
+            <tiered>
+                <volumes>
+                    <default>
+                        <disk>default</disk>
+                    </default>
+                    <s3>
+                        <disk>s3</disk>
+                    </s3>
+                </volumes>
+            </tiered>
+        </policies>
+    </storage_configuration>
+</clickhouse>
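With this file mounted (the storage.xml volume is present but commented out in the compose files below), tables can opt into the `tiered` policy so data can spill to S3. A sketch; the database and table names are illustrative:

```bash
# Confirm ClickHouse sees both disks, then create a table that uses
# the tiered storage policy (database/table names are illustrative).
clickhouse-client --query "SELECT name, path, type FROM system.disks"
clickhouse-client --query "CREATE TABLE default.events (ts DateTime, msg String)
  ENGINE = MergeTree ORDER BY ts SETTINGS storage_policy = 'tiered'"
```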
deploy/docker-swarm/clickhouse-setup/clickhouse-users.xml (new file): 123 changes

@@ -0,0 +1,123 @@
+<?xml version="1.0"?>
+<clickhouse>
+    <!-- See also the files in users.d directory where the settings can be overridden. -->
+
+    <!-- Profiles of settings. -->
+    <profiles>
+        <!-- Default settings. -->
+        <default>
+            <!-- Maximum memory usage for processing single query, in bytes. -->
+            <max_memory_usage>10000000000</max_memory_usage>
+
+            <!-- How to choose between replicas during distributed query processing.
+                 random - choose random replica from set of replicas with minimum number of errors
+                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
+                  with minimum number of different symbols between replica's hostname and local hostname
+                  (Hamming distance).
+                 in_order - first live replica is chosen in specified order.
+                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
+            -->
+            <load_balancing>random</load_balancing>
+        </default>
+
+        <!-- Profile that allows only read queries. -->
+        <readonly>
+            <readonly>1</readonly>
+        </readonly>
+    </profiles>
+
+    <!-- Users and ACL. -->
+    <users>
+        <!-- If user name was not specified, 'default' user is used. -->
+        <default>
+            <!-- See also the files in users.d directory where the password can be overridden.
+
+                 Password could be specified in plaintext or in SHA256 (in hex format).
+
+                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
+                 Example: <password>qwerty</password>.
+                 Password could be empty.
+
+                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
+                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
+
+                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
+                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
+
+                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
+                  place its name in 'server' element inside 'ldap' element.
+                 Example: <ldap><server>my_ldap_server</server></ldap>
+
+                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
+                  place 'kerberos' element instead of 'password' (and similar) elements.
+                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
+                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
+                  whose initiator's realm matches it.
+                 Example: <kerberos />
+                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
+
+                 How to generate decent password:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+                 In first line will be password and in second - corresponding SHA256.
+
+                 How to generate double SHA1:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+                 In first line will be password and in second - corresponding double SHA1.
+            -->
+            <password></password>
+
+            <!-- List of networks with open access.
+
+                 To open access from everywhere, specify:
+                    <ip>::/0</ip>
+
+                 To open access only from localhost, specify:
+                    <ip>::1</ip>
+                    <ip>127.0.0.1</ip>
+
+                 Each element of list has one of the following forms:
+                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+                 <host> Hostname. Example: server01.clickhouse.com.
+                     To check access, DNS query is performed, and all received addresses compared to peer address.
+                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
+                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
+                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
+                     Strongly recommended that regexp is ends with $
+                 All results of DNS requests are cached till server restart.
+            -->
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+
+            <!-- Settings profile for user. -->
+            <profile>default</profile>
+
+            <!-- Quota for user. -->
+            <quota>default</quota>
+
+            <!-- User can create other users and grant rights to them. -->
+            <!-- <access_management>1</access_management> -->
+        </default>
+    </users>
+
+    <!-- Quotas. -->
+    <quotas>
+        <!-- Name of quota. -->
+        <default>
+            <!-- Limits for time interval. You could specify many intervals with different limits. -->
+            <interval>
+                <!-- Length of interval. -->
+                <duration>3600</duration>
+
+                <!-- No limits. Just calculate resource usage for time interval. -->
+                <queries>0</queries>
+                <errors>0</errors>
+                <result_rows>0</result_rows>
+                <read_rows>0</read_rows>
+                <execution_time>0</execution_time>
+            </interval>
+        </default>
+    </quotas>
+</clickhouse>
@@ -2,12 +2,15 @@ version: "3.9"

 services:
   clickhouse:
-    image: yandex/clickhouse-server:21.12.3.32
+    image: clickhouse/clickhouse-server:22.4.5-alpine
     # ports:
     #   - "9000:9000"
     #   - "8123:8123"
+    tty: true
     volumes:
       - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
     deploy:
       restart_policy:
@@ -28,7 +31,7 @@ services:
     volumes:
       - ./data/alertmanager:/data
     command:
-      - --queryService.url=http://query-service:8080
+      - --queryService.url=http://query-service:8085
       - --storage.path=/data
     depends_on:
       - query-service
@@ -37,10 +40,11 @@ services:
         condition: on-failure

   query-service:
-    image: signoz/query-service:0.8.0
+    image: signoz/query-service:0.9.1
     command: ["-config=/root/config/prometheus.yml"]
-    ports:
-      - "8080:8080"
+    # ports:
+    #   - "6060:6060" # pprof port
+    #   - "8080:8080" # query-service port
     volumes:
       - ./prometheus.yml:/root/config/prometheus.yml
       - ../dashboards:/root/config/dashboards
@@ -64,7 +68,7 @@ services:
       - clickhouse

   frontend:
-    image: signoz/frontend:0.8.0
+    image: signoz/frontend:0.9.1
     deploy:
       restart_policy:
         condition: on-failure
@@ -77,7 +81,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/otelcontribcol:0.43.0-0.1
+    image: signoz/otelcontribcol:0.45.1-1.0
     command: ["--config=/etc/otel-collector-config.yaml"]
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -85,7 +89,7 @@ services:
       - "4317:4317" # OTLP gRPC receiver
       - "4318:4318" # OTLP HTTP receiver
     # - "8889:8889" # Prometheus metrics exposed by the agent
-    # - "13133" # health_check
+    # - "13133:13133" # health_check
     # - "14268:14268" # Jaeger receiver
     # - "55678:55678" # OpenCensus receiver
     # - "55679:55679" # zpages extension
@@ -103,7 +107,7 @@ services:
       - clickhouse

   otel-collector-metrics:
-    image: signoz/otelcontribcol:0.43.0-0.1
+    image: signoz/otelcontribcol:0.45.1-1.0
     command: ["--config=/etc/otel-collector-metrics-config.yaml"]
     volumes:
       - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
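After these image bumps, the swarm stack picks up the new versions on redeploy. A sketch; the stack name and the compose file name are assumptions:

```bash
# Redeploy so swarm pulls the new 0.9.1 / 0.45.1-1.0 images
# (stack name and compose file name are illustrative).
docker stack deploy -c docker-compose.yaml signoz
docker stack services signoz
```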
@@ -12,7 +12,7 @@ receivers:
       grpc:
       thrift_http:
   hostmetrics:
-    collection_interval: 30s
+    collection_interval: 60s
     scrapers:
       cpu:
       load:
@@ -22,7 +22,8 @@ receivers:
       network:
 processors:
   batch:
-    send_batch_size: 1000
+    send_batch_size: 10000
+    send_batch_max_size: 11000
     timeout: 10s
   signozspanmetrics/prometheus:
     metrics_exporter: prometheus
@@ -52,7 +53,7 @@ extensions:
   health_check: {}
   zpages: {}
 exporters:
-  clickhouse:
+  clickhousetraces:
     datasource: tcp://clickhouse:9000/?database=signoz_traces
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/?database=signoz_metrics
@@ -66,7 +67,7 @@ service:
     traces:
       receivers: [jaeger, otlp]
       processors: [signozspanmetrics/prometheus, batch]
-      exporters: [clickhouse]
+      exporters: [clickhousetraces]
     metrics:
       receivers: [otlp, hostmetrics]
       processors: [batch]
@@ -9,12 +9,13 @@ receivers:
       config:
         scrape_configs:
           - job_name: "otel-collector"
-            scrape_interval: 30s
+            scrape_interval: 60s
             static_configs:
              - targets: ["otel-collector:8889"]
 processors:
   batch:
-    send_batch_size: 1000
+    send_batch_size: 10000
+    send_batch_max_size: 11000
     timeout: 10s
   # memory_limiter:
   #   # 80% of maximum memory up to 2G
@@ -12,13 +12,18 @@ server {
     gzip_http_version 1.1;

     location / {
-        add_header Cache-Control "no-store, no-cache, must-revalidate, max-age=0";
-        add_header Last-Modified $date_gmt;
+        if ( $uri = '/index.html' ) {
+            add_header Cache-Control no-store always;
+        }
         root /usr/share/nginx/html;
         index index.html index.htm;
         try_files $uri $uri/ /index.html;
     }

     location /api/alertmanager {
         proxy_pass http://alertmanager:9093/api/v2;
     }

+    location /api {
+        proxy_pass http://query-service:8080/api;
+    }
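The effect of the new `if` block is easy to check once the frontend container is up. A sketch, assuming the UI on its default port 3301:

```bash
# index.html should now come back with a no-store Cache-Control header.
curl -sI http://localhost:3301/index.html | grep -i cache-control
```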
File diff suppressed because it is too large.
deploy/docker/clickhouse-setup/clickhouse-storage.xml (new file): 28 changes

@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<clickhouse>
+    <storage_configuration>
+        <disks>
+            <default>
+                <keep_free_space_bytes>10485760</keep_free_space_bytes>
+            </default>
+            <s3>
+                <type>s3</type>
+                <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
+                <access_key_id>ACCESS-KEY-ID</access_key_id>
+                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
+            </s3>
+        </disks>
+        <policies>
+            <tiered>
+                <volumes>
+                    <default>
+                        <disk>default</disk>
+                    </default>
+                    <s3>
+                        <disk>s3</disk>
+                    </s3>
+                </volumes>
+            </tiered>
+        </policies>
+    </storage_configuration>
+</clickhouse>
deploy/docker/clickhouse-setup/clickhouse-users.xml (new file): 123 changes

@@ -0,0 +1,123 @@
+<?xml version="1.0"?>
+<clickhouse>
+    <!-- See also the files in users.d directory where the settings can be overridden. -->
+
+    <!-- Profiles of settings. -->
+    <profiles>
+        <!-- Default settings. -->
+        <default>
+            <!-- Maximum memory usage for processing single query, in bytes. -->
+            <max_memory_usage>10000000000</max_memory_usage>
+
+            <!-- How to choose between replicas during distributed query processing.
+                 random - choose random replica from set of replicas with minimum number of errors
+                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
+                  with minimum number of different symbols between replica's hostname and local hostname
+                  (Hamming distance).
+                 in_order - first live replica is chosen in specified order.
+                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
+            -->
+            <load_balancing>random</load_balancing>
+        </default>
+
+        <!-- Profile that allows only read queries. -->
+        <readonly>
+            <readonly>1</readonly>
+        </readonly>
+    </profiles>
+
+    <!-- Users and ACL. -->
+    <users>
+        <!-- If user name was not specified, 'default' user is used. -->
+        <default>
+            <!-- See also the files in users.d directory where the password can be overridden.
+
+                 Password could be specified in plaintext or in SHA256 (in hex format).
+
+                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
+                 Example: <password>qwerty</password>.
+                 Password could be empty.
+
+                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
+                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
+
+                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
+                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
+
+                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
+                  place its name in 'server' element inside 'ldap' element.
+                 Example: <ldap><server>my_ldap_server</server></ldap>
+
+                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
+                  place 'kerberos' element instead of 'password' (and similar) elements.
+                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
+                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
+                  whose initiator's realm matches it.
+                 Example: <kerberos />
+                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
+
+                 How to generate decent password:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+                 In first line will be password and in second - corresponding SHA256.
+
+                 How to generate double SHA1:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+                 In first line will be password and in second - corresponding double SHA1.
+            -->
+            <password></password>
+
+            <!-- List of networks with open access.
+
+                 To open access from everywhere, specify:
+                    <ip>::/0</ip>
+
+                 To open access only from localhost, specify:
+                    <ip>::1</ip>
+                    <ip>127.0.0.1</ip>
+
+                 Each element of list has one of the following forms:
+                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+                 <host> Hostname. Example: server01.clickhouse.com.
+                     To check access, DNS query is performed, and all received addresses compared to peer address.
+                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
+                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
+                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
+                     Strongly recommended that regexp is ends with $
+                 All results of DNS requests are cached till server restart.
+            -->
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+
+            <!-- Settings profile for user. -->
+            <profile>default</profile>
+
+            <!-- Quota for user. -->
+            <quota>default</quota>
+
+            <!-- User can create other users and grant rights to them. -->
+            <!-- <access_management>1</access_management> -->
+        </default>
+    </users>
+
+    <!-- Quotas. -->
+    <quotas>
+        <!-- Name of quota. -->
+        <default>
+            <!-- Limits for time interval. You could specify many intervals with different limits. -->
+            <interval>
+                <!-- Length of interval. -->
+                <duration>3600</duration>
+
+                <!-- No limits. Just calculate resource usage for time interval. -->
+                <queries>0</queries>
+                <errors>0</errors>
+                <result_rows>0</result_rows>
+                <read_rows>0</read_rows>
+                <execution_time>0</execution_time>
+            </interval>
+        </default>
+    </quotas>
+</clickhouse>
deploy/docker/clickhouse-setup/config.xml (new file): 1304 changes
File diff suppressed because it is too large.
@@ -1,131 +0,0 @@
-version: "2.4"
-
-services:
-  clickhouse:
-    image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm
-    # ports:
-    #   - "9000:9000"
-    #   - "8123:8123"
-    volumes:
-      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
-      - ./data/clickhouse/:/var/lib/clickhouse/
-    restart: on-failure
-    logging:
-      options:
-        max-size: 50m
-        max-file: "3"
-    healthcheck:
-      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
-      interval: 30s
-      timeout: 5s
-      retries: 3
-
-  alertmanager:
-    image: signoz/alertmanager:0.23.0-0.1
-    volumes:
-      - ./data/alertmanager:/data
-    depends_on:
-      query-service:
-        condition: service_healthy
-    restart: on-failure
-    command:
-      - --queryService.url=http://query-service:8080
-      - --storage.path=/data
-
-  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
-
-  query-service:
-    image: signoz/query-service:0.8.0
-    container_name: query-service
-    command: ["-config=/root/config/prometheus.yml"]
-    volumes:
-      - ./prometheus.yml:/root/config/prometheus.yml
-      - ../dashboards:/root/config/dashboards
-      - ./data/signoz/:/var/lib/signoz/
-    environment:
-      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
-      - STORAGE=clickhouse
-      - GODEBUG=netdns=go
-      - TELEMETRY_ENABLED=true
-      - DEPLOYMENT_TYPE=docker-standalone-arm
-
-    restart: on-failure
-    healthcheck:
-      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
-      interval: 30s
-      timeout: 5s
-      retries: 3
-    depends_on:
-      clickhouse:
-        condition: service_healthy
-
-  frontend:
-    image: signoz/frontend:0.8.0
-    container_name: frontend
-    restart: on-failure
-    depends_on:
-      - alertmanager
-      - query-service
-    ports:
-      - "3301:3301"
-    volumes:
-      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
-
-  otel-collector:
-    image: signoz/otelcontribcol:0.43.0-0.1
-    command: ["--config=/etc/otel-collector-config.yaml"]
-    volumes:
-      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
-    ports:
-      - "4317:4317" # OTLP gRPC receiver
-      - "4318:4318" # OTLP HTTP receiver
-      # - "8889:8889" # Prometheus metrics exposed by the agent
-      # - "13133" # health_check
-      # - "14268:14268" # Jaeger receiver
-      # - "55678:55678" # OpenCensus receiver
-      # - "55679:55679" # zpages extension
-      # - "55680:55680" # OTLP gRPC legacy receiver
-      # - "55681:55681" # OTLP HTTP legacy receiver
-    mem_limit: 2000m
-    restart: on-failure
-    depends_on:
-      clickhouse:
-        condition: service_healthy
-
-  otel-collector-metrics:
-    image: signoz/otelcontribcol:0.43.0-0.1
-    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
-    volumes:
-      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
-    restart: on-failure
-    depends_on:
-      clickhouse:
-        condition: service_healthy
-
-  hotrod:
-    image: jaegertracing/example-hotrod:1.30
-    container_name: hotrod
-    logging:
-      options:
-        max-size: 50m
-        max-file: "3"
-    command: ["all"]
-    environment:
-      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
-
-  load-hotrod:
-    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
-    container_name: load-hotrod
-    hostname: load-hotrod
-    environment:
-      ATTACKED_HOST: http://hotrod:8080
-      LOCUST_MODE: standalone
-      NO_PROXY: standalone
-      TASK_DELAY_FROM: 5
-      TASK_DELAY_TO: 30
-      QUIET_MODE: "${QUIET_MODE:-false}"
-      LOCUST_OPTS: "--headless -u 10 -r 1"
-    volumes:
-      - ../common/locust-scripts:/locust
@@ -2,12 +2,15 @@ version: "2.4"

 services:
   clickhouse:
-    image: yandex/clickhouse-server:21.12.3.32
+    image: clickhouse/clickhouse-server:22.4.5-alpine
     # ports:
     #   - "9000:9000"
     #   - "8123:8123"
+    tty: true
     volumes:
       - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
     restart: on-failure
     logging:
@@ -30,15 +33,18 @@ services:
         condition: service_healthy
     restart: on-failure
     command:
-      - --queryService.url=http://query-service:8080
+      - --queryService.url=http://query-service:8085
       - --storage.path=/data

   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:0.8.0
+    image: signoz/query-service:0.9.1
     container_name: query-service
     command: ["-config=/root/config/prometheus.yml"]
+    # ports:
+    #   - "6060:6060" # pprof port
+    #   - "8080:8080" # query-service port
     volumes:
       - ./prometheus.yml:/root/config/prometheus.yml
       - ../dashboards:/root/config/dashboards
@@ -60,7 +66,7 @@ services:
         condition: service_healthy

   frontend:
-    image: signoz/frontend:0.8.0
+    image: signoz/frontend:0.9.1
     container_name: frontend
     restart: on-failure
     depends_on:
@@ -72,7 +78,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/otelcontribcol:0.43.0-0.1
+    image: signoz/otelcontribcol:0.45.1-1.0
     command: ["--config=/etc/otel-collector-config.yaml"]
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -80,7 +86,7 @@ services:
       - "4317:4317" # OTLP gRPC receiver
       - "4318:4318" # OTLP HTTP receiver
     # - "8889:8889" # Prometheus metrics exposed by the agent
-    # - "13133" # health_check
+    # - "13133:13133" # health_check
     # - "14268:14268" # Jaeger receiver
     # - "55678:55678" # OpenCensus receiver
     # - "55679:55679" # zpages extension
@@ -93,7 +99,7 @@ services:
         condition: service_healthy

   otel-collector-metrics:
-    image: signoz/otelcontribcol:0.43.0-0.1
+    image: signoz/otelcontribcol:0.45.1-1.0
     command: ["--config=/etc/otel-collector-metrics-config.yaml"]
     volumes:
       - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
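For the standalone Docker setup the same bumps are picked up by recreating the containers. A sketch, run from the clickhouse-setup directory:

```bash
# Pull the new images and recreate only the services that changed.
docker compose pull
docker compose up -d
```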
@@ -12,7 +12,7 @@ receivers:
       grpc:
       thrift_http:
   hostmetrics:
-    collection_interval: 30s
+    collection_interval: 60s
     scrapers:
       cpu:
       load:
@@ -22,7 +22,8 @@ receivers:
       network:
 processors:
   batch:
-    send_batch_size: 1000
+    send_batch_size: 10000
+    send_batch_max_size: 11000
     timeout: 10s
   signozspanmetrics/prometheus:
     metrics_exporter: prometheus
@@ -52,7 +53,7 @@ extensions:
   health_check: {}
   zpages: {}
 exporters:
-  clickhouse:
+  clickhousetraces:
     datasource: tcp://clickhouse:9000/?database=signoz_traces
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/?database=signoz_metrics
@@ -66,7 +67,7 @@ service:
     traces:
       receivers: [jaeger, otlp]
       processors: [signozspanmetrics/prometheus, batch]
-      exporters: [clickhouse]
+      exporters: [clickhousetraces]
     metrics:
       receivers: [otlp, hostmetrics]
       processors: [batch]
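With the collector's OTLP HTTP receiver published on 4318, ingestion can be smoke-tested from the host. A sketch; an empty resourceSpans payload should come back with a 200:

```bash
# Smoke-test the OTLP HTTP receiver exposed by otel-collector.
curl -s -o /dev/null -w "%{http_code}\n" \
  -X POST -H "Content-Type: application/json" \
  -d '{"resourceSpans":[]}' http://localhost:4318/v1/traces
```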
@@ -9,12 +9,13 @@ receivers:
       config:
         scrape_configs:
           - job_name: "otel-collector"
-            scrape_interval: 30s
+            scrape_interval: 60s
             static_configs:
              - targets: ["otel-collector:8889"]
 processors:
   batch:
-    send_batch_size: 1000
+    send_batch_size: 10000
+    send_batch_max_size: 11000
     timeout: 10s
   # memory_limiter:
   #   # 80% of maximum memory up to 2G
deploy/docker/clickhouse-setup/users.xml (new file): 123 changes

@@ -0,0 +1,123 @@
+<?xml version="1.0"?>
+<clickhouse>
+    <!-- See also the files in users.d directory where the settings can be overridden. -->
+
+    <!-- Profiles of settings. -->
+    <profiles>
+        <!-- Default settings. -->
+        <default>
+            <!-- Maximum memory usage for processing single query, in bytes. -->
+            <max_memory_usage>10000000000</max_memory_usage>
+
+            <!-- How to choose between replicas during distributed query processing.
+                 random - choose random replica from set of replicas with minimum number of errors
+                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
+                  with minimum number of different symbols between replica's hostname and local hostname
+                  (Hamming distance).
+                 in_order - first live replica is chosen in specified order.
+                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
+            -->
+            <load_balancing>random</load_balancing>
+        </default>
+
+        <!-- Profile that allows only read queries. -->
+        <readonly>
+            <readonly>1</readonly>
+        </readonly>
+    </profiles>
+
+    <!-- Users and ACL. -->
+    <users>
+        <!-- If user name was not specified, 'default' user is used. -->
+        <default>
+            <!-- See also the files in users.d directory where the password can be overridden.
+
+                 Password could be specified in plaintext or in SHA256 (in hex format).
+
+                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
+                 Example: <password>qwerty</password>.
+                 Password could be empty.
+
+                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
+                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
+
+                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
+                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
+
+                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
+                  place its name in 'server' element inside 'ldap' element.
+                 Example: <ldap><server>my_ldap_server</server></ldap>
+
+                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
+                  place 'kerberos' element instead of 'password' (and similar) elements.
+                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
+                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
+                  whose initiator's realm matches it.
+                 Example: <kerberos />
+                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
+
+                 How to generate decent password:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+                 In first line will be password and in second - corresponding SHA256.
+
+                 How to generate double SHA1:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+                 In first line will be password and in second - corresponding double SHA1.
+            -->
+            <password></password>
+
+            <!-- List of networks with open access.
+
+                 To open access from everywhere, specify:
+                    <ip>::/0</ip>
+
+                 To open access only from localhost, specify:
+                    <ip>::1</ip>
+                    <ip>127.0.0.1</ip>
+
+                 Each element of list has one of the following forms:
+                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+                 <host> Hostname. Example: server01.clickhouse.com.
+                     To check access, DNS query is performed, and all received addresses compared to peer address.
+                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
+                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
+                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
+                     Strongly recommended that regexp is ends with $
+                 All results of DNS requests are cached till server restart.
+            -->
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+
+            <!-- Settings profile for user. -->
+            <profile>default</profile>
+
+            <!-- Quota for user. -->
+            <quota>default</quota>
+
+            <!-- User can create other users and grant rights to them. -->
+            <!-- <access_management>1</access_management> -->
+        </default>
+    </users>
+
+    <!-- Quotas. -->
+    <quotas>
+        <!-- Name of quota. -->
+        <default>
+            <!-- Limits for time interval. You could specify many intervals with different limits. -->
+            <interval>
+                <!-- Length of interval. -->
+                <duration>3600</duration>
+
+                <!-- No limits. Just calculate resource usage for time interval. -->
+                <queries>0</queries>
+                <errors>0</errors>
+                <result_rows>0</result_rows>
+                <read_rows>0</read_rows>
+                <execution_time>0</execution_time>
+            </interval>
+        </default>
+    </quotas>
+</clickhouse>
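The inline comment above already gives the recipe for replacing the empty default password with a SHA256 hash; the override belongs in a users.d drop-in rather than in this file. A sketch of that recipe, copied from the comment:

```bash
# Generate a random password and its SHA256 hex digest, per the comment
# in users.xml; the digest goes into <password_sha256_hex> in users.d/.
PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"
echo -n "$PASSWORD" | sha256sum | tr -d '-'
```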
@@ -9,17 +9,23 @@ server {
     gzip_vary on;
     gzip_comp_level 6;
     gzip_buffers 16 8k;
-    gzip_http_version 1.1;
+    gzip_http_version 1.1;

+    # to handle uri issue 414 from nginx
+    client_max_body_size 24M;
+
+    large_client_header_buffers 8 16k;
+
     location / {
-        add_header Cache-Control "no-store, no-cache, must-revalidate, max-age=0";
-        add_header Last-Modified $date_gmt;
+        if ( $uri = '/index.html' ) {
+            add_header Cache-Control no-store always;
+        }
         root /usr/share/nginx/html;
         index index.html index.htm;
         try_files $uri $uri/ /index.html;
     }

-    location /api/alertmanager{
+    location /api/alertmanager {
         proxy_pass http://alertmanager:9093/api/v2;
     }
@@ -1,273 +0,0 @@
|
||||
version: "2.4"
|
||||
|
||||
volumes:
|
||||
metadata_data: {}
|
||||
middle_var: {}
|
||||
historical_var: {}
|
||||
broker_var: {}
|
||||
coordinator_var: {}
|
||||
router_var: {}
|
||||
|
||||
# If able to connect to kafka but not able to write to topic otlp_spans look into below link
|
||||
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
|
||||
|
||||
services:
|
||||
|
||||
zookeeper:
|
||||
image: bitnami/zookeeper:3.6.2-debian-10-r100
|
||||
ports:
|
||||
- "2181:2181"
|
||||
environment:
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
|
||||
|
||||
kafka:
|
||||
# image: wurstmeister/kafka
|
||||
image: bitnami/kafka:2.7.0-debian-10-r1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
hostname: kafka
|
||||
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'

    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/data
      - coordinator_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_tiny/coordinator
      - environment_tiny/common

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_tiny/broker
      - environment_tiny/common

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/data
      - historical_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_tiny/historical
      - environment_tiny/common

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/data
      - middle_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_tiny/middlemanager
      - environment_tiny/common

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_tiny/router
      - environment_tiny/common
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
      interval: 30s
      timeout: 5s
      retries: 5

  flatten-processor:
    image: signoz/flattener-processor:0.4.0
    container_name: flattener-processor
    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"
    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans

  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
    container_name: query-service
    depends_on:
      router:
        condition: service_healthy
    ports:
      - "8080:8080"
    volumes:
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - STORAGE=druid
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go

  frontend:
    image: signoz/frontend:0.4.1
    container_name: frontend
    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json

  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"   # pprof extension
      - "8887:8888"   # Prometheus metrics exposed by the agent
      - "14268:14268" # Jaeger receiver
      - "55678"       # OpenCensus receiver
      - "55680:55680" # OTLP HTTP/2.0 legacy port
      - "55681:55681" # OTLP HTTP/1.0 receiver
      - "4317:4317"   # OTLP GRPC receiver
      - "55679:55679" # zpages extension
      - "13133"       # health_check
    depends_on:
      kafka:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
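Note that the router healthcheck above only passes once the flattened_spans datasource exists, so query-service (which waits on service_healthy) stays pending until ingestion has started. Because port 8888 is published, the same probe can be run by hand from the host; a minimal sketch assuming the default port mapping:

    wget --spider -q http://localhost:8888/druid/coordinator/v1/datasources/flattened_spans \
      && echo "router healthy: flattened_spans datasource present"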
@@ -1,269 +0,0 @@
version: "2.4"

volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}

# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707

services:

  zookeeper:
    image: bitnami/zookeeper:3.6.2-debian-10-r100
    ports:
      - "2181:2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes

  kafka:
    # image: wurstmeister/kafka
    image: bitnami/kafka:2.7.0-debian-10-r1
    ports:
      - "9092:9092"
    hostname: kafka
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'

    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/druid/deepStorage
      - coordinator_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_small/coordinator

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_small/broker

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/druid/deepStorage
      - historical_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_small/historical

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/druid/deepStorage
      - middle_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_small/middlemanager

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_small/router
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
      interval: 30s
      timeout: 5s
      retries: 5

  flatten-processor:
    image: signoz/flattener-processor:0.4.0
    container_name: flattener-processor
    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"
    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans

  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
    container_name: query-service
    depends_on:
      router:
        condition: service_healthy
    ports:
      - "8080:8080"
    volumes:
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - STORAGE=druid
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go

  frontend:
    image: signoz/frontend:0.4.1
    container_name: frontend
    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3301:3301"
    volumes:
      - ./nginx-config.conf:/etc/nginx/conf.d/default.conf

  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json

  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"   # pprof extension
      - "8887:8888"   # Prometheus metrics exposed by the agent
      - "14268:14268" # Jaeger receiver
      - "55678"       # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 leagcy grpc receiver
|
      - "55681:55681" # OTLP HTTP/1.0 receiver
      - "4317:4317"   # OTLP GRPC receiver
      - "55679:55679" # zpages extension
      - "13133"       # health_check
    depends_on:
      kafka:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ./locust-scripts:/locust
@@ -1 +0,0 @@
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
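This retention rule keeps the most recent three days of segments (one replica in the default tier) and drops everything older. It mirrors what the set-retention container posts on startup, and can also be applied by hand against the published router port; a sketch assuming default mappings and the spec saved locally as retention-spec.json:

    curl -X POST -H 'Content-Type: application/json' \
      -d @retention-spec.json \
      http://localhost:8888/druid/coordinator/v1/rules/flattened_spans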
@@ -1,69 +0,0 @@
{
  "type": "kafka",
  "dataSchema": {
    "dataSource": "flattened_spans",
    "parser": {
      "type": "string",
      "parseSpec": {
        "format": "json",
        "timestampSpec": {
          "column": "StartTimeUnixNano",
          "format": "nano"
        },
        "dimensionsSpec": {
          "dimensions": [
            "TraceId",
            "SpanId",
            "ParentSpanId",
            "Name",
            "ServiceName",
            "References",
            "Tags",
            "ExternalHttpMethod",
            "ExternalHttpUrl",
            "Component",
            "DBSystem",
            "DBName",
            "DBOperation",
            "PeerService",
            {
              "type": "string",
              "name": "TagsKeys",
              "multiValueHandling": "ARRAY"
            },
            {
              "type": "string",
              "name": "TagsValues",
              "multiValueHandling": "ARRAY"
            },
            { "name": "DurationNano", "type": "Long" },
            { "name": "Kind", "type": "int" },
            { "name": "StatusCode", "type": "int" }
          ]
        }
      }
    }
  },
  "metricsSpec": [
    { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
  ],
  "granularitySpec": {
    "type": "uniform",
    "segmentGranularity": "DAY",
    "queryGranularity": "NONE",
    "rollup": false
  },
  "tuningConfig": {
    "type": "kafka",
    "reportParseExceptions": true
  },
  "ioConfig": {
    "topic": "flattened_spans",
    "replicas": 1,
    "taskDuration": "PT20M",
    "completionTimeout": "PT30M",
    "consumerProperties": {
      "bootstrap.servers": "kafka:9092"
    }
  }
}
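The supervisor spec above tells Druid's Kafka indexing service to tail the flattened_spans topic into a datasource of the same name. Submitting and verifying it manually mirrors what the create-supervisor container does; a sketch assuming default port mappings and the spec saved as supervisor-spec.json:

    curl -X POST -H 'Content-Type: application/json' \
      -d @supervisor-spec.json \
      http://localhost:8888/druid/indexer/v1/supervisor
    # list running supervisors to confirm it was accepted
    curl http://localhost:8888/druid/indexer/v1/supervisor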
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=768m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=100MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
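As a rough sanity check against Druid's documented sizing rule, direct memory must cover druid.processing.buffer.sizeBytes x (druid.processing.numThreads + druid.processing.numMergeBuffers + 1). For the values above:

    needed direct memory = 100MiB x (1 + 2 + 1) = 400MiB, within DRUID_MAXDIRECTMEMORYSIZE=768m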
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=1280m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=2
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=1g
DRUID_XMS=1g
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=2g

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=2
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=128m
DRUID_XMS=128m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,26 +0,0 @@
# For S3 storage

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]

# druid_storage_type=s3
# druid_storage_bucket=<s3-bucket-name>
# druid_storage_baseKey=druid/segments

# AWS_ACCESS_KEY_ID=<s3-access-id>
# AWS_SECRET_ACCESS_KEY=<s3-access-key>
# AWS_REGION=<s3-aws-region>

# druid_indexer_logs_type=s3
# druid_indexer_logs_s3Bucket=<s3-bucket-name>
# druid_indexer_logs_s3Prefix=druid/indexing-logs

# -----------------------------------------------------------
# For local storage
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_storage_type=local
druid_storage_storageDirectory=/opt/data/segments
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/data/indexing-logs
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,50 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,51 +0,0 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  queued_retry:
    num_workers: 4
    queue_size: 100
    retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  kafka/traces:
    brokers:
      - kafka:9092
    topic: 'otlp_spans'
    protocol_version: 2.0.0

  kafka/metrics:
    brokers:
      - kafka:9092
    topic: 'otlp_metrics'
    protocol_version: 2.0.0
service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [memory_limiter, batch, queued_retry]
      exporters: [kafka/traces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [kafka/metrics]
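With this config, traces arriving over Jaeger or OTLP are batched and pushed to the otlp_spans Kafka topic. A quick smoke test of the collector itself is the zpages extension published on 55679 above; a sketch assuming the default port mapping (/debug/tracez is the standard zpages trace page):

    curl http://localhost:55679/debug/tracez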
@@ -36,9 +36,9 @@ is_mac() {
    [[ $OSTYPE == darwin* ]]
}

is_arm64(){
    [[ `uname -m` == 'arm64' ]]
}
# is_arm64(){
#     [[ `uname -m` == 'arm64' ]]
# }

check_os() {
    if is_mac; then
@@ -237,11 +237,7 @@ bye() {  # Prints a friendly good bye message and exits the script.

    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""
    if is_arm64; then
        echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a"
    else
        echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
    fi
    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

    # echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
    echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
@@ -333,7 +329,6 @@ fi

# echo -e "👉 ${RED}Two ways to go forward\n"
# echo -e "${RED}1) ClickHouse as database (default)\n"
# echo -e "${RED}2) Kafka + Druid as datastore \n"
# read -p "⚙️ Enter your preference (1/2):" choice_setup

# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
@@ -346,8 +341,6 @@ fi

# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
#     setup_type='clickhouse'
# else
#     setup_type='druid'
# fi

setup_type='clickhouse'
@@ -469,22 +462,14 @@ start_docker

echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
if is_arm64; then
    $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull
else
    $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
fi
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull

echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
if is_arm64; then
    $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true
else
    $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
fi
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true

wait_for_containers_start 60
echo ""
@@ -513,11 +498,7 @@ else
    echo -e "🟢 Your frontend is running on http://localhost:3301"
    echo ""

    if is_arm64; then
        echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v"
    else
        echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
    fi
    echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"

    echo ""
    echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
@@ -1,4 +1,5 @@
node_modules
.vscode
build
.env
.env
.git
frontend/.husky/commit-msg (new executable file)
@@ -0,0 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"

cd frontend && npm run commitlint
@@ -1 +1 @@
12.13.0
16.15.0
@@ -1,5 +1,5 @@
# stage1 as builder
FROM node:12.22.0 as builder
# Builder stage
FROM node:16.15.0-slim as builder

# Add Maintainer Info
LABEL maintainer="signoz"
@@ -9,24 +9,23 @@ ARG TARGETARCH

WORKDIR /frontend

# copy the package.json to install dependencies
# Copy the package.json to install dependencies
COPY package.json ./

# Install the dependencies and make the folder
RUN yarn install
RUN CI=1 yarn install

COPY . .

# Build the project and copy the files
RUN yarn build

FROM nginx:1.18-alpine

#!/bin/sh
FROM nginx:1.18-alpine

COPY conf/default.conf /etc/nginx/conf.d/default.conf

## Remove default nginx index page
# Remove default nginx index page
RUN rm -rf /usr/share/nginx/html/*

# Copy from the stage 1
@@ -34,4 +33,4 @@ COPY --from=builder /frontend/build /usr/share/nginx/html

EXPOSE 3301

ENTRYPOINT ["nginx", "-g", "daemon off;"]
ENTRYPOINT ["nginx", "-g", "daemon off;"]
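The two-stage build above compiles the frontend under node:16.15.0-slim and serves the output from nginx:1.18-alpine on port 3301. A local build would look roughly like this, assuming it is run from the frontend directory (the tag is illustrative; TARGETARCH is only auto-populated under buildx):

    docker build -t signoz-frontend:local .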
frontend/commitlint.config.js (new file)
@@ -0,0 +1 @@
module.exports = { extends: ['@commitlint/config-conventional'] };
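Together with the commit-msg hook above, @commitlint/config-conventional rejects commits whose messages do not follow the type(scope): subject convention. An illustrative message that would pass:

    git commit -m "feat(frontend): enforce conventional commits via husky"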
@@ -1,3 +0,0 @@
{
  "video": false
}
@@ -1,48 +0,0 @@
/* eslint-disable @typescript-eslint/no-unused-expressions */
const Login = ({ email, name }: LoginProps): void => {
  const emailInput = cy.findByPlaceholderText('name@yourcompany.com');

  emailInput.then((emailInput) => {
    const element = emailInput[0];
    // element is present
    expect(element).not.undefined;
    expect(element.nodeName).to.be.equal('INPUT');
  });
  emailInput.type(email).then((inputElements) => {
    const inputElement = inputElements[0];
    const inputValue = inputElement.getAttribute('value');
    expect(inputValue).to.be.equals(email);
  });

  const firstNameInput = cy.findByPlaceholderText('Your Name');
  firstNameInput.then((firstNameInput) => {
    const element = firstNameInput[0];
    // element is present
    expect(element).not.undefined;
    expect(element.nodeName).to.be.equal('INPUT');
  });

  firstNameInput.type(name).then((inputElements) => {
    const inputElement = inputElements[0];
    const inputValue = inputElement.getAttribute('value');
    expect(inputValue).to.be.equals(name);
  });

  const gettingStartedButton = cy.findByText('Get Started');
  gettingStartedButton.click();

  cy
    .intercept('POST', '/api/v1/user?email*', {
      statusCode: 200,
    })
    .as('defaultUser');

  cy.wait('@defaultUser');
};

export interface LoginProps {
  email: string;
  name: string;
}

export default Login;
@@ -1,49 +0,0 @@
import {
  getDefaultOption,
  getOptions,
} from 'container/Header/DateTimeSelection/config';
// import { AppState } from 'store/reducers';

const CheckRouteDefaultGlobalTimeOptions = ({
  route,
}: CheckRouteDefaultGlobalTimeOptionsProps): void => {
  cy.visit(Cypress.env('baseUrl') + route);

  const allOptions = getOptions(route);

  const defaultValue = getDefaultOption(route);

  const defaultSelectedOption = allOptions.find((e) => e.value === defaultValue);

  expect(defaultSelectedOption).not.undefined;

  cy
    .findAllByTestId('dropDown')
    .find('span')
    .then((el) => {
      const elements = el.get();

      const item = elements[1];

      expect(defaultSelectedOption?.label).to.be.equals(
        item.innerText,
        'Default option is not matching',
      );
    });

  // cy
  //   .window()
  //   .its('store')
  //   .invoke('getState')
  //   .then((e: AppState) => {
  //     const { globalTime } = e;
  //     const { maxTime, minTime } = globalTime;
  //     // @TODO match the global min time and max time according to the selected option
  //   });
};

export interface CheckRouteDefaultGlobalTimeOptionsProps {
  route: string;
}

export default CheckRouteDefaultGlobalTimeOptions;
@@ -1,11 +0,0 @@
const resizeObserverLoopErrRe = /ResizeObserver loop limit exceeded/;

const unCaughtExpection = (): void => {
  cy.on('uncaught:exception', (err) => {
    // returning false here prevents Cypress from
    // failing the test
    return !resizeObserverLoopErrRe.test(err.message);
  });
};

export default unCaughtExpection;
@@ -1,21 +0,0 @@
{
  "data": [
    {
      "created_at": 1638083159246,
      "data": "{}",
      "id": 1,
      "name": "First Channels",
      "type": "slack",
      "updated_at": 1638083159246
    },
    {
      "created_at": 1638083159246,
      "data": "{}",
      "id": 2,
      "name": "Second Channels",
      "type": "Slack",
      "updated_at": 1638083159246
    }
  ],
  "message": "Success"
}
@@ -1,35 +0,0 @@
[
  {
    "serviceName": "frontend",
    "p99": 1134610000,
    "avgDuration": 744523000,
    "numCalls": 267,
    "callRate": 0.89,
    "numErrors": 0,
    "errorRate": 0,
    "num4XX": 0,
    "fourXXRate": 0
  },
  {
    "serviceName": "customer",
    "p99": 734422400,
    "avgDuration": 348678530,
    "numCalls": 267,
    "callRate": 0.89,
    "numErrors": 0,
    "errorRate": 0,
    "num4XX": 0,
    "fourXXRate": 0
  },
  {
    "serviceName": "driver",
    "p99": 239234080,
    "avgDuration": 204662290,
    "numCalls": 267,
    "callRate": 0.89,
    "numErrors": 0,
    "errorRate": 0,
    "num4XX": 0,
    "fourXXRate": 0
  }
]
@@ -1,28 +0,0 @@
{
  "status": "success",
  "data": {
    "rules": [
      {
        "labels": { "severity": "warning" },
        "annotations": {},
        "state": "firing",
        "name": "First Rule",
        "id": 1
      },
      {
        "labels": { "severity": "warning" },
        "annotations": {},
        "state": "firing",
        "name": "Second Rule",
        "id": 2
      },
      {
        "labels": { "severity": "P0" },
        "annotations": {},
        "state": "firing",
        "name": "Third Rule",
        "id": 3
      }
    ]
  }
}
@@ -1 +0,0 @@
{ "status": "success", "data": { "resultType": "matrix", "result": [] } }
@@ -1,29 +0,0 @@
{
  "status": "success",
  "data": {
    "resultType": "matrix",
    "result": [
      {
        "metric": {},
        "values": [
          [1634741764.961, "0.9"],
          [1634741824.961, "0.9"],
          [1634741884.961, "0.8666666666666667"],
          [1634741944.961, "1"],
          [1634742004.961, "0.9166666666666666"],
          [1634742064.961, "0.95"],
          [1634742124.961, "0.9333333333333333"],
          [1634742184.961, "0.95"],
          [1634742244.961, "1.0333333333333334"],
          [1634742304.961, "0.9333333333333333"],
          [1634742364.961, "0.9166666666666666"],
          [1634742424.961, "0.9"],
          [1634742484.961, "1.0166666666666666"],
          [1634742544.961, "0.8333333333333334"],
          [1634742604.961, "0.9166666666666666"],
          [1634742664.961, "0.95"]
        ]
      }
    ]
  }
}
@@ -1,62 +0,0 @@
[
  {
    "timestamp": 1634742600000000000,
    "p50": 720048500,
    "p95": 924409540,
    "p99": 974744300,
    "numCalls": 48,
    "callRate": 0.8,
    "numErrors": 0,
    "errorRate": 0
  },
  {
    "timestamp": 1634742540000000000,
    "p50": 712614000,
    "p95": 955580700,
    "p99": 1045595400,
    "numCalls": 59,
    "callRate": 0.98333335,
    "numErrors": 0,
    "errorRate": 0
  },
  {
    "timestamp": 1634742480000000000,
    "p50": 720842000,
    "p95": 887187600,
    "p99": 943676860,
    "numCalls": 53,
    "callRate": 0.8833333,
    "numErrors": 0,
    "errorRate": 0
  },
  {
    "timestamp": 1634742420000000000,
    "p50": 712287000,
    "p95": 908505540,
    "p99": 976507650,
    "numCalls": 58,
    "callRate": 0.96666664,
    "numErrors": 0,
    "errorRate": 0
  },
  {
    "timestamp": 1634742360000000000,
    "p50": 697125500,
    "p95": 975581800,
    "p99": 1190121900,
    "numCalls": 54,
    "callRate": 0.9,
    "numErrors": 0,
    "errorRate": 0
  },
  {
    "timestamp": 1634742300000000000,
    "p50": 711592500,
    "p95": 880559900,
    "p99": 1100105500,
    "numCalls": 40,
    "callRate": 0.6666667,
    "numErrors": 0,
    "errorRate": 0
  }
]
@@ -1,9 +0,0 @@
[
  {
    "p50": 710824000,
    "p95": 1003231400,
    "p99": 1231265500,
    "numCalls": 299,
    "name": "HTTP GET /dispatch"
  }
]
@@ -1,35 +0,0 @@
{
  "items": {
    "1644926280000000000": { "timestamp": 1644926280000000000, "value": 787 },
    "1644926340000000000": { "timestamp": 1644926340000000000, "value": 2798 },
    "1644926400000000000": { "timestamp": 1644926400000000000, "value": 2828 },
    "1644926460000000000": { "timestamp": 1644926460000000000, "value": 2926 },
    "1644926520000000000": { "timestamp": 1644926520000000000, "value": 2932 },
    "1644926580000000000": { "timestamp": 1644926580000000000, "value": 2842 },
    "1644926640000000000": { "timestamp": 1644926640000000000, "value": 2966 },
    "1644926700000000000": { "timestamp": 1644926700000000000, "value": 2782 },
    "1644926760000000000": { "timestamp": 1644926760000000000, "value": 2843 },
    "1644926820000000000": { "timestamp": 1644926820000000000, "value": 2864 },
    "1644926880000000000": { "timestamp": 1644926880000000000, "value": 2777 },
    "1644926940000000000": { "timestamp": 1644926940000000000, "value": 2820 },
    "1644927000000000000": { "timestamp": 1644927000000000000, "value": 2579 },
    "1644927060000000000": { "timestamp": 1644927060000000000, "value": 2681 },
    "1644927120000000000": { "timestamp": 1644927120000000000, "value": 2828 },
    "1644927180000000000": { "timestamp": 1644927180000000000, "value": 2975 },
    "1644927240000000000": { "timestamp": 1644927240000000000, "value": 2934 },
    "1644927300000000000": { "timestamp": 1644927300000000000, "value": 2793 },
    "1644927360000000000": { "timestamp": 1644927360000000000, "value": 2913 },
    "1644927420000000000": { "timestamp": 1644927420000000000, "value": 2621 },
    "1644927480000000000": { "timestamp": 1644927480000000000, "value": 2631 },
    "1644927540000000000": { "timestamp": 1644927540000000000, "value": 2924 },
    "1644927600000000000": { "timestamp": 1644927600000000000, "value": 2576 },
    "1644927660000000000": { "timestamp": 1644927660000000000, "value": 2878 },
    "1644927720000000000": { "timestamp": 1644927720000000000, "value": 2737 },
    "1644927780000000000": { "timestamp": 1644927780000000000, "value": 2621 },
    "1644927840000000000": { "timestamp": 1644927840000000000, "value": 2823 },
    "1644927900000000000": { "timestamp": 1644927900000000000, "value": 3081 },
    "1644927960000000000": { "timestamp": 1644927960000000000, "value": 2883 },
    "1644928020000000000": { "timestamp": 1644928020000000000, "value": 2823 },
    "1644928080000000000": { "timestamp": 1644928080000000000, "value": 455 }
  }
}
@@ -1,19 +0,0 @@
{
  "serviceName": {
    "customer": 1642,
    "driver": 1642,
    "frontend": 39408,
    "mysql": 1642,
    "redis": 22167,
    "route": 16420
  },
  "status": { "error": 4105, "ok": 78816 },
  "duration": { "maxDuration": 1253979000, "minDuration": 415000 },
  "operation": {},
  "httpCode": {},
  "httpUrl": {},
  "httpMethod": {},
  "httpRoute": {},
  "httpHost": {},
  "component": {}
}
@@ -1,105 +0,0 @@
{
  "spans": [
    {
      "timestamp": "2022-02-15T12:16:09.542074Z",
      "spanID": "303b39065c6f5df5",
      "traceID": "00000000000000007fc49fab3cb75958",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 313418000,
      "httpCode": "200",
      "httpMethod": "GET"
    },
    {
      "timestamp": "2022-02-15T12:16:08.84038Z",
      "spanID": "557e8303bc802992",
      "traceID": "000000000000000079310bd1d435a92b",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 318203000,
      "httpCode": "200",
      "httpMethod": "GET"
    },
    {
      "timestamp": "2022-02-15T12:16:08.867689Z",
      "spanID": "347113dd916dd20e",
      "traceID": "00000000000000004c22c0409cee0f66",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 512810000,
      "httpCode": "200",
      "httpMethod": "GET"
    },
    {
      "timestamp": "2022-02-15T12:16:07.060882Z",
      "spanID": "0a8d07f72aa1339b",
      "traceID": "0000000000000000488e11a35959de96",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 588705000,
      "httpCode": "200",
      "httpMethod": "GET"
    },
    {
      "timestamp": "2022-02-15T12:16:07.134107Z",
      "spanID": "0acd4ec344675998",
      "traceID": "00000000000000000292efc7945d9bfa",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 801632000,
      "httpCode": "200",
      "httpMethod": "GET"
    },
    {
      "timestamp": "2022-02-15T12:16:06.474095Z",
      "spanID": "3ae72e433301822a",
      "traceID": "00000000000000001ac3004ff1b7eefe",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 306650000,
      "httpCode": "200",
      "httpMethod": "GET"
    },
    {
      "timestamp": "2022-02-15T12:16:06.996246Z",
      "spanID": "1d765427af673039",
      "traceID": "00000000000000002e78f59fabbcdecf",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 311469000,
      "httpCode": "200",
      "httpMethod": "GET"
    },
    {
      "timestamp": "2022-02-15T12:16:05.324296Z",
      "spanID": "0987c90d83298a1d",
      "traceID": "0000000000000000077bcb960609a350",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 290680000,
      "httpCode": "200",
      "httpMethod": "GET"
    },
    {
      "timestamp": "2022-02-15T12:16:02.458221Z",
      "spanID": "5b0d0d403dd9acf4",
      "traceID": "00000000000000007ae5b0aa69242556",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 262763000,
      "httpCode": "200",
      "httpMethod": "GET"
    },
    {
      "timestamp": "2022-02-15T12:16:00.584939Z",
      "spanID": "3beafb277a76b9b4",
      "traceID": "00000000000000000ab44953c2fd949e",
      "serviceName": "customer",
      "operation": "HTTP GET /customer",
      "durationNano": 302851000,
      "httpCode": "200",
      "httpMethod": "GET"
    }
  ],
  "totalSpans": 82921
}
@@ -1,24 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';

describe('App Layout', () => {
	beforeEach(() => {
		cy.visit(Cypress.env('baseUrl'));
	});

	it('Check the user is in Logged Out State', async () => {
		cy.location('pathname').then((e) => {
			expect(e).to.be.equal(ROUTES.SIGN_UP);
		});
	});

	it('Logged In State', () => {
		const testEmail = 'test@test.com';
		const firstName = 'Test';

		cy.login({
			email: testEmail,
			name: firstName,
		});
	});
});
@@ -1,52 +0,0 @@
/// <reference types="cypress" />

import ROUTES from 'constants/routes';

import defaultAllChannels from '../../fixtures/defaultAllChannels.json';

describe('Channels', () => {
	beforeEach(() => {
		window.localStorage.setItem('isLoggedIn', 'yes');

		cy.visit(Cypress.env('baseUrl') + ROUTES.ALL_CHANNELS);
	});

	it('Channels', () => {
		cy
			.intercept('**channels**', {
				statusCode: 200,
				fixture: 'defaultAllChannels',
			})
			.as('All Channels');

		cy.wait('@All Channels');

		cy
			.get('.ant-tabs-tab')
			.children()
			.then((e) => {
				const child = e.get();

				const secondChild = child[1];

				expect(secondChild.outerText).to.be.equals('Alert Channels');

				expect(secondChild.ariaSelected).to.be.equals('true');
			});

		cy
			.get('tbody')
			.should('be.visible')
			.then((e) => {
				const allChildren = e.children().get();
				expect(allChildren.length).to.be.equals(defaultAllChannels.data.length);

				allChildren.forEach((e, index) => {
					expect(e.firstChild?.textContent).not.null;
					expect(e.firstChild?.textContent).to.be.equals(
						defaultAllChannels.data[index].name,
					);
				});
			});
	});
});
@@ -1,44 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';

describe('default time', () => {
	beforeEach(() => {
		window.localStorage.setItem('isLoggedIn', 'yes');
	});

	it('Metrics Page default time', () => {
		cy.checkDefaultGlobalOption({
			route: ROUTES.APPLICATION,
		});
	});

	it('Dashboard Page default time', () => {
		cy.checkDefaultGlobalOption({
			route: ROUTES.ALL_DASHBOARD,
		});
	});

	it('Trace Page default time', () => {
		cy.checkDefaultGlobalOption({
			route: ROUTES.TRACE,
		});
	});

	it('Instrumentation Page default time', () => {
		cy.checkDefaultGlobalOption({
			route: ROUTES.INSTRUMENTATION,
		});
	});

	it('Service Page default time', () => {
		cy.checkDefaultGlobalOption({
			route: ROUTES.SERVICE_MAP,
		});
	});

	it('Settings Page default time', () => {
		cy.checkDefaultGlobalOption({
			route: ROUTES.SETTINGS,
		});
	});
});
@@ -1,126 +0,0 @@
/// <reference types="cypress" />
import getGlobalDropDownFormatedDate from 'lib/getGlobalDropDownFormatedDate';
import { AppState } from 'store/reducers';

import topEndPoints from '../../fixtures/topEndPoints.json';

describe('Global Time Metrics Application', () => {
	beforeEach(() => {
		cy.visit(Cypress.env('baseUrl'));

		const testEmail = 'test@test.com';
		const firstName = 'Test';

		cy.login({
			email: testEmail,
			name: firstName,
		});
	});

	it('Metrics Application', async () => {
		cy
			.intercept('GET', '/api/v1/services*', {
				fixture: 'defaultApp.json',
			})
			.as('defaultApps');

		cy.wait('@defaultApps');

		// clicking on frontend
		cy.get('tr:nth-child(1) > td:first-child').click();

		cy
			.intercept('GET', '/api/v1/service/top_endpoints*', {
				fixture: 'topEndPoints.json',
			})
			.as('topEndPoints');

		cy
			.intercept('GET', '/api/v1/service/overview?*', {
				fixture: 'serviceOverview.json',
			})
			.as('serviceOverview');

		cy
			.intercept(
				'GET',
				`/api/v1/query_range?query=sum(rate(signoz_latency_count*`,
				{
					fixture: 'requestPerSecond.json',
				},
			)
			.as('requestPerSecond');

		cy
			.window()
			.its('store')
			.invoke('getState')
			.then((e: AppState) => {
				const { globalTime } = e;

				const { maxTime, minTime } = globalTime;

				// intercepting metrics application call

				cy.wait('@topEndPoints');
				cy.wait('@serviceOverview');
				// TODO add errorPercentage also
				// cy.wait('@errorPercentage');
				cy.wait('@requestPerSecond');

				cy
					.get('tbody tr:first-child td:first-child')
					.then((el) => {
						const elements = el.get();

						expect(elements.length).to.be.equals(1);

						const element = elements[0];

						expect(element.innerText).to.be.equals(topEndPoints[0].name);
					})
					.click();

				cy
					.findAllByTestId('dropDown')
					.find('span.ant-select-selection-item')
					.then((e) => {
						const elements = e;

						const element = elements[0];

						const customSelectedTime = element.innerText;

						const startTime = new Date(minTime / 1000000);
						const endTime = new Date(maxTime / 1000000);

						const startString = getGlobalDropDownFormatedDate(startTime);
						const endString = getGlobalDropDownFormatedDate(endTime);

						const result = `${startString} - ${endString}`;

						expect(customSelectedTime).to.be.equals(result);
					});

				cy
					.findByTestId('dropDown')
					.click()
					.then(() => {
						cy.findByTitle('Last 30 min').click();
					});

				cy
					.findByTestId('dropDown')
					.find('span.ant-select-selection-item')
					.then((e) => {
						const elements = e;

						const element = elements[0];

						const selectedTime = element.innerText;

						expect(selectedTime).to.be.equals('Last 30 min');
					});
			});
	});
});
@@ -1,67 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
import convertToNanoSecondsToSecond from 'lib/convertToNanoSecondsToSecond';

import defaultApps from '../../fixtures/defaultApp.json';

describe('Metrics', () => {
	beforeEach(() => {
		cy.visit(Cypress.env('baseUrl'));

		const testEmail = 'test@test.com';
		const firstName = 'Test';

		cy.login({
			email: testEmail,
			name: firstName,
		});
	});

	it('Default Apps', () => {
		cy
			.intercept('GET', '/api/v1/services*', {
				fixture: 'defaultApp.json',
			})
			.as('defaultApps');

		cy.wait('@defaultApps');

		cy.location().then((e) => {
			expect(e.pathname).to.be.equals(ROUTES.APPLICATION);

			cy.get('tbody').then((elements) => {
				const trElements = elements.children();
				expect(trElements.length).to.be.equal(defaultApps.length);
				const getChildren = (row: Element): Element => {
					if (row.children.length === 0) {
						return row;
					}
					return getChildren(row.children[0]);
				};

				// this is row element
				trElements.map((index, element) => {
					const [
						applicationElement,
						p99Element,
						errorRateElement,
						rpsElement,
					] = element.children;
					const applicationName = getChildren(applicationElement).innerHTML;
					const p99Name = getChildren(p99Element).innerHTML;
					const errorRateName = getChildren(errorRateElement).innerHTML;
					const rpsName = getChildren(rpsElement).innerHTML;
					const { serviceName, p99, errorRate, callRate } = defaultApps[index];
					expect(applicationName).to.be.equal(serviceName);
					expect(p99Name).to.be.equal(convertToNanoSecondsToSecond(p99).toString());
					expect(errorRateName).to.be.equals(
						parseFloat(errorRate.toString()).toFixed(2),
					);
					expect(rpsName).to.be.equals(callRate.toString());
				});
			});
		});
	});
});

export {};
@@ -1,130 +0,0 @@
/// <reference types="cypress" />

import ROUTES from 'constants/routes';

import defaultRules from '../../fixtures/defaultRules.json';

const defaultRuleRoutes = `**/rules/**`;

describe('Alerts', () => {
	beforeEach(() => {
		window.localStorage.setItem('isLoggedIn', 'yes');

		cy
			.intercept('get', '*rules*', {
				fixture: 'defaultRules',
			})
			.as('defaultRules');

		cy.visit(Cypress.env('baseUrl') + `${ROUTES.LIST_ALL_ALERT}`);

		cy.wait('@defaultRules');
	});

	it('Edit Rules Page Failure', async () => {
		cy
			.intercept(defaultRuleRoutes, {
				statusCode: 500,
			})
			.as('Get Rules Error');

		cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
			const firstDelete = e[0];
			firstDelete.click();

			cy.waitFor('@Get Rules Error');

			cy
				.window()
				.location()
				.then((e) => {
					expect(e.pathname).to.be.equals(`/alerts/edit/1`);
				});

			cy.findByText('Something went wrong').then((e) => {
				expect(e.length).to.be.equals(1);
			});
		});
	});

	it('Edit Rules Page Success', async () => {
		const text = 'this is the sample value';

		cy
			.intercept(defaultRuleRoutes, {
				statusCode: 200,
				body: {
					data: {
						data: text,
					},
				},
			})
			.as('Get Rules Success');

		cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
			const firstDelete = e[0];
			firstDelete.click();

			cy.waitFor('@Get Rules Success');

			cy.wait(1000);

			cy.findByText('Save').then((e) => {
				const [el] = e.get();

				el.click();
			});
		});
	});

	it('All Rules are rendered correctly', async () => {
		cy
			.window()
			.location()
			.then(({ pathname }) => {
				expect(pathname).to.be.equals(ROUTES.LIST_ALL_ALERT);

				cy.get('tbody').then((e) => {
					const tarray = e.children().get();

					expect(tarray.length).to.be.equals(3);

					tarray.forEach(({ children }, index) => {
						const name = children[1]?.textContent;
						const label = children[2]?.textContent;

						expect(name).to.be.equals(defaultRules.data.rules[index].name);

						const defaultLabels = defaultRules.data.rules[index].labels;

						expect(label).to.be.equals(defaultLabels['severity']);
					});
				});
			});
	});

	it('Rules are Deleted', async () => {
		cy
			.intercept(defaultRuleRoutes, {
				body: {
					data: 'Deleted',
					message: 'Success',
				},
				statusCode: 200,
			})
			.as('deleteRules');

		cy.get('button.ant-btn.ant-btn-link:first-child').then((e) => {
			const firstDelete = e[0];

			firstDelete.click();
		});

		cy.wait('@deleteRules');

		cy.get('tbody').then((e) => {
			const trray = e.children().get();
			expect(trray.length).to.be.equals(2);
		});
	});
});
@@ -1,160 +0,0 @@
/* eslint-disable sonarjs/no-duplicate-string */
import ROUTES from 'constants/routes';
import { AppState } from 'store/reducers';
import { TraceFilterEnum } from 'types/reducer/trace';

import GraphInitialResponse from '../../fixtures/trace/initialAggregates.json';
import FilterInitialResponse from '../../fixtures/trace/initialSpanFilter.json';
import TableInitialResponse from '../../fixtures/trace/initialSpans.json';

const allFilters = '@Filters.all';
const allGraphs = '@Graph.all';
const allTable = '@Table.all';

describe('Trace', () => {
	beforeEach(() => {
		window.localStorage.setItem('isLoggedIn', 'yes');

		cy
			.intercept('POST', '**/aggregates', {
				fixture: 'trace/initialAggregates',
			})
			.as('Graph');

		cy
			.intercept('POST', '**/getFilteredSpans', {
				fixture: 'trace/initialSpans',
			})
			.as('Table');

		cy
			.intercept('POST', '**/api/v1/getSpanFilters', {
				fixture: 'trace/initialSpanFilter',
			})
			.as('Filters');

		cy.visit(Cypress.env('baseUrl') + `${ROUTES.TRACE}`);
	});

	it('First Initial Load should go with 3 AJAX request', () => {
		cy.wait(['@Filters', '@Graph', '@Table']).then((e) => {
			const [filter, graph, table] = e;

			const { body: filterBody } = filter.request;
			const { body: graphBody } = graph.request;
			const { body: tableBody } = table.request;

			expect(filterBody.exclude.length).to.equal(0);
			expect(filterBody.getFilters.length).to.equal(3);
			filterBody.getFilters.forEach((filter: TraceFilterEnum) => {
				expect(filter).to.be.oneOf(['duration', 'status', 'serviceName']);
			});

			expect(graphBody.function).to.be.equal('count');
			expect(graphBody.exclude.length).to.be.equal(0);
			expect(typeof graphBody.exclude).to.be.equal('object');

			expect(tableBody.tags.length).to.be.equal(0);
			expect(typeof tableBody.tags).equal('object');

			expect(tableBody.exclude.length).equals(0);
		});
	});

	it('Render Time Request Response In All 3 Request', () => {
		cy.wait(['@Filters', '@Graph', '@Table']).then((e) => {
			const [filter, graph, table] = e;

			expect(filter.response?.body).to.be.not.undefined;
			expect(filter.response?.body).to.be.not.NaN;

			expect(JSON.stringify(filter.response?.body)).to.be.equals(
				JSON.stringify(FilterInitialResponse),
			);

			expect(JSON.stringify(graph.response?.body)).to.be.equals(
				JSON.stringify(GraphInitialResponse),
			);

			expect(JSON.stringify(table.response?.body)).to.be.equals(
				JSON.stringify(TableInitialResponse),
			);
		});
		cy.get(allFilters).should('have.length', 1);
		cy.get(allGraphs).should('have.length', 1);
		cy.get(allTable).should('have.length', 1);
	});

	it('Clear All', () => {
		cy.wait(['@Filters', '@Graph', '@Table']);

		expect(cy.findAllByText('Clear All')).not.to.be.undefined;

		cy
			.window()
			.its('store')
			.invoke('getState')
			.then((e: AppState) => {
				const { traces } = e;
				expect(traces.isFilterExclude.get('status')).to.be.undefined;
				expect(traces.selectedFilter.size).to.be.equals(0);
			});

		cy.findAllByText('Clear All').then((e) => {
			const [firstStatusClear] = e;

			firstStatusClear.click();

			cy.wait(['@Filters', '@Graph', '@Table']);

			// ensuring the API calls were made
			cy.get(allFilters).should('have.length', 2);
			cy.get(allGraphs).should('have.length', 2);
			cy.get(allTable).should('have.length', 2);

			cy
				.window()
				.its('store')
				.invoke('getState')
				.then((e: AppState) => {
					const { traces } = e;

					expect(traces.isFilterExclude.get('status')).to.be.equals(false);
					expect(traces.userSelectedFilter.get('status')).to.be.undefined;
					expect(traces.selectedFilter.size).to.be.equals(0);
				});
		});
	});

	it('Un Selecting one option from status', () => {
		cy.wait(['@Filters', '@Graph', '@Table']);

		cy.get('input[type="checkbox"]').then((e) => {
			const [errorCheckbox] = e;
			errorCheckbox.click();

			cy.wait(['@Filters', '@Graph', '@Table']).then((e) => {
				const [filter, graph, table] = e;
				const filterBody = filter.request.body;
				const graphBody = graph.request.body;
				const tableBody = table.request.body;

				expect(filterBody.exclude).not.to.be.undefined;
				expect(filterBody.exclude.length).not.to.be.equal(0);
				expect(filterBody.exclude[0] === 'status').to.be.true;

				expect(graphBody.exclude).not.to.be.undefined;
				expect(graphBody.exclude.length).not.to.be.equal(0);
				expect(graphBody.exclude[0] === 'status').to.be.true;

				expect(tableBody.exclude).not.to.be.undefined;
				expect(tableBody.exclude.length).not.to.be.equal(0);
				expect(tableBody.exclude[0] === 'status').to.be.true;
			});

			cy.get(allFilters).should('have.length', 2);
			cy.get(allGraphs).should('have.length', 2);
			cy.get(allTable).should('have.length', 2);
		});
	});
});
@@ -1,26 +0,0 @@
/// <reference types="cypress" />
// ***********************************************************
// This example plugins/index.js can be used to load plugins
//
// You can change the location of this file or turn off loading
// the plugins file with the 'pluginsFile' configuration option.
//
// You can read more here:
// https://on.cypress.io/plugins-guide
// ***********************************************************

// This function is called when a project is opened or re-opened (e.g. due to
// the project's config changing)

// cypress/plugins/index.ts

/// <reference types="cypress" />

/**
 * @type {Cypress.PluginConfig}
 */
module.exports = (): void => {
	return undefined;
};

export {};
@@ -1,24 +0,0 @@
import '@testing-library/cypress/add-commands';

import CheckRouteDefaultGlobalTimeOptions, {
	CheckRouteDefaultGlobalTimeOptionsProps,
} from '../CustomFunctions/checkRouteDefaultGlobalTimeOptions';
import Login, { LoginProps } from '../CustomFunctions/Login';

Cypress.Commands.add('login', Login);
Cypress.Commands.add(
	'checkDefaultGlobalOption',
	CheckRouteDefaultGlobalTimeOptions,
);

declare global {
	// eslint-disable-next-line @typescript-eslint/no-namespace
	namespace Cypress {
		interface Chainable {
			login(props: LoginProps): void;
			checkDefaultGlobalOption(
				props: CheckRouteDefaultGlobalTimeOptionsProps,
			): void;
		}
	}
}
@@ -1,20 +0,0 @@
// ***********************************************************
// This example support/index.js is processed and
// loaded automatically before your test files.
//
// This is a great place to put global configuration and
// behavior that modifies Cypress.
//
// You can change the location of this file or turn off
// automatically serving support files with the
// 'supportFile' configuration option.
//
// You can read more here:
// https://on.cypress.io/configuration
// ***********************************************************

// Import commands.js using ES2015 syntax:
import './commands';

// Alternatively you can use CommonJS syntax:
// require('./commands')
@@ -1,13 +0,0 @@
{
  "extends": "../tsconfig.json",
  "target": "es5",
  "lib": ["es5", "dom"],
  "compilerOptions": {
    "noEmit": true,
    // be explicit about types included
    // to avoid clashing with Jest types
    "types": ["cypress", "@testing-library/cypress", "node"],
    "isolatedModules": false
  },
  "include": ["../node_modules/cypress", "./**/*.ts"]
}
@@ -25,6 +25,11 @@ const config: Config.InitialOptions = {
	setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
	testPathIgnorePatterns: ['/node_modules/', '/public/'],
	moduleDirectories: ['node_modules', 'src'],
	testEnvironmentOptions: {
		'jest-playwright': {
			browsers: ['chromium', 'firefox', 'webkit'],
		},
	},
};

export default config;
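A note on the `jest-playwright` block above: it is read through `testEnvironmentOptions`, which presumably only takes effect when Jest is also pointed at the jest-playwright runner (the `jest-playwright-preset` package added to devDependencies further down). A minimal sketch of that pairing, with the preset wiring assumed rather than shown in this diff:

import type { Config } from '@jest/types';

// Sketch only — assumes jest-playwright-preset is the active preset so the
// 'jest-playwright' options below are actually consumed.
const config: Config.InitialOptions = {
	preset: 'jest-playwright-preset',
	testEnvironmentOptions: {
		'jest-playwright': {
			browsers: ['chromium', 'firefox', 'webkit'],
		},
	},
};

export default config;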
@@ -2,3 +2,4 @@
 * Adds custom matchers from the react testing library to all tests
 */
import '@testing-library/jest-dom';
import 'jest-styled-components';
@@ -9,16 +9,17 @@
		"prettify": "prettier --write .",
		"lint": "eslint ./src",
		"lint:fix": "eslint ./src --fix",
		"cypress:open": "cypress open",
		"cypress:run": "cypress run",
		"jest": "jest",
		"jest:coverage": "jest --coverage",
		"jest:watch": "jest --watch",
		"postinstall": "yarn husky:configure",
		"husky:configure": "cd .. && husky install frontend/.husky"
		"postinstall": "is-ci || yarn husky:configure",
		"playwright": "playwright test --config=./playwright.config.ts",
		"playwright:local:debug": "PWDEBUG=console yarn playwright --headed --browser=chromium",
		"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
		"commitlint": "commitlint --edit $1"
	},
	"engines": {
		"node": ">=12.13.0"
		"node": ">=16.15.0"
	},
	"author": "",
	"license": "ISC",
@@ -46,7 +47,6 @@
		"cross-env": "^7.0.3",
		"css-loader": "4.3.0",
		"css-minimizer-webpack-plugin": "^3.2.0",
		"cypress": "^8.3.0",
		"d3": "^6.2.0",
		"d3-flame-graph": "^3.1.1",
		"d3-tip": "^0.9.1",
@@ -68,7 +68,7 @@
		"react-dom": "17.0.0",
		"react-force-graph": "^1.41.0",
		"react-graph-vis": "^1.0.5",
		"react-grid-layout": "^1.2.5",
		"react-grid-layout": "^1.3.4",
		"react-i18next": "^11.16.1",
		"react-query": "^3.34.19",
		"react-redux": "^7.2.2",
@@ -109,15 +109,17 @@
		"@babel/preset-env": "^7.12.17",
		"@babel/preset-react": "^7.12.13",
		"@babel/preset-typescript": "^7.12.17",
		"@commitlint/cli": "^16.2.4",
		"@commitlint/config-conventional": "^16.2.4",
		"@jest/globals": "^27.5.1",
		"@testing-library/cypress": "^8.0.0",
		"@playwright/test": "^1.22.0",
		"@testing-library/react-hooks": "^7.0.2",
		"@types/color": "^3.0.3",
		"@types/compression-webpack-plugin": "^9.0.0",
		"@types/copy-webpack-plugin": "^8.0.1",
		"@types/d3": "^6.2.0",
		"@types/d3-tip": "^3.5.5",
		"@types/jest": "^26.0.15",
		"@types/jest": "^27.5.1",
		"@types/lodash-es": "^4.17.4",
		"@types/mini-css-extract-plugin": "^2.5.1",
		"@types/node": "^16.10.3",
@@ -136,7 +138,7 @@
		"@typescript-eslint/parser": "^4.28.2",
		"autoprefixer": "^9.0.0",
		"babel-plugin-styled-components": "^1.12.0",
		"compression-webpack-plugin": "^9.0.0",
		"compression-webpack-plugin": "9.0.0",
		"copy-webpack-plugin": "^8.1.0",
		"critters-webpack-plugin": "^3.0.1",
		"eslint": "^7.30.0",
@@ -155,6 +157,9 @@
		"eslint-plugin-simple-import-sort": "^7.0.0",
		"eslint-plugin-sonarjs": "^0.12.0",
		"husky": "^7.0.4",
		"is-ci": "^3.0.1",
		"jest-playwright-preset": "^1.7.0",
		"jest-styled-components": "^7.0.8",
		"less-plugin-npm-import": "^2.1.0",
		"lint-staged": "^12.3.7",
		"portfinder-sync": "^0.0.2",
@@ -164,11 +169,15 @@
		"ts-node": "^10.2.1",
		"typescript-plugin-css-modules": "^3.4.0",
		"webpack-bundle-analyzer": "^4.5.0",
		"webpack-cli": "^4.5.0"
		"webpack-cli": "^4.9.2"
	},
	"lint-staged": {
		"*.(js|jsx|ts|tsx)": [
			"eslint --fix"
		]
	},
	"resolutions": {
		"@types/react": "17.0.0",
		"@types/react-dom": "17.0.0"
	}
}
frontend/playwright.config.ts (new file)
@@ -0,0 +1,21 @@
import { PlaywrightTestConfig } from '@playwright/test';
import dotenv from 'dotenv';

dotenv.config();

const config: PlaywrightTestConfig = {
	forbidOnly: !!process.env.CI,
	retries: process.env.CI ? 2 : 0,
	preserveOutput: 'always',
	name: 'Signoz',
	testDir: './tests',
	use: {
		trace: 'retain-on-failure',
		baseURL: process.env.PLAYWRIGHT_TEST_BASE_URL || 'http://localhost:3301',
	},
	updateSnapshots: 'all',
	fullyParallel: false,
	quiet: true,
};

export default config;
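For context, a minimal spec that would live under the `testDir: './tests'` configured above. The title assertion is hypothetical and only illustrates how `baseURL` lets specs use relative paths:

import { expect, test } from '@playwright/test';

// A relative goto() resolves against baseURL from playwright.config.ts,
// i.e. PLAYWRIGHT_TEST_BASE_URL or the http://localhost:3301 fallback.
test('app shell loads', async ({ page }) => {
	await page.goto('/');
	await expect(page).toHaveTitle(/SigNoz/i); // hypothetical title check
});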
frontend/public/locales/en-GB/generalSettings.json (new file)
@@ -0,0 +1,21 @@
{
  "total_retention_period": "Total Retention Period",
  "move_to_s3": "Move to S3\n(should be lower than total retention period)",
  "status_message": {
    "success": "Your last call to change retention period to {{total_retention}} {{s3_part}} was successful.",
    "failed": "Your last call to change retention period to {{total_retention}} {{s3_part}} failed. Please try again.",
    "pending": "Your last call to change retention period to {{total_retention}} {{s3_part}} is pending. This may take some time.",
    "s3_part": "and S3 to {{s3_retention}}"
  },
  "retention_save_button": {
    "pending": "Updating {{name}} retention period",
    "success": "Save"
  },
  "retention_request_race_condition": "Your request to change retention period has failed, as another request is still in process.",
  "retention_error_message": "There was an issue in changing the retention period for {{name}}. Please try again or reach out to support@signoz.io",
  "retention_failed_message": "There was an issue in changing the retention period. Please try again or reach out to support@signoz.io",
  "retention_comparison_error": "Total retention period for {{name}} can’t be lower or equal to the period after which data is moved to s3.",
  "retention_null_value_error": "Retention Period for {{name}} is not set yet. Please set by choosing below",
  "retention_confirmation": "Are you sure you want to change the retention period?",
  "retention_confirmation_description": "This will change the amount of storage needed for saving {{name}}."
}
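The nested `status_message` keys compose through i18next interpolation: `s3_part` is itself a translated fragment that gets interpolated into `success`/`failed`/`pending`. A sketch of how a component might assemble the full message (the react-i18next wiring is assumed and the sample durations are made up):

import React from 'react';
import { useTranslation } from 'react-i18next';

function StatusMessage(): JSX.Element {
	const { t } = useTranslation('generalSettings');

	// s3_part is translated first, then interpolated into the outer message.
	const s3Part = t('status_message.s3_part', { s3_retention: '30 days' });
	const message = t('status_message.success', {
		total_retention: '7 days',
		s3_part: s3Part,
	});

	return <span>{message}</span>;
}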
@@ -2,5 +2,8 @@
  "general": "General",
  "alert_channels": "Alert Channels",
  "organization_settings": "Organization Settings",
  "my_settings": "My Settings"
  "my_settings": "My Settings",
  "overview_metrics": "Overview Metrics",
  "dbcall_metrics": "Database Calls",
  "external_metrics": "External Calls"
}

@@ -13,16 +13,5 @@
    "general": "General",
    "alert_channels": "Alert Channels",
    "all_errors": "All Exceptions"
  },
  "settings": {
    "total_retention_period": "Total Retention Period",
    "move_to_s3": "Move to S3\n(should be lower than total retention period)",
    "retention_success_message": "Congrats. The retention periods for {{name}} has been updated successfully.",
    "retention_error_message": "There was an issue in changing the retention period for {{name}}. Please try again or reach out to support@signoz.io",
    "retention_failed_message": "There was an issue in changing the retention period. Please try again or reach out to support@signoz.io",
    "retention_comparison_error": "Total retention period for {{name}} can’t be lower or equal to the period after which data is moved to s3.",
    "retention_null_value_error": "Retention Period for {{name}} is not set yet. Please set by choosing below",
    "retention_confirmation": "Are you sure you want to change the retention period?",
    "retention_confirmation_description": "This will change the amount of storage needed for saving metrics & traces."
  }
}
frontend/public/locales/en/generalSettings.json (new file)
@@ -0,0 +1,21 @@
{
  "total_retention_period": "Total Retention Period",
  "move_to_s3": "Move to S3\n(should be lower than total retention period)",
  "status_message": {
    "success": "Your last call to change retention period to {{total_retention}} {{s3_part}} was successful.",
    "failed": "Your last call to change retention period to {{total_retention}} {{s3_part}} failed. Please try again.",
    "pending": "Your last call to change retention period to {{total_retention}} {{s3_part}} is pending. This may take some time.",
    "s3_part": "and S3 to {{s3_retention}}"
  },
  "retention_save_button": {
    "pending": "Updating {{name}} retention period",
    "success": "Save"
  },
  "retention_request_race_condition": "Your request to change retention period has failed, as another request is still in process.",
  "retention_error_message": "There was an issue in changing the retention period for {{name}}. Please try again or reach out to support@signoz.io",
  "retention_failed_message": "There was an issue in changing the retention period. Please try again or reach out to support@signoz.io",
  "retention_comparison_error": "Total retention period for {{name}} can’t be lower or equal to the period after which data is moved to s3.",
  "retention_null_value_error": "Retention Period for {{name}} is not set yet. Please set by choosing below",
  "retention_confirmation": "Are you sure you want to change the retention period?",
  "retention_confirmation_description": "This will change the amount of storage needed for saving {{name}}."
}
@@ -2,5 +2,8 @@
  "general": "General",
  "alert_channels": "Alert Channels",
  "organization_settings": "Organization Settings",
  "my_settings": "My Settings"
  "my_settings": "My Settings",
  "overview_metrics": "Overview Metrics",
  "dbcall_metrics": "Database Calls",
  "external_metrics": "External Calls"
}

@@ -13,16 +13,5 @@
    "general": "General",
    "alert_channels": "Alert Channels",
    "all_errors": "All Exceptions"
  },
  "settings": {
    "total_retention_period": "Total Retention Period",
    "move_to_s3": "Move to S3\n(should be lower than total retention period)",
    "retention_success_message": "Congrats. The retention periods for {{name}} has been updated successfully.",
    "retention_error_message": "There was an issue in changing the retention period for {{name}}. Please try again or reach out to support@signoz.io",
    "retention_failed_message": "There was an issue in changing the retention period. Please try again or reach out to support@signoz.io",
    "retention_comparison_error": "Total retention period for {{name}} can’t be lower or equal to the period after which data is moved to s3.",
    "retention_null_value_error": "Retention Period for {{name}} is not set yet. Please set by choosing below",
    "retention_confirmation": "Are you sure you want to change the retention period?",
    "retention_confirmation_description": "This will change the amount of storage needed for saving metrics & traces."
  }
}
@@ -2,6 +2,7 @@
import { notification } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import loginApi from 'api/user/login';
import { Logout } from 'api/utils';
import Spinner from 'components/Spinner';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';

@@ -103,7 +104,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
				history.push(ROUTES.UN_AUTHORIZED);
			}
		} else {
			history.push(ROUTES.SOMETHING_WENT_WRONG);
			Logout();

			notification.error({
				message: response.error || t('something_went_wrong'),
@@ -12,10 +12,7 @@ export const ServiceMetricsPage = Loadable(
);

export const ServiceMapPage = Loadable(
	() =>
		import(
			/* webpackChunkName: "ServiceMapPage" */ 'modules/Servicemap/ServiceMap'
		),
	() => import(/* webpackChunkName: "ServiceMapPage" */ 'modules/Servicemap'),
);

export const TraceFilter = Loadable(
@@ -27,10 +24,7 @@ export const TraceDetail = Loadable(
);

export const UsageExplorerPage = Loadable(
	() =>
		import(
			/* webpackChunkName: "UsageExplorerPage" */ 'modules/Usage/UsageExplorerDef'
		),
	() => import(/* webpackChunkName: "UsageExplorerPage" */ 'modules/Usage'),
);

export const SignupPage = Loadable(
frontend/src/api/metrics/getMetricName.ts (new file)
@@ -0,0 +1,27 @@
import { ApiV2Instance as axios } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
	MetricNameProps,
	MetricNamesPayloadProps,
} from 'types/api/metrics/getMetricName';

export const getMetricName = async (
	props: MetricNameProps,
): Promise<SuccessResponse<MetricNamesPayloadProps> | ErrorResponse> => {
	try {
		const response = await axios.get(
			`/metrics/autocomplete/list?match=${props || ''}`,
		);

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};
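A hypothetical caller, just to show how the `SuccessResponse | ErrorResponse` union returned by this wrapper is meant to be narrowed before `payload` is used. The template literal above suggests `MetricNameProps` is the match string itself, which is assumed here:

import { getMetricName } from 'api/metrics/getMetricName';

async function searchMetrics(match: string): Promise<void> {
	const response = await getMetricName(match);

	// statusCode 200 is only ever set on the success branch of the wrapper.
	if (response.statusCode === 200 && response.payload) {
		console.log('matching metric names:', response.payload);
	} else {
		console.error('lookup failed:', response.error);
	}
}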
frontend/src/api/metrics/getQueryRange.ts (new file)
@@ -0,0 +1,25 @@
import { ApiV2Instance as axios } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
	MetricRangePayloadProps,
	MetricsRangeProps,
} from 'types/api/metrics/getQueryRange';

export const getMetricsQueryRange = async (
	props: MetricsRangeProps,
): Promise<SuccessResponse<MetricRangePayloadProps> | ErrorResponse> => {
	try {
		const response = await axios.post(`/metrics/query_range`, props);

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};
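Since `react-query` is already in the dependency list, a wrapper like this would typically be consumed through `useQuery`. A sketch under that assumption (the hook name and query-key shape are made up):

import { useQuery } from 'react-query';

import { getMetricsQueryRange } from 'api/metrics/getQueryRange';
import { MetricsRangeProps } from 'types/api/metrics/getQueryRange';

// Re-fetches whenever the range props change; caching and retries come
// from react-query rather than the axios wrapper itself.
function useMetricsQueryRange(props: MetricsRangeProps) {
	return useQuery(['metricsQueryRange', props], () =>
		getMetricsQueryRange(props),
	);
}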
@@ -3,17 +3,20 @@ import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
	TagKeyProps,
	TagKeysPayloadProps,
	TagValueProps,
	TagValuesPayloadProps,
} from 'types/api/metrics/getResourceAttributes';

export const getResourceAttributesTagKeys = async (): Promise<
	SuccessResponse<TagKeysPayloadProps> | ErrorResponse
> => {
export const getResourceAttributesTagKeys = async (
	props: TagKeyProps,
): Promise<SuccessResponse<TagKeysPayloadProps> | ErrorResponse> => {
	try {
		const response = await axios.get(
			'/metrics/autocomplete/tagKey?metricName=signoz_calls_total&match=resource_',
			`/metrics/autocomplete/tagKey?metricName=${props.metricName}${
				props.match ? `&match=${props.match}` : ''
			}`,
		);

		return {
@@ -32,7 +35,7 @@ export const getResourceAttributesTagValues = async (
): Promise<SuccessResponse<TagValuesPayloadProps> | ErrorResponse> => {
	try {
		const response = await axios.get(
			`/metrics/autocomplete/tagValue?metricName=signoz_calls_total&tagKey=${props}`,
			`/metrics/autocomplete/tagValue?metricName=${props.metricName}&tagKey=${props.tagKey}`,
		);

		return {
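The previously hardcoded query string maps directly onto the new props; the equivalent of the old call, using the exact values the diff removes:

import { getResourceAttributesTagKeys } from 'api/metrics/getResourceAttributes';

// Equivalent of the former hardcoded request:
// /metrics/autocomplete/tagKey?metricName=signoz_calls_total&match=resource_
async function fetchResourceTagKeys() {
	return getResourceAttributesTagKeys({
		metricName: 'signoz_calls_total',
		match: 'resource_',
	});
}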
@@ -2,13 +2,15 @@ import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps } from 'types/api/settings/getRetention';
import { PayloadProps, Props } from 'types/api/settings/getRetention';

const getRetention = async (): Promise<
	SuccessResponse<PayloadProps> | ErrorResponse
> => {
const getRetention = async <T extends Props>(
	props: T,
): Promise<SuccessResponse<PayloadProps<T>> | ErrorResponse> => {
	try {
		const response = await axios.get<PayloadProps>(`/settings/ttl`);
		const response = await axios.get<PayloadProps<T>>(
			`/settings/ttl?type=${props}`,
		);

		return {
			statusCode: 200,
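Note that `props` is interpolated directly into `?type=${props}`, which suggests `Props` is a string union rather than an object. Under that assumption (the union members and import path are guesses, not shown in this diff), a caller would look like:

import getRetention from 'api/settings/getRetention';

// Assumption: Props is something like 'metrics' | 'traces', so the generic
// parameter lets PayloadProps<T> narrow per TTL type.
async function loadMetricsTtl(): Promise<void> {
	const response = await getRetention('metrics');

	if (response.statusCode === 200) {
		console.log(response.payload);
	}
}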
@@ -11,7 +11,7 @@ const setRetention = async (
	const response = await axios.post<PayloadProps>(
		`/settings/ttl?duration=${props.totalDuration}&type=${props.type}${
			props.coldStorage
				? `&coldStorage=${props.coldStorage};toColdDuration=${props.toColdDuration}`
				? `&coldStorage=${props.coldStorage}&toColdDuration=${props.toColdDuration}`
				: ''
		}`,
	);
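The one-character fix matters because `;` is not a query-string separator, so `toColdDuration` was previously fused into the `coldStorage` value instead of arriving as its own parameter. Before and after, with hypothetical durations:

// Before: toColdDuration never parsed as a separate parameter
// /settings/ttl?duration=720h&type=metrics&coldStorage=s3;toColdDuration=168h

// After: both parameters reach the backend independently
// /settings/ttl?duration=720h&type=metrics&coldStorage=s3&toColdDuration=168h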
@@ -5,6 +5,7 @@ import history from 'lib/history';
import store from 'store';
import {
	LOGGED_IN,
	UPDATE_ORG,
	UPDATE_USER,
	UPDATE_USER_ACCESS_REFRESH_ACCESS_TOKEN,
	UPDATE_USER_ORG_ROLE,
@@ -51,5 +52,12 @@ export const Logout = (): void => {
		},
	});

	store.dispatch({
		type: UPDATE_ORG,
		payload: {
			org: [],
		},
	});

	history.push(ROUTES.LOGIN);
};
@@ -1,38 +1,46 @@
import MEditor from '@monaco-editor/react';
import MEditor, { EditorProps } from '@monaco-editor/react';
import React from 'react';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import AppReducer from 'types/reducer/app';

function Editor({
	value,
	language = 'yaml',
	language,
	onChange,
	readOnly = false,
}: EditorProps): JSX.Element {
	readOnly,
	height,
	options,
}: MEditorProps): JSX.Element {
	const { isDarkMode } = useSelector<AppState, AppReducer>((state) => state.app);
	return (
		<MEditor
			theme="vs-dark"
			theme={isDarkMode ? 'vs-dark' : 'vs-light'}
			language={language}
			value={value}
			options={{ fontSize: 16, automaticLayout: true, readOnly }}
			height="40vh"
			options={{ fontSize: 16, automaticLayout: true, readOnly, ...options }}
			height={height}
			onChange={(newValue): void => {
				if (newValue) {
					onChange(newValue);
				}
				if (typeof newValue === 'string') onChange(newValue);
			}}
		/>
	);
}

interface EditorProps {
interface MEditorProps {
	value: string;
	language?: string;
	onChange: (value: string) => void;
	readOnly?: boolean;
	height?: string;
	options?: EditorProps['options'];
}

Editor.defaultProps = {
	language: undefined,
	language: 'yaml',
	readOnly: false,
	height: '40vh',
	options: {},
};

export default Editor;
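With the widened props, call sites can now override the height and pass Monaco options straight through to the underlying editor. A hypothetical usage (the import path and state names are made up for illustration):

import React, { useState } from 'react';

import Editor from 'components/Editor';

function ConfigEditor(): JSX.Element {
	const [config, setConfig] = useState<string>('');

	return (
		<Editor
			value={config}
			onChange={(value): void => setConfig(value)}
			height="60vh"
			options={{ minimap: { enabled: false } }}
		/>
	);
}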
@@ -22,7 +22,6 @@ const getOrCreateLegendList = (
	listContainer.style.height = '100%';
	listContainer.style.flexWrap = 'wrap';
	listContainer.style.justifyContent = 'center';

	legendContainer?.appendChild(listContainer);
}
@@ -1,4 +1,3 @@
import { expect } from '@jest/globals';
import dayjs from 'dayjs';

import { convertTimeRange, TIME_UNITS } from '../xAxisConfig';
@@ -182,11 +182,10 @@ function Graph({
	};
	const chartHasData = hasData(data);
	const chartPlugins = [];
	if (chartHasData) {
		chartPlugins.push(legend(name, data.datasets.length > 3));
	} else {
		chartPlugins.push(emptyGraph);
	}

	if (!chartHasData) chartPlugins.push(emptyGraph);
	chartPlugins.push(legend(name, data.datasets.length > 3));

	lineChartRef.current = new Chart(chartRef.current, {
		type,
		data,
@@ -109,14 +109,14 @@ export const useXAxisTimeUnit = (data: Chart['data']): IAxisTimeConfig => {
	let minTime = Number.POSITIVE_INFINITY;
	let maxTime = Number.NEGATIVE_INFINITY;
	data?.labels?.forEach((timeStamp: unknown): void => {
		const getTimeStamp = (time: string | number): Date | number | string => {
			if (typeof timeStamp === 'string') {
				return Date.parse(timeStamp);
		const getTimeStamp = (time: Date | number): Date | number | string => {
			if (time instanceof Date) {
				return Date.parse(time.toString());
			}

			return time;
		};
		const time = getTimeStamp(timeStamp as string | number);
		const time = getTimeStamp(timeStamp as Date | number);

		minTime = Math.min(parseInt(time.toString(), 10), minTime);
		maxTime = Math.max(parseInt(time.toString(), 10), maxTime);
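The change above fixes a shadowing bug: the old helper took a `time` parameter but tested and parsed the captured `timeStamp` instead, and it also mishandled `Date` labels, which are not strings. The fixed helper in isolation, for reference:

// Works on the value it is given rather than the closed-over loop variable,
// and converts Date labels to epoch milliseconds.
const getTimeStamp = (time: Date | number): Date | number | string => {
	if (time instanceof Date) {
		return Date.parse(time.toString());
	}

	return time;
};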
@@ -2,10 +2,11 @@
 * @jest-environment jsdom
 */

import { expect } from '@jest/globals';
import { render } from '@testing-library/react';
import React from 'react';
import { Provider } from 'react-redux';
import { MemoryRouter } from 'react-router-dom';
import store from 'store';

import NotFound from './index';

@@ -13,7 +14,9 @@ describe('Not Found page test', () => {
	it('should render Not Found page without errors', () => {
		const { asFragment } = render(
			<MemoryRouter>
				<NotFound />
				<Provider store={store}>
					<NotFound />
				</Provider>
			</MemoryRouter>,
		);
		expect(asFragment()).toMatchSnapshot();
@@ -2,8 +2,102 @@

exports[`Not Found page test should render Not Found page without errors 1`] = `
<DocumentFragment>
  <div
    class="sc-gsDKAQ cLXpIa"
  .c3 {
    border: 2px solid #2f80ed;
    box-sizing: border-box;
    border-radius: 10px;
    width: 400px;
    background: inherit;
    font-style: normal;
    font-weight: normal;
    font-size: 24px;
    line-height: 20px;
    display: -webkit-box;
    display: -webkit-flex;
    display: -ms-flexbox;
    display: flex;
    -webkit-align-items: center;
    -webkit-box-align: center;
    -ms-flex-align: center;
    align-items: center;
    -webkit-box-pack: center;
    -webkit-justify-content: center;
    -ms-flex-pack: center;
    justify-content: center;
    padding-top: 14px;
    padding-bottom: 14px;
    color: #2f80ed;
  }

  .c0 {
    min-height: 80vh;
    display: -webkit-box;
    display: -webkit-flex;
    display: -ms-flexbox;
    display: flex;
    -webkit-flex-direction: column;
    -ms-flex-direction: column;
    flex-direction: column;
    -webkit-box-pack: center;
    -webkit-justify-content: center;
    -ms-flex-pack: center;
    justify-content: center;
    -webkit-align-items: center;
    -webkit-box-align: center;
    -ms-flex-align: center;
    align-items: center;
  }

  .c2 {
    font-style: normal;
    font-weight: 300;
    font-size: 18px;
    line-height: 20px;
    display: -webkit-box;
    display: -webkit-flex;
    display: -ms-flexbox;
    display: flex;
    -webkit-align-items: center;
    -webkit-box-align: center;
    -ms-flex-align: center;
    align-items: center;
    text-align: center;
    color: #828282;
    text-align: center;
    margin: 0;
    display: -webkit-box;
    display: -webkit-flex;
    display: -ms-flexbox;
    display: flex;
    -webkit-box-pack: center;
    -webkit-justify-content: center;
    -ms-flex-pack: center;
    justify-content: center;
    -webkit-align-items: center;
    -webkit-box-align: center;
    -ms-flex-align: center;
    align-items: center;
  }

  .c1 {
    min-height: 50px;
    display: -webkit-box;
    display: -webkit-flex;
    display: -ms-flexbox;
    display: flex;
    -webkit-box-pack: justify;
    -webkit-justify-content: space-between;
    -ms-flex-pack: justify;
    justify-content: space-between;
    -webkit-flex-direction: column;
    -ms-flex-direction: column;
    flex-direction: column;
    margin-bottom: 30px;
    margin-top: 20px;
  }

  <div
    class="c0"
  >
    <svg
      fill="none"
@@ -272,21 +366,21 @@ exports[`Not Found page test should render Not Found page without errors 1`] = `
      </defs>
    </svg>
    <div
      class="sc-hKwDye foaleg"
      class="c1"
    >
      <p
        class="sc-dkPtRN fcyVIq"
        class="c2"
      >
        Ah, seems like we reached a dead end!
      </p>
      <p
        class="sc-dkPtRN fcyVIq"
        class="c2"
      >
        Page Not Found
      </p>
    </div>
    <a
      class="sc-bdvvtL dbTZkj"
      class="c3"
      href="/application"
      tabindex="0"
    >
@@ -27,12 +27,19 @@ function RouteTab({
			onChange={onChange}
			destroyInactiveTabPane
			activeKey={activeKey}
			animated
			// eslint-disable-next-line react/jsx-props-no-spreading
			{...rest}
		>
			{routes.map(
				({ Component, name }): JSX.Element => (
					<TabPane tab={name} key={name}>
				({ Component, name, route }): JSX.Element => (
					<TabPane
						tabKey={route}
						animated
						destroyInactiveTabPane
						tab={name}
						key={name}
					>
						<Component />
					</TabPane>
				),
@@ -10,9 +10,11 @@ function TextToolTip({ text, url }: TextToolTipProps): JSX.Element {
	return (
		<div>
			{`${text} `}
			<a href={url} rel="noopener noreferrer" target="_blank">
				here
			</a>
			{url && (
				<a href={url} rel="noopener noreferrer" target="_blank">
					here
				</a>
			)}
		</div>
	);
}}
@@ -22,8 +24,11 @@ function TextToolTip({ text, url }: TextToolTipProps): JSX.Element {
	);
}

TextToolTip.defaultProps = {
	url: '',
};
interface TextToolTipProps {
	url: string;
	url?: string;
	text: string;
}