Compare commits
124 Commits
| Author | SHA1 | Date |
|---|---|---|
| | d1887fdbfe | |
| | 5414a73b40 | |
| | 7f116d1597 | |
| | 2c1b530aa0 | |
| | 231b8467fd | |
| | 24910f6a39 | |
| | 03bf9afe03 | |
| | fbf047a477 | |
| | afc0559456 | |
| | 34e9247562 | |
| | bbd90bff0c | |
| | 3e0f5a866d | |
| | fb634303e8 | |
| | 5e828bf174 | |
| | 8f2ed0e46f | |
| | 19b25219f4 | |
| | 447700326a | |
| | 3ed4fb2b75 | |
| | 32750fa2af | |
| | 47b0671b27 | |
| | 9bc62d83d3 | |
| | 5e4cff7ae2 | |
| | 271ffbd1a1 | |
| | 5b691d26e4 | |
| | 6b6070fd45 | |
| | 28bf2fe3f7 | |
| | 849c3d1156 | |
| | b47a3e0932 | |
| | 4427f60708 | |
| | 319ca6af07 | |
| | 0f59baf740 | |
| | 4dfbdd2d63 | |
| | 73b5134971 | |
| | 20879dcf2e | |
| | e2a5729c5e | |
| | 556914f808 | |
| | c8830c9e3a | |
| | 865e487fc3 | |
| | 36e95332bc | |
| | f934f96dd8 | |
| | 148d7d99ed | |
| | 43369bdefb | |
| | da5bf3aea0 | |
| | 28c8df5e63 | |
| | 510815655f | |
| | 53d52254cb | |
| | 655061212f | |
| | ec11abf54e | |
| | f3fb325a13 | |
| | fa11cd651e | |
| | 6f57a0c9b2 | |
| | ae4f75e54b | |
| | 1e33f16943 | |
| | b4a9b248cf | |
| | 4ce1297856 | |
| | e46ff48b80 | |
| | 73e3e061e0 | |
| | 992644dff7 | |
| | dea74c5f8a | |
| | d2b107ec7f | |
| | 1ebf1a3675 | |
| | 02446579a6 | |
| | 56fcc0c4a7 | |
| | ce78013646 | |
| | 8e7367cae1 | |
| | 6f4327bfa1 | |
| | 050da9a2a9 | |
| | 2c1c0ceea6 | |
| | d69a637275 | |
| | d10b9790dc | |
| | 917ef533a3 | |
| | 76102dfc7e | |
| | a576092cd4 | |
| | a5fd338a9d | |
| | 8a781076e1 | |
| | 18fc697b91 | |
| | 93b347d25e | |
| | ea5b40c7ea | |
| | cc91242e9a | |
| | e756cefa75 | |
| | da653681cf | |
| | 93b5a945a4 | |
| | 9ab1093d81 | |
| | b4754053aa | |
| | 8fef964485 | |
| | 004dda200c | |
| | 6a01ce88cb | |
| | ade8cda91c | |
| | 3b7484f423 | |
| | 959aad252c | |
| | 7b70cfb0c4 | |
| | 53d5e37b5f | |
| | 69821cc13c | |
| | a555c2cb93 | |
| | 24d51e3c3a | |
| | 368e11e17a | |
| | 118ee9dd90 | |
| | 30961da59f | |
| | 9692b9985a | |
| | 98ab64cb94 | |
| | f883d02ff7 | |
| | fff9031bf7 | |
| | 32ad4ef571 | |
| | 66b423588e | |
| | e0be48a527 | |
| | 4143e313da | |
| | e1fbe265d8 | |
| | 84002fa123 | |
| | 7f2546ec97 | |
| | 68b1b8d975 | |
| | ac789ffcf0 | |
| | 1272e18672 | |
| | 9008d19a7b | |
| | f394f72bfb | |
| | 45cb0353e6 | |
| | 506c34f385 | |
| | aca67d4f33 | |
| | c00e9f5236 | |
| | 8cef9de35c | |
| | 4817a17320 | |
| | 0055eaf656 | |
| | 4b205e61c8 | |
| | 77c0237ba1 | |
| | a2acee209c | |
**`.github/workflows/README.md`** · 5 changed lines (vendored)

```diff
@@ -23,3 +23,8 @@ To run GitHub workflow, a few environment variables needs to add in GitHub secrets
<td> Docker hub password/token with push permission </td>
<td> **** </td>
</tr>
<tr>
<td> SONAR_TOKEN </td>
<td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
<td> **** </td>
</tr>
```
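The table above documents the repository secrets the workflows expect. A hedged sketch of registering them with the GitHub CLI follows; `SONAR_TOKEN` comes from the table, while `DOCKERHUB_TOKEN` is a placeholder name, since the Docker Hub secret's exact name is not visible in this diff.

```bash
# Sketch only: register the secrets this table documents, via the GitHub CLI.
gh auth login                                               # authenticate once
gh secret set DOCKERHUB_TOKEN --body "$MY_DOCKERHUB_TOKEN"  # hypothetical secret name
gh secret set SONAR_TOKEN --body "$MY_SONARCLOUD_TOKEN"     # SonarCloud token (sonarcloud.io)
```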
**`.github/workflows/build.yaml`** · 8 changed lines (vendored)

```diff
@@ -37,6 +37,14 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install dependencies
        run: cd frontend && yarn install
      - name: Run Prettier
        run: cd frontend && npm run prettify
        continue-on-error: true
      - name: Run ESLint
        run: cd frontend && npm run lint
        continue-on-error: true
      - name: Build frontend docker image
        shell: bash
        run: |
```
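The new steps run Prettier and ESLint on the frontend before the docker build. The same commands can be run locally before pushing (assumes Node and Yarn are installed):

```bash
# Local equivalent of the checks added to build.yaml
cd frontend
yarn install       # install dependencies
npm run prettify   # Prettier (non-blocking in CI: continue-on-error is true)
npm run lint       # ESLint (also non-blocking in CI)
```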
**`.github/workflows/codeql.yaml`** · 71 lines (new file, vendored)

```yaml
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ main, v* ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ main ]
  schedule:
    - cron: '32 5 * * 5'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go', 'javascript', 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
        # queries: ./path/to/local/query, your-org/your-repo/queries@main

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    #    and modify them (or add more) to build your code if your project
    #    uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1
```
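This workflow analyzes Go, JavaScript, and Python and uploads results to GitHub code scanning (hence `security-events: write`). A hedged sketch of reviewing the resulting alerts from the command line; the REST endpoint and fields below come from the standard GitHub API, not from this diff.

```bash
# Sketch only: list open code-scanning alerts produced by the CodeQL workflow.
gh api repos/SigNoz/signoz/code-scanning/alerts --paginate \
  --jq '.[] | {rule: .rule.id, severity: .rule.severity, file: .most_recent_instance.location.path}'
```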
**`.github/workflows/commitlint.yml`** · 6 changed lines (vendored)

```diff
@@ -1,5 +1,8 @@
name: commitlint
on: [pull_request]
defaults:
  run:
    working-directory: frontend
jobs:
  lint-commits:
    runs-on: ubuntu-latest
@@ -11,4 +14,5 @@ jobs:
      - uses: actions/setup-node@v2.1.0
        # or just "yarn" if you depend on "@commitlint/cli" already
      - run: yarn add @commitlint/cli
      - run: yarn run commitlint --from HEAD~${{ github.event.pull_request.commits }} --to HEAD
      - run: yarn add @commitlint/config-conventional
      - run: yarn run commitlint --config ./node_modules/@commitlint/config-conventional/index.js --from HEAD~${{ github.event.pull_request.commits }} --to HEAD
```
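The change points commitlint at the conventional-commits ruleset and scopes the job to the `frontend` working directory. The same check can be run locally against a candidate message, for example the `fix(FE):` prefix style described in the contributing guidelines:

```bash
# Run the same lint locally that the workflow step above runs in CI.
cd frontend
yarn add @commitlint/cli @commitlint/config-conventional
echo "fix(FE): align commit messages with conventional commits" | \
  yarn run commitlint --config ./node_modules/@commitlint/config-conventional/index.js
```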
**`.github/workflows/push.yaml`** · 17 changed lines (vendored)

```diff
@@ -6,7 +6,13 @@ on:
      - ^v[0-9]*.[0-9]*.x$
    tags:
      - "*"

  # pull_request:
  #   branches:
  #     - main
  #     - v*
  #   paths:
  #     - 'pkg/**'
  #     - 'frontend/**'
jobs:
  get-envs:
    runs-on: ubuntu-latest
@@ -21,6 +27,9 @@ jobs:
          then
            echo "tag build"
            img_tag=${GITHUB_REF#refs/*/v}
          elif [ ${array[1]} == "pull" ]
          then
            img_tag="pull-${{ github.event.number }}"
          else
            echo "non tag build"
            img_tag="latest"
@@ -74,7 +83,7 @@
        run: |
          branch=${GITHUB_REF#refs/*/}
          array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
          if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
          if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
          then
            source env-vars
            make build-push-frontend
@@ -115,7 +124,7 @@
        run: |
          branch=${GITHUB_REF#refs/*/}
          array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
          if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
          if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] ||[[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
          then
            source env-vars
            make build-push-query-service
@@ -156,7 +165,7 @@
        run: |
          branch=${GITHUB_REF#refs/*/}
          array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
          if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
          if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
          then
            source env-vars
            make build-push-flattener
```
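The added branch of the `get-envs` script derives an image tag from `GITHUB_REF`: tag builds use the version number, pull builds get a `pull-<number>` tag, and everything else falls back to `latest`; the three build jobs then also push for `pull` refs. A standalone sketch of that mapping, with an illustrative `GITHUB_REF` value; the real workflow uses `github.event.number` where this sketch uses the ref path segment.

```bash
# Standalone sketch of the tag-derivation logic added above (GITHUB_REF is illustrative).
GITHUB_REF="refs/pull/42/merge"
branch=${GITHUB_REF#refs/*/}
array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
if [ "${array[1]}" == "tags" ]; then
  img_tag=${GITHUB_REF#refs/*/v}     # tag build, e.g. refs/tags/v0.4.1 -> 0.4.1
elif [ "${array[1]}" == "pull" ]; then
  img_tag="pull-${array[2]}"         # workflow uses github.event.number here
else
  img_tag="latest"                   # non-tag build
fi
echo "image tag: ${img_tag}"
```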
**`.github/workflows/sonar.yml`** · 27 lines (new file, vendored)

```yaml
name: sonar
on:
  pull_request:
    branches:
      - main
      - v*
    paths:
      - 'frontend/**'
defaults:
  run:
    working-directory: frontend
jobs:
  sonar-analysis:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Sonar analysis
        uses: sonarsource/sonarcloud-github-action@master
        with:
          projectBaseDir: frontend
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
```
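The workflow runs SonarCloud analysis for frontend pull requests using the `SONAR_TOKEN` secret. A hedged sketch of an equivalent local run with the `sonar-scanner` CLI; the organization and project key are placeholders, since the SonarCloud project settings are not part of this diff.

```bash
# Sketch only: local SonarCloud analysis of the frontend (placeholders marked with <...>).
cd frontend
export SONAR_TOKEN="<sonarcloud-token>"
sonar-scanner \
  -Dsonar.host.url=https://sonarcloud.io \
  -Dsonar.organization=<your-org> \
  -Dsonar.projectKey=<your-project-key> \
  -Dsonar.login="$SONAR_TOKEN"
```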
**`.gitignore`** · 12 changed lines (vendored)

```diff
@@ -1,3 +1,7 @@
node_modules
yarn.lock
package.json

deploy/docker/environment_tiny/common_test
frontend/node_modules
frontend/.pnp
@@ -10,6 +14,8 @@ frontend/coverage
frontend/build
frontend/.vscode
frontend/.yarnclean
frontend/.temp_cache

# misc
.DS_Store
.env.local
@@ -21,6 +27,10 @@ frontend/npm-debug.log*
frontend/yarn-debug.log*
frontend/yarn-error.log*
frontend/src/constants/env.ts
frontend/cypress/**/*.mp4

# env file for cypress
frontend/cypress.env.json

.idea

@@ -29,5 +39,7 @@ frontend/src/constants/env.ts
**/build
**/storage
**/locust-scripts/__pycache__/
**/__debug_bin

frontend/*.env
pkg/query-service/signoz.db
```
**`CONTRIBUTING.md`** (file name inferred from content; header not captured in this view)

```diff
@@ -26,7 +26,7 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git
If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in #contributing channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) and we will DM you with `<test environment URL>`

- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
- change baseURL to `<test environment URL>` in file `src/constants/env.ts`
- Create a file `.env` with `FRONTEND_API_ENDPOINT=<test environment URL>`
- `yarn install`
- `yarn dev`
@@ -64,11 +64,12 @@ You can always reach out to `ankit@signoz.io` to understand more about the repo
- If you want to discuss something about the product, start a new [discussion](https://github.com/SigNoz/signoz/discussions)

### Conventions to follow when submitting commits, PRs

1. We try to follow https://www.conventionalcommits.org/en/v1.0.0/

More specifically the commits and PRs should have type specifiers prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.

e.g. If you are submitting a fix for an issue in frontend - PR name should be prefixed with `fix(FE):`
e.g. If you are submitting a fix for an issue in frontend - PR name should be prefixed with `fix(FE):`

2. Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows
```
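The updated contributing steps replace editing `src/constants/env.ts` with a `.env` file carrying `FRONTEND_API_ENDPOINT`. The frontend-only development flow from the hunk above, as one script; `<test environment URL>` stays a placeholder shared by the SigNoz team on Slack.

```bash
# Frontend-only local development against a provided test backend.
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
echo 'FRONTEND_API_ENDPOINT=<test environment URL>' > .env  # replaces editing src/constants/env.ts
yarn install
yarn dev
```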
**`README.de-de.md`** · 160 lines (new file)

```markdown
<p align="center">
  <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />

  <p align="center">Überwache deine Anwendungen und behebe Probleme in deinen bereitgestellten Anwendungen. SigNoz ist eine Open Source Alternative zu DataDog, New Relic, etc.</p>
</p>

<p align="center">
  <img alt="Lizenz" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
    <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>

<h3 align="center">
  <a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
  <a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>

##

SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. SigNoz benutzt verteilte Einzelschritt-Fehlersuchen, um Einblick in deinen Software-Stack zu bekommen.

👉 Du kannst Werte wie die P99-Latenz und die Fehler Häufigkeit von deinen Services, externen API Aufrufen und einzelnen Endpunkten sehen.

👉 Du kannst die Ursache des Problems finden, indem du zu dem Einzelschritt gehst, der das Problem verursacht und dir detaillierte Flamegraphs von einzelnen Abfragefehlersuchen anzeigen lassen.

👉 Erstelle Aggregate auf Basis von Fehlersuche Daten, um geschäftsrelevante Metriken zu erhalten.



<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Werde Teil unserer Slack Community

Sag Hi zu uns auf [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

## Funktionen:

- Übersichtsmetriken deiner Anwendung wie RPS, 50tes/90tes/99tes Quantil Latenzen und Fehler Häufigkeiten.
- Übersicht der langsamsten Endpunkte deiner Anwendung.
- Sieh dir die genaue Einzelschritt-Fehlersuche deiner Abfrage an, um Fehler in nachgelagerten Diensten, langsamen Datenbank Abfragen und Aufrufen von Drittanbieter Diensten wie Zahlungsportalen, etc. zu finden.
- Filtere Einzelschritt-Fehlersuchen nach Dienstname, Latenz, Fehler, Stichworten/ Anmerkungen.
- Führe Aggregate auf Basis von Einzelschritt-Fehlersuche Daten (Ereignisse/Abstände) aus, um geschäftsrelevante Metriken zu erhalten. Du kannst dir z. B. die Fehlerrate und 99tes Quantil Latenz von `customer_type: gold`, `deployment_version: v2` oder `external_call: paypal` ausgeben lassen.
- Einheitliche Benutzeroberfläche für Metriken und Einzelschritt-Fehlersuchen. Du musst nicht zwischen Prometheus und Jaeger hin und her wechseln, um Fehler zu beheben.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

## Wieso SigNoz?

Als Entwickler fanden wir es anstrengend, uns für jede kleine Funktion, die wir haben wollten, auf Closed Source SaaS Anbieter verlassen zu müssen. Closed Source Anbieter überraschen ihre Kunden zum Monatsende oft mit hohen Rechnungen, die keine Transparenz bzgl. der Kostenaufteilung bieten.

Wir wollten eine selbst gehostete, Open Source Variante von Lösungen wie DataDog, NewRelic für Firmen anbieten, die Datenschutz und Sicherheitsbedenken haben, bei der Weitergabe von Kundendaten an Drittanbieter.

Open Source gibt dir außerdem die totale Kontrolle über deine Konfiguration, Stichprobenentnahme und Betriebszeit. Du kannst des Weiteren neue Module auf Basis von SigNoz bauen, die erweiterte, geschäftsspezifische Funktionen anbieten.

### Unterstützte Programmiersprachen:

Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als die Software Library, die du nutzen kannst um deine Anwendungen auszuführen. Jedes Framework und jede Sprache die von OpenTelemetry unterstützt wird, wird auch von SigNoz unterstützt. Einige der unterstützten, größeren Programmiersprachen sind:

- Java
- Python
- NodeJS
- Go

Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

## Erste Schritte mit SigNoz


### Bereitstellung mit Docker

Bitte folge den [hier](https://signoz.io/docs/deployment/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.

Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/deployment/troubleshooting) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.

<p>&nbsp  </p>


### Bereitstellung mit Kubernetes und Helm

Bitte folge den [hier](https://signoz.io/docs/deployment/helm_chart) aufgelisteten Schritten, um deine Anwendung mit Helm Charts bereitzustellen.


<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

## Vergleiche mit anderen Lösungen

### SigNoz vs. Prometheus

Prometheus ist gut, falls du dich nur für Metriken interessierst. Wenn du eine nahtlose Integration von Metriken und Einzelschritt-Fehlersuchen haben möchtest, ist die Kombination aus Prometheus und Jaeger nicht das Richtige für dich.

Unser Ziel ist es, eine integrierte Benutzeroberfläche aus Metriken und Einzelschritt-Fehlersuchen anzubieten, ähnlich wie es SaaS Anbieter wie Datadog tun, mit der Möglichkeit von erweitertem filtern und aggregieren von Fehlersuchen. Etwas, was in Jaeger aktuell fehlt.

<p>&nbsp  </p>

### SigNoz vs. Jaeger

Jaeger kümmert sich nur um verteilte Einzelschritt-Fehlersuche. SigNoz erstellt sowohl Metriken als auch Einzelschritt-Fehlersuche, daneben haben wir auch Protokoll Verwaltung auf unserem Plan.

Außerdem hat SigNoz noch mehr spezielle Funktionen im Vergleich zu Jaeger:

- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an
- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag - customer_type='premium', was hingegen mit SigNoz leicht umsetzbar ist.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

## Zum Projekt beitragen


Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md) durch, bevor du anfängst, Beiträge zu SigNoz zu machen.

Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

## Dokumentation

Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Community

Werde Teil der [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.

Falls du irgendwelche Ideen, Fragen oder Feedback hast, kannst du sie gerne über unsere [Github Discussions](https://github.com/SigNoz/signoz/discussions) mit uns teilen.

Wie immer, danke an unsere großartigen Unterstützer!

<a href="https://github.com/signoz/signoz/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>
```
**`README.md`** (file name inferred from content; header not captured in this view)

```diff
@@ -16,6 +16,8 @@
<h3 align="center">
  <a href="https://signoz.io/docs"><b>Documentation</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
  <a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
@@ -28,6 +30,8 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
👉 You can find the root cause of the problem by going to the exact traces which are causing the problem and see detailed flamegraphs of individual request traces.

👉 Run aggregates on trace data to get business relevant metrics


@@ -49,7 +53,7 @@ Come say Hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_in
- Slowest endpoints in your application
- See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
- Filter traces by service name, operation, latency, error, tags/annotations.
- Aggregate metrics on filtered traces. Eg, you can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.

<br /><br />
```
**`README.pt-br.md`** · 159 lines (new file)

```markdown
<p align="center">
  <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />

  <p align="center">Monitore seus aplicativos e solucione problemas em seus aplicativos implantados, uma alternativa de código aberto para soluções como DataDog, New Relic, entre outras.</p>
</p>

<p align="center">
  <img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
    <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>

<h3 align="center">
  <a href="https://signoz.io/docs"><b>Documentação</b></a> •
  <a href="https://bit.ly/signoz-slack"><b>Comunidade no Slack</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>

##

SigNoz auxilia os desenvolvedores a monitorarem aplicativos e solucionar problemas em seus aplicativos implantados. SigNoz usa rastreamento distribuído para obter visibilidade em sua pilha de software.

👉 Você pode verificar métricas como latência p99, taxas de erro em seus serviços, requisições às APIs externas e endpoints individuais.

👉 Você pode encontrar a causa raiz do problema acessando os rastreamentos exatos que estão causando o problema e verificar os quadros detalhados de cada requisição individual.

👉 Execute agregações em dados de rastreamento para obter métricas de negócios relevantes.



<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Junte-se à nossa comunidade no Slack

Venha dizer oi para nós no [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

## Funções:

- Métricas de visão geral do aplicativo, como RPS, latências de percentual 50/90/99 e taxa de erro
- Endpoints mais lentos em seu aplicativo
- Visualize o rastreamento preciso de requisições de rede para descobrir problemas em serviços downstream, consultas lentas de banco de dados, chamadas para serviços de terceiros, como gateways de pagamento, etc.
- Filtre os rastreamentos por nome de serviço, operação, latência, erro, tags / anotações.
- Execute agregações em dados de rastreamento (eventos / extensões) para obter métricas de negócios relevantes, como por exemplo, você pode obter a taxa de erro e a latência do 99º percentil de `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Interface de Usuário unificada para métricas e rastreios. Não há necessidade de mudar de Prometheus para Jaeger para depurar problemas.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

## Por que escolher SigNoz?

Sendo desenvolvedores, achamos irritante contar com fornecedores de SaaS de código fechado para cada pequeno recurso que queríamos. Fornecedores de código fechado costumam surpreendê-lo com enormes contas no final do mês de uso sem qualquer transparência .

Queríamos fazer uma versão auto-hospedada e de código aberto de ferramentas como DataDog, NewRelic para empresas que têm preocupações com privacidade e segurança em ter dados de clientes indo para serviços de terceiros.

Ser open source também oferece controle completo de sua configuração, amostragem e tempos de atividade. Você também pode construir módulos sobre o SigNoz para estender recursos específicos do negócio.

### Linguagens Suportadas:

Nós apoiamos a biblioteca [OpenTelemetry](https://opentelemetry.io) como a biblioteca que você pode usar para instrumentar seus aplicativos. Em outras palavras, SigNoz oferece suporte a qualquer framework e linguagem que suporte a biblioteca OpenTelemetry. As principais linguagens suportadas incluem:

- Java
- Python
- NodeJS
- Go

Você pode encontrar a lista completa de linguagens aqui - https://opentelemetry.io/docs/

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

## Iniciando


### Implantar usando Docker

Siga as etapas listadas [aqui](https://signoz.io/docs/deployment/docker/) para instalar usando o Docker.

Esse [guia para solução de problemas](https://signoz.io/docs/deployment/troubleshooting) pode ser útil se você enfrentar quaisquer problemas.

<p>&nbsp  </p>


### Implentar no Kubernetes usando Helm

Siga as etapas listadas [aqui](https://signoz.io/docs/deployment/helm_chart) para instalar usando helm charts.


<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

## Comparações com ferramentas similares

### SigNoz ou Prometheus

Prometheus é bom se você quiser apenas fazer métricas. Mas se você quiser ter uma experiência perfeita entre métricas e rastreamentos, a experiência atual de unir Prometheus e Jaeger não é ótima.

Nosso objetivo é fornecer uma interface do usuário integrada entre métricas e rastreamentos - semelhante ao que fornecedores de SaaS como o Datadog fornecem - e fornecer filtragem e agregação avançada sobre rastreamentos, algo que a Jaeger atualmente carece.

<p>&nbsp  </p>

### SigNoz ou Jaeger

Jaeger só faz rastreamento distribuído. SigNoz faz métricas e rastreia, e também temos gerenciamento de log em nossos planos.

Além disso, SigNoz tem alguns recursos mais avançados do que Jaeger:

- A interface de usuário do Jaegar não mostra nenhuma métrica em traces ou em traces filtrados
- Jaeger não pode obter agregados em rastros filtrados. Por exemplo, latência p99 de solicitações que possuem tag - customer_type='premium'. Isso pode ser feito facilmente com SigNoz.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

## Contribuindo


Nós ❤️ contribuições grandes ou pequenas. Leia [CONTRIBUTING.md](CONTRIBUTING.md) para começar a fazer contribuições para o SigNoz.

Não sabe como começar? Basta enviar um sinal para nós no canal `#contributing` em nossa [comunidade no Slack.](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

## Documentação

Você pode encontrar a documentação em https://signoz.io/docs/. Se você tiver alguma dúvida ou sentir falta de algo, sinta-se à vontade para criar uma issue com a tag `documentation` no GitHub ou entre em contato conosco no canal da comunidade no Slack.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Comunidade

Junte-se a [comunidade no Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) para saber mais sobre rastreamento distribuído, observabilidade ou SigNoz e para se conectar com outros usuários e colaboradores.

Se você tiver alguma ideia, pergunta ou feedback, compartilhe em nosso [Github Discussões](https://github.com/SigNoz/signoz/discussions)

Como sempre, obrigado aos nossos incríveis colaboradores!

<a href="https://github.com/signoz/signoz/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>
```
**`deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml`** · 517 lines (new file)
@@ -0,0 +1,517 @@
|
||||
<?xml version="1.0"?>
|
||||
<yandex>
|
||||
<logger>
|
||||
<level>trace</level>
|
||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
||||
<size>1000M</size>
|
||||
<count>10</count>
|
||||
</logger>
|
||||
|
||||
<http_port>8123</http_port>
|
||||
<tcp_port>9000</tcp_port>
|
||||
|
||||
<!-- For HTTPS and SSL over native protocol. -->
|
||||
<!--
|
||||
<https_port>8443</https_port>
|
||||
<tcp_ssl_port>9440</tcp_ssl_port>
|
||||
-->
|
||||
|
||||
<!-- Used with https_port and tcp_ssl_port. Full ssl options list: https://github.com/yandex/ClickHouse/blob/master/contrib/libpoco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
|
||||
<openSSL>
|
||||
<server> <!-- Used for https server AND secure tcp port -->
|
||||
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
|
||||
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
|
||||
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
|
||||
<!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
|
||||
<dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
|
||||
<verificationMode>none</verificationMode>
|
||||
<loadDefaultCAFile>true</loadDefaultCAFile>
|
||||
<cacheSessions>true</cacheSessions>
|
||||
<disableProtocols>sslv2,sslv3</disableProtocols>
|
||||
<preferServerCiphers>true</preferServerCiphers>
|
||||
</server>
|
||||
|
||||
<client> <!-- Used for connecting to https dictionary source -->
|
||||
<loadDefaultCAFile>true</loadDefaultCAFile>
|
||||
<cacheSessions>true</cacheSessions>
|
||||
<disableProtocols>sslv2,sslv3</disableProtocols>
|
||||
<preferServerCiphers>true</preferServerCiphers>
|
||||
<!-- Use for self-signed: <verificationMode>none</verificationMode> -->
|
||||
<invalidCertificateHandler>
|
||||
<!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
|
||||
<name>RejectCertificateHandler</name>
|
||||
</invalidCertificateHandler>
|
||||
</client>
|
||||
</openSSL>
|
||||
|
||||
<!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
|
||||
<!--
|
||||
<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
|
||||
-->
|
||||
|
||||
<!-- Port for communication between replicas. Used for data exchange. -->
|
||||
<interserver_http_port>9009</interserver_http_port>
|
||||
|
||||
<!-- Hostname that is used by other replicas to request this server.
|
||||
If not specified, than it is determined analoguous to 'hostname -f' command.
|
||||
This setting could be used to switch replication to another network interface.
|
||||
-->
|
||||
<!--
|
||||
<interserver_http_host>example.yandex.ru</interserver_http_host>
|
||||
-->
|
||||
|
||||
<!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
|
||||
<listen_host>::</listen_host>
|
||||
<!-- Same for hosts with disabled ipv6: -->
|
||||
<!-- <listen_host>0.0.0.0</listen_host> -->
|
||||
|
||||
<!-- Default values - try listen localhost on ipv4 and ipv6: -->
|
||||
<!-- <listen_host>0.0.0.0</listen_host> -->
|
||||
|
||||
<max_connections>4096</max_connections>
|
||||
<keep_alive_timeout>3</keep_alive_timeout>
|
||||
|
||||
<!-- Maximum number of concurrent queries. -->
|
||||
<max_concurrent_queries>100</max_concurrent_queries>
|
||||
|
||||
<!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
|
||||
correct maximum value. -->
|
||||
<!-- <max_open_files>262144</max_open_files> -->
|
||||
|
||||
<!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
|
||||
In bytes. Cache is single for server. Memory is allocated only on demand.
|
||||
Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
|
||||
Uncompressed cache is advantageous only for very short queries and in rare cases.
|
||||
-->
|
||||
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
|
||||
|
||||
<!-- Approximate size of mark cache, used in tables of MergeTree family.
|
||||
In bytes. Cache is single for server. Memory is allocated only on demand.
|
||||
You should not lower this value.
|
||||
-->
|
||||
<mark_cache_size>5368709120</mark_cache_size>
|
||||
|
||||
|
||||
<!-- Path to data directory, with trailing slash. -->
|
||||
<path>/var/lib/clickhouse/</path>
|
||||
|
||||
<!-- Path to temporary data for processing hard queries. -->
|
||||
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
|
||||
|
||||
<!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
|
||||
<users_config>users.xml</users_config>
|
||||
|
||||
<!-- Default profile of settings.. -->
|
||||
<default_profile>default</default_profile>
|
||||
|
||||
<!-- Default database. -->
|
||||
<default_database>default</default_database>
|
||||
|
||||
<!-- Server time zone could be set here.
|
||||
|
||||
Time zone is used when converting between String and DateTime types,
|
||||
when printing DateTime in text formats and parsing DateTime from text,
|
||||
it is used in date and time related functions, if specific time zone was not passed as an argument.
|
||||
|
||||
Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
|
||||
If not specified, system time zone at server startup is used.
|
||||
|
||||
Please note, that server could display time zone alias instead of specified name.
|
||||
Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
|
||||
-->
|
||||
<!-- <timezone>Europe/Moscow</timezone> -->
|
||||
|
||||
<!-- You can specify umask here (see "man umask"). Server will apply it on startup.
|
||||
Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
|
||||
-->
|
||||
<!-- <umask>022</umask> -->
|
||||
|
||||
<!-- Configuration of clusters that could be used in Distributed tables.
|
||||
https://clickhouse.yandex/reference_en.html#Distributed
|
||||
-->
|
||||
<remote_servers incl="clickhouse_remote_servers" >
|
||||
<!-- Test only shard config for testing distributed storage -->
|
||||
<test_shard_localhost>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>localhost</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
</test_shard_localhost>
|
||||
</remote_servers>
|
||||
|
||||
|
||||
<!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
|
||||
By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
|
||||
Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
|
||||
-->
|
||||
|
||||
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
|
||||
Optional. If you don't use replicated tables, you could omit that.
|
||||
|
||||
See https://clickhouse.yandex/reference_en.html#Data%20replication
|
||||
-->
|
||||
<zookeeper incl="zookeeper-servers" optional="true" />
|
||||
|
||||
<!-- Substitutions for parameters of replicated tables.
|
||||
Optional. If you don't use replicated tables, you could omit that.
|
||||
|
||||
See https://clickhouse.yandex/reference_en.html#Creating%20replicated%20tables
|
||||
-->
|
||||
<macros incl="macros" optional="true" />
|
||||
|
||||
|
||||
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
|
||||
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
|
||||
|
||||
|
||||
<!-- Maximum session timeout, in seconds. Default: 3600. -->
|
||||
<max_session_timeout>3600</max_session_timeout>
|
||||
|
||||
<!-- Default session timeout, in seconds. Default: 60. -->
|
||||
<default_session_timeout>60</default_session_timeout>
|
||||
|
||||
<!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
|
||||
<!--
|
||||
interval - send every X second
|
||||
root_path - prefix for keys
|
||||
hostname_in_path - append hostname to root_path (default = true)
|
||||
metrics - send data from table system.metrics
|
||||
events - send data from table system.events
|
||||
asynchronous_metrics - send data from table system.asynchronous_metrics
|
||||
-->
|
||||
<!--
|
||||
<graphite>
|
||||
<host>localhost</host>
|
||||
<port>42000</port>
|
||||
<timeout>0.1</timeout>
|
||||
<interval>60</interval>
|
||||
<root_path>one_min</root_path>
|
||||
<hostname_in_path>true<hostname_in_path>
|
||||
|
||||
<metrics>true</metrics>
|
||||
<events>true</events>
|
||||
<asynchronous_metrics>true</asynchronous_metrics>
|
||||
</graphite>
|
||||
<graphite>
|
||||
<host>localhost</host>
|
||||
<port>42000</port>
|
||||
<timeout>0.1</timeout>
|
||||
<interval>1</interval>
|
||||
<root_path>one_sec</root_path>
|
||||
|
||||
<metrics>true</metrics>
|
||||
<events>true</events>
|
||||
<asynchronous_metrics>false</asynchronous_metrics>
|
||||
</graphite>
|
||||
-->
|
||||
|
||||
|
||||
<!-- Query log. Used only for queries with setting log_queries = 1. -->
|
||||
<query_log>
|
||||
<!-- What table to insert data. If table is not exist, it will be created.
|
||||
When query log structure is changed after system update,
|
||||
then old table will be renamed and new table will be created automatically.
|
||||
-->
|
||||
<database>system</database>
|
||||
<table>query_log</table>
|
||||
|
||||
<!-- Interval of flushing data. -->
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
</query_log>
|
||||
|
||||
|
||||
<!-- Uncomment if use part_log
|
||||
<part_log>
|
||||
<database>system</database>
|
||||
<table>part_log</table>
|
||||
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
</part_log>
|
||||
-->
|
||||
|
||||
|
||||
<!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
|
||||
See https://clickhouse.yandex/reference_en.html#Internal%20dictionaries
|
||||
-->
|
||||
|
||||
<!-- Path to file with region hierarchy. -->
|
||||
<!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
|
||||
|
||||
<!-- Path to directory with files containing names of regions -->
|
||||
<!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
|
||||
|
||||
|
||||
<!-- Configuration of external dictionaries. See:
|
||||
https://clickhouse.yandex/reference_en.html#External%20Dictionaries
|
||||
-->
|
||||
<dictionaries_config>*_dictionary.xml</dictionaries_config>
|
||||
|
||||
<!-- Uncomment if you want data to be compressed 30-100% better.
|
||||
Don't do that if you just started using ClickHouse.
|
||||
-->
|
||||
<compression incl="clickhouse_compression">
|
||||
<!--
|
||||
<!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
|
||||
<case>
|
||||
|
||||
<!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
|
||||
<min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
|
||||
<min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
|
||||
|
||||
<!- - What compression method to use. - ->
|
||||
<method>zstd</method>
|
||||
</case>
|
||||
-->
|
||||
</compression>
|
||||
|
||||
<!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
|
||||
Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
|
||||
<distributed_ddl>
|
||||
<!-- Path in ZooKeeper to queue with DDL queries -->
|
||||
<path>/clickhouse/task_queue/ddl</path>
|
||||
</distributed_ddl>
|
||||
|
||||
<!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
|
||||
<!--
|
||||
<merge_tree>
|
||||
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
|
||||
</merge_tree>
|
||||
-->
|
||||
|
||||
<!-- Protection from accidental DROP.
|
||||
If size of a MergeTree table is greater than max_table_size_to_drop (in bytes) than table could not be dropped with any DROP query.
|
||||
If you want do delete one table and don't want to restart clickhouse-server, you could create special file <clickhouse-path>/flags/force_drop_table and make DROP once.
|
||||
By default max_table_size_to_drop is 50GB, max_table_size_to_drop=0 allows to DROP any tables.
|
||||
Uncomment to disable protection.
|
||||
-->
|
||||
<!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
|
||||
|
||||
<!-- Example of parameters for GraphiteMergeTree table engine -->
|
||||
<graphite_rollup>
|
||||
<!-- carbon -->
|
||||
<pattern>
|
||||
<regexp>^carbon\.</regexp>
|
||||
<function>any</function>
|
||||
<retention>
|
||||
<age>0</age>
|
||||
<precision>60</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>7776000</age>
|
||||
<precision>3600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>10368000</age>
|
||||
<precision>21600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>34560000</age>
|
||||
<precision>43200</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>63072000</age>
|
||||
<precision>86400</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>94608000</age>
|
||||
<precision>604800</precision>
|
||||
</retention>
|
||||
</pattern>
|
||||
<!-- collectd -->
|
||||
<pattern>
|
||||
<regexp>^collectd\.</regexp>
|
||||
<function>any</function>
|
||||
<retention>
|
||||
<age>0</age>
|
||||
<precision>10</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>43200</age>
|
||||
<precision>60</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>864000</age>
|
||||
<precision>900</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>1728000</age>
|
||||
<precision>1800</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>3456000</age>
|
||||
<precision>3600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>10368000</age>
|
||||
<precision>21600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>34560000</age>
|
||||
<precision>43200</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>63072000</age>
|
||||
<precision>86400</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>94608000</age>
|
||||
<precision>604800</precision>
|
||||
</retention>
|
||||
</pattern>
|
||||
<!-- high -->
|
||||
<pattern>
|
||||
<regexp>^high\.</regexp>
|
||||
<function>any</function>
|
||||
<retention>
|
||||
<age>0</age>
|
||||
<precision>10</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>172800</age>
|
||||
<precision>60</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>864000</age>
|
||||
<precision>900</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>1728000</age>
|
||||
<precision>1800</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>3456000</age>
|
||||
<precision>3600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>10368000</age>
|
||||
<precision>21600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>34560000</age>
|
||||
<precision>43200</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>63072000</age>
|
||||
<precision>86400</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>94608000</age>
|
||||
<precision>604800</precision>
|
||||
</retention>
|
||||
</pattern>
|
||||
<!-- medium -->
|
||||
<pattern>
|
||||
<regexp>^medium\.</regexp>
|
||||
<function>any</function>
|
||||
<retention>
|
||||
<age>0</age>
|
||||
<precision>60</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>864000</age>
|
||||
<precision>900</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>1728000</age>
|
||||
<precision>1800</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>3456000</age>
|
||||
<precision>3600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>10368000</age>
|
||||
<precision>21600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>34560000</age>
|
||||
<precision>43200</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>63072000</age>
|
||||
<precision>86400</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>94608000</age>
|
||||
<precision>604800</precision>
|
||||
</retention>
|
||||
</pattern>
|
||||
<!-- low -->
|
||||
<pattern>
|
||||
<regexp>^low\.</regexp>
|
||||
<function>any</function>
|
||||
<retention>
|
||||
<age>0</age>
|
||||
<precision>600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>15552000</age>
|
||||
<precision>1800</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>31536000</age>
|
||||
<precision>3600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>63072000</age>
|
||||
<precision>21600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>126144000</age>
|
||||
<precision>43200</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>252288000</age>
|
||||
<precision>86400</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>315360000</age>
|
||||
<precision>604800</precision>
|
||||
</retention>
|
||||
</pattern>
|
||||
<!-- default -->
|
||||
<default>
|
||||
<function>any</function>
|
||||
<retention>
|
||||
<age>0</age>
|
||||
<precision>60</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>864000</age>
|
||||
<precision>900</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>1728000</age>
|
||||
<precision>1800</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>3456000</age>
|
||||
<precision>3600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>10368000</age>
|
||||
<precision>21600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>34560000</age>
|
||||
<precision>43200</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>63072000</age>
|
||||
<precision>86400</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>94608000</age>
|
||||
<precision>604800</precision>
|
||||
</retention>
|
||||
</default>
|
||||
</graphite_rollup>
|
||||
|
||||
<!-- Directory in <clickhouse-path> containing schema files for various input formats.
|
||||
The directory will be created if it doesn't exist.
|
||||
-->
|
||||
<format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
|
||||
</yandex>
|
||||
**`deploy/docker-swarm/clickhouse-setup/docker-compose.yaml`** · 113 lines (new file)

```yaml
version: "3"

services:
  clickhouse:
    image: yandex/clickhouse-server
    expose:
      - 8123
      - 9000
    ports:
      - 9001:9000
      - 8123:8123
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
      - ./data/clickhouse/:/var/lib/clickhouse/

    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3

  query-service:
    image: signoz/query-service:0.4.1
    container_name: query-service
    restart: always
    command: ["-config=/root/config/prometheus.yml"]
    ports:
      - "8080:8080"
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - STORAGE=clickhouse
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go
    depends_on:
      - clickhouse

  frontend:
    image: signoz/frontend:0.4.1
    container_name: frontend

    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3000:3000"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/otelcontribcol:0.4.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=2000"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777" # pprof extension
      - "8887:8888" # Prometheus metrics exposed by the agent
      - "14268:14268" # Jaeger receiver
      - "55678" # OpenCensus receiver
      - "55680:55680" # OTLP HTTP/2.0 legacy port
      - "55681:55681" # OTLP HTTP/1.0 receiver
      - "4317:4317" # OTLP GRPC receiver
      - "55679:55679" # zpages extension
      - "13133" # health_check
    deploy:
      mode: replicated
      replicas: 3
    depends_on:
      - clickhouse

  otel-collector-hostmetrics:
    image: signoz/otelcontribcol:0.4.0
    command: ["--config=/etc/otel-collector-config-hostmetrics.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config-hostmetrics.yaml:/etc/otel-collector-config-hostmetrics.yaml
    depends_on:
      - clickhouse

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
```
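This Compose file targets Docker Swarm (the `deploy.replicas: 3` block on `otel-collector` is only honored by `docker stack deploy`). A hedged deployment sketch follows; the stack name `signoz` is an assumption, inferred from the `tasks.signoz_otel-collector` DNS name used in the collector config further below.

```bash
# Sketch only: deploy this file as a Swarm stack and verify the services come up.
cd deploy/docker-swarm/clickhouse-setup
docker swarm init                                  # once per host
docker stack deploy -c docker-compose.yaml signoz  # stack name assumed, see note above
docker stack services signoz                       # otel-collector should report 3/3 replicas

# Same probe the clickhouse healthcheck uses, run from the host:
wget --spider -q localhost:8123/ping && echo "clickhouse is up"
```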
@@ -0,0 +1,31 @@
CREATE TABLE IF NOT EXISTS signoz_index (
  timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
  traceID String CODEC(ZSTD(1)),
  spanID String CODEC(ZSTD(1)),
  parentSpanID String CODEC(ZSTD(1)),
  serviceName LowCardinality(String) CODEC(ZSTD(1)),
  name LowCardinality(String) CODEC(ZSTD(1)),
  kind Int32 CODEC(ZSTD(1)),
  durationNano UInt64 CODEC(ZSTD(1)),
  tags Array(String) CODEC(ZSTD(1)),
  tagsKeys Array(String) CODEC(ZSTD(1)),
  tagsValues Array(String) CODEC(ZSTD(1)),
  statusCode Int64 CODEC(ZSTD(1)),
  references String CODEC(ZSTD(1)),
  externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
  externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
  component Nullable(String) CODEC(ZSTD(1)),
  dbSystem Nullable(String) CODEC(ZSTD(1)),
  dbName Nullable(String) CODEC(ZSTD(1)),
  dbOperation Nullable(String) CODEC(ZSTD(1)),
  peerService Nullable(String) CODEC(ZSTD(1)),
  INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
  INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
  INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
  INDEX idx_kind kind TYPE minmax GRANULARITY 4,
  INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
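The table above is where the collector's ClickHouse exporter writes trace data. As a quick sanity check that spans are actually landing in it, a sketch like the following can be run against the ClickHouse container; it assumes the `clickhouse-driver` Python package and that port 9000 is reachable from where you run it (host and port are placeholders), while the table and column names come straight from the DDL above.

```python
# Sketch: peek at recently ingested spans in the signoz_index table.
# Assumes ClickHouse is reachable on localhost:9000 (adjust host/port as needed).
from clickhouse_driver import Client

client = Client(host="localhost", port=9000)

# Slowest operations per service over the last hour, using columns from the DDL above.
rows = client.execute(
    """
    SELECT serviceName, name, max(durationNano) / 1e6 AS max_ms, count() AS spans
    FROM signoz_index
    WHERE timestamp > now() - INTERVAL 1 HOUR
    GROUP BY serviceName, name
    ORDER BY max_ms DESC
    LIMIT 10
    """
)
for service, operation, max_ms, spans in rows:
    print(f"{service:20s} {operation:40s} {max_ms:10.2f} ms ({spans} spans)")
```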
@@ -0,0 +1,72 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:

  hostmetrics:
    collection_interval: 60s
    scrapers:
      cpu:
      load:
      memory:
      disk:
      filesystem:
      network:

  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        - job_name: "otel-collector"
          dns_sd_configs:
            - names:
                - 'tasks.signoz_otel-collector'
              type: 'A'
              port: 8888
        - job_name: "otel-collector-hostmetrics"
          scrape_interval: 10s
          static_configs:
            - targets: ["otel-collector-hostmetrics:8888"]
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [batch]
      exporters: [clickhouse]
    metrics:
      receivers: [otlp, prometheus, hostmetrics]
      processors: [batch]
      exporters: [clickhousemetricswrite]
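With this config the collector listens for OTLP over gRPC on 4317 and ships traces through the `clickhouse` exporter. A minimal sketch for pushing a single test span through that pipeline, assuming the `opentelemetry-sdk` and `opentelemetry-exporter-otlp` Python packages are installed; the endpoint and service name are placeholders, not something defined in this diff.

```python
# Sketch: emit one test span to the collector's OTLP gRPC receiver (port 4317 above).
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

provider = TracerProvider(resource=Resource.create({"service.name": "smoke-test"}))
provider.add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter(endpoint="localhost:4317", insecure=True))
)
trace.set_tracer_provider(provider)

tracer = trace.get_tracer("smoke-test")
with tracer.start_as_current_span("hello-signoz"):
    pass  # the span should flow through the traces pipeline defined above

provider.shutdown()  # flush the batch processor before exiting
```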
@@ -0,0 +1,47 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [batch]
      exporters: [clickhouse]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite]
25 deploy/docker-swarm/clickhouse-setup/prometheus.yml Normal file
@@ -0,0 +1,25 @@
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:

remote_read:
  - url: tcp://clickhouse:9000/?database=signoz_metrics
16 deploy/docker-swarm/common/locust-scripts/locustfile.py Normal file
@@ -0,0 +1,16 @@
from locust import HttpUser, task, between

class UserTasks(HttpUser):
    wait_time = between(5, 15)

    @task
    def rachel(self):
        self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")
    @task
    def trom(self):
        self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")
    @task
    def japanese(self):
        self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")
    @task
    def coffee(self):
        self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")
30 deploy/docker-swarm/common/nginx-config.conf Normal file
@@ -0,0 +1,30 @@
server {
    listen 3000;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }
    location /api {
        proxy_pass http://query-service:8080/api;
    }

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
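This nginx config serves the UI on port 3000 and proxies `/api` to the query-service. A hedged smoke test for both routes, assuming the `requests` package and that the frontend container's port 3000 is published on localhost; the `/api/v1/services` path is one the UI itself calls elsewhere in this diff, the rest is illustrative.

```python
# Sketch: verify the frontend serves the SPA and that /api is proxied to query-service.
import requests

BASE = "http://localhost:3000"  # assumes the "3000:3000" port mapping from the compose file

index = requests.get(f"{BASE}/", timeout=5)
print("UI:", index.status_code)  # expect 200 with the index.html shell

# /api/* is proxied to query-service:8080 per the location block above; any HTTP
# response (even a 4xx for missing query params) shows the proxy is wired up.
services = requests.get(f"{BASE}/api/v1/services", timeout=5)
print("API:", services.status_code)
```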
0 deploy/docker-swarm/dashboards/.gitkeep Normal file
35 deploy/docker/clickhouse-setup/alertmanager.yml Normal file
@@ -0,0 +1,35 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
  - name: 'slack-notifications'
    slack_configs:
      - channel: '#alerts'
        send_resolved: true
        icon_url: https://avatars3.githubusercontent.com/u/3380462
        title: |-
          [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
          {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
            {{" "}}(
            {{- with .CommonLabels.Remove .GroupLabels.Names }}
              {{- range $index, $label := .SortedPairs -}}
                {{ if $index }}, {{ end }}
                {{- $label.Name }}="{{ $label.Value -}}"
              {{- end }}
            {{- end -}}
            )
          {{- end }}
        text: >-
          {{ range .Alerts -}}
          *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

          *Description:* {{ .Annotations.description }}

          *Details:*
          {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
          {{ end }}
          {{ end }}
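Because the route above sends everything to the `slack-notifications` receiver, the Slack title/text templates can be exercised without waiting for a real rule to fire by posting a synthetic alert to Alertmanager's v2 API. A sketch, assuming the `requests` package and the `9093:9093` port mapping added in the compose file below; the labels and annotations are made up.

```python
# Sketch: fire a test alert at Alertmanager so the Slack templates above render.
from datetime import datetime, timedelta, timezone

import requests

now = datetime.now(timezone.utc)
alert = [{
    "labels": {"alertname": "TestAlert", "severity": "warning", "job": "smoke-test"},
    "annotations": {"title": "Test alert", "description": "Fired manually to check the Slack template"},
    "startsAt": now.isoformat(),
    "endsAt": (now + timedelta(minutes=5)).isoformat(),
}]

resp = requests.post("http://localhost:9093/api/v2/alerts", json=alert, timeout=5)
print(resp.status_code)  # Alertmanager replies 200 when the alert is accepted
```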
11 deploy/docker/clickhouse-setup/alerts.yml Normal file
@@ -0,0 +1,11 @@
groups:
  - name: ExampleCPULoadGroup
    rules:
      - alert: HighCpuLoad
        expr: system_cpu_load_average_1m > 0.1
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: High CPU load
          description: "CPU load is > 0.1\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
0 deploy/docker/clickhouse-setup/data/signoz/.gitkeep Normal file
@@ -2,7 +2,7 @@ version: "2.4"

services:
  clickhouse:
    image: yandex/clickhouse-server
    image: ${clickhouse_image}
    expose:
      - 8123
      - 9000
@@ -12,6 +12,8 @@ services:
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
      - ./data/clickhouse/:/var/lib/clickhouse/

    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
@@ -19,24 +21,40 @@ services:
      timeout: 5s
      retries: 3

  query-service:
    image: signoz/query-service:0.3.4
    container_name: query-service
  alertmanager:
    image: signoz/alertmanager:0.5.0
    volumes:
      - ./alertmanager.yml:/prometheus/alertmanager.yml
      - ./data/alertmanager:/data
    command:
      - '--config.file=/prometheus/alertmanager.yml'
      - '--storage.path=/data'
    ports:
      - 9093:9093

  query-service:
    image: signoz/query-service:0.5.1
    container_name: query-service
    command: ["-config=/root/config/prometheus.yml"]
    ports:
      - "8080:8080"
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/

    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - STORAGE=clickhouse
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go

    depends_on:
      clickhouse:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.3.4
    image: signoz/frontend:0.5.2
    container_name: frontend

    depends_on:
@@ -50,7 +68,7 @@ services:


  otel-collector:
    image: signoz/otelcol:latest
    image: signoz/otelcontribcol:0.4.2
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -64,11 +82,19 @@ services:
      - "4317:4317"   # OTLP GRPC receiver
      - "55679:55679" # zpages extension
      - "13133"       # health_check
      - "8889:8889"   # prometheus exporter
    depends_on:
      clickhouse:
        condition: service_healthy

  otel-collector-metrics:
    image: signoz/otelcontribcol:0.4.2
    command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    depends_on:
      clickhouse:
        condition: service_healthy
  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
@@ -19,6 +19,10 @@ CREATE TABLE IF NOT EXISTS signoz_index (
  dbName Nullable(String) CODEC(ZSTD(1)),
  dbOperation Nullable(String) CODEC(ZSTD(1)),
  peerService Nullable(String) CODEC(ZSTD(1)),
  INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
  INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
  INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
  INDEX idx_kind kind TYPE minmax GRANULARITY 4,
  INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
1 deploy/docker/clickhouse-setup/env/arm64.env vendored Normal file
@@ -0,0 +1 @@
clickhouse_image=altinity/clickhouse-server:21.8.12.1.testingarm
1 deploy/docker/clickhouse-setup/env/x86_64.env vendored Normal file
@@ -0,0 +1 @@
clickhouse_image=yandex/clickhouse-server
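The two env files exist only so the same compose file can run on both architectures by swapping `clickhouse_image`. A small illustrative sketch of the same arch check the install script performs further below; the helper name and paths are just for illustration, not part of this change.

```python
# Sketch: pick the right env file for docker-compose based on the CPU architecture,
# mirroring the is_arm64 check added to install.sh later in this diff.
import platform

def clickhouse_env_file() -> str:
    machine = platform.machine().lower()
    if machine in ("arm64", "aarch64"):
        return "./docker/clickhouse-setup/env/arm64.env"
    return "./docker/clickhouse-setup/env/x86_64.env"

print(clickhouse_env_file())  # pass via: docker-compose --env-file <this> ... up -d
```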
@@ -1,4 +1,8 @@
receivers:
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: "localhost:12345"
  otlp:
    protocols:
      grpc:
@@ -7,10 +11,22 @@ receivers:
    protocols:
      grpc:
      thrift_http:
  hostmetrics:
    collection_interval: 30s
    scrapers:
      cpu:
      load:
      memory:
      disk:
      filesystem:
      network:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
@@ -29,11 +45,23 @@ extensions:
exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000

  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  prometheus:
    endpoint: "0.0.0.0:8889"
service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhouse]
    metrics:
      receivers: [otlp, hostmetrics]
      processors: [batch]
      exporters: [clickhouse]
      exporters: [clickhousemetricswrite]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
@@ -0,0 +1,44 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:

  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        - job_name: "otel-collector"
          scrape_interval: 30s
          static_configs:
            - targets: ["otel-collector:8889"]
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

service:
  extensions: [health_check, zpages]
  pipelines:
    metrics:
      receivers: [otlp, prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
26 deploy/docker/clickhouse-setup/prometheus.yml Normal file
@@ -0,0 +1,26 @@
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:

remote_read:
  - url: tcp://clickhouse:9000/?database=signoz_metrics
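With `remote_read` pointing at the ClickHouse metrics database, the query-service exposes a Prometheus-compatible range-query API that the frontend uses (the `/api/v1/query_range` path and the `status`/`data`/`result` response shape appear in the frontend fixtures later in this diff). A hedged sketch of hitting it directly, assuming the `requests` package and the `8080:8080` port mapping; the metric name is the one from the example alert rule above.

```python
# Sketch: run a PromQL range query against the query-service's Prometheus-style API.
import time

import requests

end = int(time.time())
start = end - 15 * 60  # last 15 minutes

resp = requests.get(
    "http://localhost:8080/api/v1/query_range",
    params={
        "query": "system_cpu_load_average_1m",  # same metric the HighCpuLoad rule uses
        "start": start,
        "end": end,
        "step": 60,
    },
    timeout=10,
)
body = resp.json()
print(body.get("status"), len(body.get("data", {}).get("result", [])))
```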
@@ -16,6 +16,9 @@ server {
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }
    location /api/alertmanager {
        proxy_pass http://alertmanager:9093/api/v2;
    }
    location /api {
        proxy_pass http://query-service:8080/api;
@@ -147,7 +147,7 @@ services:
      retries: 5

  flatten-processor:
    image: signoz/flattener-processor:0.3.4
    image: signoz/flattener-processor:0.4.0
    container_name: flattener-processor

    depends_on:
@@ -163,26 +163,29 @@ services:


  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.3.4
    image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
    container_name: query-service

    depends_on:
      - router
    ports:
      - "8080:8080"

    volumes:
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - STORAGE=druid
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w

      - GODEBUG=netdns=go

    depends_on:
      router:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.3.4
    image: signoz/frontend:0.4.1
    container_name: frontend

    depends_on:
@@ -142,7 +142,7 @@ services:
      retries: 5

  flatten-processor:
    image: signoz/flattener-processor:0.3.4
    image: signoz/flattener-processor:0.4.0
    container_name: flattener-processor

    depends_on:
@@ -158,26 +158,30 @@ services:


  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.3.4
    image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
    container_name: query-service

    depends_on:
      - router
    ports:
      - "8080:8080"


    volumes:
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - STORAGE=druid
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go

    depends_on:
      router:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.3.4
    image: signoz/frontend:0.4.1
    container_name: frontend

    depends_on:
@@ -36,6 +36,10 @@ is_mac() {
    [[ $OSTYPE == darwin* ]]
}

is_arm64() {
    [[ `uname -m` == 'arm64' ]]
}

check_os() {
    if is_mac; then
        package_manager="brew"
@@ -160,8 +164,9 @@ install_docker() {
        echo
        echo "Amazon Linux detected ... "
        echo
        sudo yum install docker
        sudo service docker start
        # sudo yum install docker
        # sudo service docker start
        sudo amazon-linux-extras install docker
    else

        yum_cmd="sudo yum --assumeyes --quiet"
@@ -266,7 +271,11 @@ bye() {  # Prints a friendly good bye message and exits the script.
    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""
    if [ $setup_type == 'clickhouse' ]; then
        echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
        if is_arm64; then
            echo -e "sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml ps -a"
        else
            echo -e "sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml ps -a"
        fi
    else
        echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
    fi
@@ -313,28 +322,30 @@ check_os

SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')

echo ""
# echo ""

echo -e "👉 ${RED}Two ways to go forward\n"
echo -e "${RED}1) ClickHouse as database (default)\n"
echo -e "${RED}2) Kafka + Druid setup to handle scale (recommended for production use)\n"
read -p "⚙️ Enter your preference (1/2):" choice_setup
# echo -e "👉 ${RED}Two ways to go forward\n"
# echo -e "${RED}1) ClickHouse as database (default)\n"
# echo -e "${RED}2) Kafka + Druid as datastore \n"
# read -p "⚙️ Enter your preference (1/2):" choice_setup

while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
do
    # echo $choice_setup
    echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
    read -p "⚙️ Enter your preference (1/2): " choice_setup
    # echo $choice_setup
done
# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
# do
#     # echo $choice_setup
#     echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
#     read -p "⚙️ Enter your preference (1/2): " choice_setup
#     # echo $choice_setup
# done

if [[ $choice_setup == "1" || $choice_setup == "" ]];then
    setup_type='clickhouse'
else
    setup_type='druid'
fi
# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
#     setup_type='clickhouse'
# else
#     setup_type='druid'
# fi

echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
setup_type='clickhouse'

# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"

# Run bye if failure happens
trap bye EXIT
@@ -405,7 +416,11 @@ start_docker
echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
    sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
    if is_arm64; then
        sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f ./docker/clickhouse-setup/docker-compose.yaml pull
    else
        sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f ./docker/clickhouse-setup/docker-compose.yaml pull
    fi
else
    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
fi
@@ -417,7 +432,11 @@ echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do it's thing.
if [ $setup_type == 'clickhouse' ]; then
    sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
    if is_arm64; then
        sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
    else
        sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
    fi
else
    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
fi
@@ -477,7 +496,7 @@ else
    echo ""

    if [ $setup_type == 'clickhouse' ]; then
        echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v"
        echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml down -v"
    else
        echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
    fi
@@ -508,4 +527,4 @@ else

fi

echo -e "\n🙏 Thank you!\n"
echo -e "\n🙏 Thank you!\n"
@@ -10,12 +10,12 @@ dependencies:
    version: 0.2.18
  - name: flattener-processor
    repository: file://./signoz-charts/flattener-processor
    version: 0.3.4
    version: 0.3.6
  - name: query-service
    repository: file://./signoz-charts/query-service
    version: 0.3.4
    version: 0.3.6
  - name: frontend
    repository: file://./signoz-charts/frontend
    version: 0.3.4
digest: sha256:49f8b43acecdb7cb0fb8ed5af1488bfbef5b66be02e17bb716403750a6894844
generated: "2021-08-10T23:37:01.176317+05:30"
    version: 0.3.6
digest: sha256:b160e903c630a90644683c512eb8ba018e18d2c08051e255edd3749cb9cc7228
generated: "2021-08-23T12:06:37.231066+05:30"

@@ -34,10 +34,10 @@ dependencies:
    version: 0.2.18
  - name: flattener-processor
    repository: "file://./signoz-charts/flattener-processor"
    version: 0.3.4
    version: 0.3.6
  - name: query-service
    repository: "file://./signoz-charts/query-service"
    version: 0.3.4
    version: 0.3.6
  - name: frontend
    repository: "file://./signoz-charts/frontend"
    version: 0.3.4
    version: 0.3.6
@@ -14,8 +14,8 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.4
version: 0.3.6

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.4
appVersion: 0.3.6
@@ -14,8 +14,8 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.4
version: 0.3.6

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.4
appVersion: 0.3.6
@@ -14,8 +14,8 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.4
version: 0.3.6

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.4
appVersion: 0.3.6
2 frontend/.eslintignore Normal file
@@ -0,0 +1,2 @@
node_modules
build
63 frontend/.eslintrc.js Normal file
@@ -0,0 +1,63 @@
module.exports = {
  env: {
    browser: true,
    es2021: true,
    node: true,
  },
  extends: [
    'eslint:recommended',
    'plugin:react/recommended',
    'plugin:@typescript-eslint/recommended',
    'plugin:@typescript-eslint/eslint-recommended',
    'plugin:prettier/recommended',
  ],
  parser: '@typescript-eslint/parser',
  parserOptions: {
    ecmaFeatures: {
      jsx: true,
    },
    ecmaVersion: 12,
    sourceType: 'module',
  },
  plugins: [
    'react',
    '@typescript-eslint',
    'simple-import-sort',
    'react-hooks',
    'prettier',
  ],
  settings: {
    react: {
      version: 'latest',
    },
  },
  rules: {
    'react/jsx-filename-extension': [
      'error',
      {
        extensions: ['.tsx', '.js', '.jsx'],
      },
    ],
    'react/prop-types': 'off',
    '@typescript-eslint/explicit-function-return-type': 'error',
    '@typescript-eslint/no-var-requires': 0,
    'react/no-array-index-key': 2,
    'linebreak-style': ['error', 'unix'],

    // simple sort error
    'simple-import-sort/imports': 'error',
    'simple-import-sort/exports': 'error',

    // hooks
    'react-hooks/rules-of-hooks': 'error',
    'react-hooks/exhaustive-deps': 'warn',

    'prettier/prettier': [
      'error',
      {},
      {
        usePrettierrc: true,
      },
    ],
  },
};
@@ -2,6 +2,7 @@
  "trailingComma": "all",
  "useTabs": true,
  "tabWidth": 1,
  "singleQuote": false,
  "jsxSingleQuote": false
  "singleQuote": true,
  "jsxSingleQuote": false,
  "semi": true
}
1 frontend/__mocks__/cssMock.ts Normal file
@@ -0,0 +1 @@
export default {};
8 frontend/bundlesize.config.json Normal file
@@ -0,0 +1,8 @@
{
  "files": [
    {
      "path": "./build/**.js",
      "maxSize": "1.2MB"
    }
  ]
}
3 frontend/cypress.json Normal file
@@ -0,0 +1,3 @@
{
  "video": false
}
47
frontend/cypress/CustomFunctions/Login.ts
Normal file
47
frontend/cypress/CustomFunctions/Login.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
const Login = ({ email, name }: LoginProps): void => {
|
||||
const emailInput = cy.findByPlaceholderText('mike@netflix.com');
|
||||
|
||||
emailInput.then((emailInput) => {
|
||||
const element = emailInput[0];
|
||||
// element is present
|
||||
expect(element).not.undefined;
|
||||
expect(element.nodeName).to.be.equal('INPUT');
|
||||
});
|
||||
emailInput.type(email).then((inputElements) => {
|
||||
const inputElement = inputElements[0];
|
||||
const inputValue = inputElement.getAttribute('value');
|
||||
expect(inputValue).to.be.equals(email);
|
||||
});
|
||||
|
||||
const firstNameInput = cy.findByPlaceholderText('Mike');
|
||||
firstNameInput.then((firstNameInput) => {
|
||||
const element = firstNameInput[0];
|
||||
// element is present
|
||||
expect(element).not.undefined;
|
||||
expect(element.nodeName).to.be.equal('INPUT');
|
||||
});
|
||||
|
||||
firstNameInput.type(name).then((inputElements) => {
|
||||
const inputElement = inputElements[0];
|
||||
const inputValue = inputElement.getAttribute('value');
|
||||
expect(inputValue).to.be.equals(name);
|
||||
});
|
||||
|
||||
const gettingStartedButton = cy.findByText('Get Started');
|
||||
gettingStartedButton.click();
|
||||
|
||||
cy
|
||||
.intercept('POST', '/api/v1/user?email*', {
|
||||
statusCode: 200,
|
||||
})
|
||||
.as('defaultUser');
|
||||
|
||||
cy.wait('@defaultUser');
|
||||
};
|
||||
|
||||
export interface LoginProps {
|
||||
email: string;
|
||||
name: string;
|
||||
}
|
||||
|
||||
export default Login;
|
||||
@@ -0,0 +1,49 @@
|
||||
import {
|
||||
getDefaultOption,
|
||||
getOptions,
|
||||
} from 'container/Header/DateTimeSelection/config';
|
||||
// import { AppState } from 'store/reducers';
|
||||
|
||||
const CheckRouteDefaultGlobalTimeOptions = ({
|
||||
route,
|
||||
}: CheckRouteDefaultGlobalTimeOptionsProps): void => {
|
||||
cy.visit(Cypress.env('baseUrl') + route);
|
||||
|
||||
const allOptions = getOptions(route);
|
||||
|
||||
const defaultValue = getDefaultOption(route);
|
||||
|
||||
const defaultSelectedOption = allOptions.find((e) => e.value === defaultValue);
|
||||
|
||||
expect(defaultSelectedOption).not.undefined;
|
||||
|
||||
cy
|
||||
.findAllByTestId('dropDown')
|
||||
.find('span')
|
||||
.then((el) => {
|
||||
const elements = el.get();
|
||||
|
||||
const item = elements[1];
|
||||
|
||||
expect(defaultSelectedOption?.label).to.be.equals(
|
||||
item.innerText,
|
||||
'Default option is not matching',
|
||||
);
|
||||
});
|
||||
|
||||
// cy
|
||||
// .window()
|
||||
// .its('store')
|
||||
// .invoke('getState')
|
||||
// .then((e: AppState) => {
|
||||
// const { globalTime } = e;
|
||||
// const { maxTime, minTime } = globalTime;
|
||||
// // @TODO match the global min time and max time according to the selected option
|
||||
// });
|
||||
};
|
||||
|
||||
export interface CheckRouteDefaultGlobalTimeOptionsProps {
|
||||
route: string;
|
||||
}
|
||||
|
||||
export default CheckRouteDefaultGlobalTimeOptions;
|
||||
21
frontend/cypress/fixtures/defaultAllChannels.json
Normal file
21
frontend/cypress/fixtures/defaultAllChannels.json
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"data": [
|
||||
{
|
||||
"created_at": 1638083159246,
|
||||
"data": "{}",
|
||||
"id": 1,
|
||||
"name": "First Channels",
|
||||
"type": "slack",
|
||||
"updated_at": 1638083159246
|
||||
},
|
||||
{
|
||||
"created_at": 1638083159246,
|
||||
"data": "{}",
|
||||
"id": 2,
|
||||
"name": "Second Channels",
|
||||
"type": "Slack",
|
||||
"updated_at": 1638083159246
|
||||
}
|
||||
],
|
||||
"message": "Success"
|
||||
}
|
||||
35
frontend/cypress/fixtures/defaultApp.json
Normal file
35
frontend/cypress/fixtures/defaultApp.json
Normal file
@@ -0,0 +1,35 @@
|
||||
[
|
||||
{
|
||||
"serviceName": "frontend",
|
||||
"p99": 1134610000,
|
||||
"avgDuration": 744523000,
|
||||
"numCalls": 267,
|
||||
"callRate": 0.89,
|
||||
"numErrors": 0,
|
||||
"errorRate": 0,
|
||||
"num4XX": 0,
|
||||
"fourXXRate": 0
|
||||
},
|
||||
{
|
||||
"serviceName": "customer",
|
||||
"p99": 734422400,
|
||||
"avgDuration": 348678530,
|
||||
"numCalls": 267,
|
||||
"callRate": 0.89,
|
||||
"numErrors": 0,
|
||||
"errorRate": 0,
|
||||
"num4XX": 0,
|
||||
"fourXXRate": 0
|
||||
},
|
||||
{
|
||||
"serviceName": "driver",
|
||||
"p99": 239234080,
|
||||
"avgDuration": 204662290,
|
||||
"numCalls": 267,
|
||||
"callRate": 0.89,
|
||||
"numErrors": 0,
|
||||
"errorRate": 0,
|
||||
"num4XX": 0,
|
||||
"fourXXRate": 0
|
||||
}
|
||||
]
|
||||
28
frontend/cypress/fixtures/defaultRules.json
Normal file
28
frontend/cypress/fixtures/defaultRules.json
Normal file
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
"rules": [
|
||||
{
|
||||
"labels": { "severity": "warning" },
|
||||
"annotations": {},
|
||||
"state": "firing",
|
||||
"name": "First Rule",
|
||||
"id": 1
|
||||
},
|
||||
{
|
||||
"labels": { "severity": "warning" },
|
||||
"annotations": {},
|
||||
"state": "firing",
|
||||
"name": "Second Rule",
|
||||
"id": 2
|
||||
},
|
||||
{
|
||||
"labels": { "severity": "P0" },
|
||||
"annotations": {},
|
||||
"state": "firing",
|
||||
"name": "Third Rule",
|
||||
"id": 3
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
1
frontend/cypress/fixtures/errorPercentage.json
Normal file
1
frontend/cypress/fixtures/errorPercentage.json
Normal file
@@ -0,0 +1 @@
|
||||
{ "status": "success", "data": { "resultType": "matrix", "result": [] } }
|
||||
29
frontend/cypress/fixtures/requestPerSecond.json
Normal file
29
frontend/cypress/fixtures/requestPerSecond.json
Normal file
@@ -0,0 +1,29 @@
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
"resultType": "matrix",
|
||||
"result": [
|
||||
{
|
||||
"metric": {},
|
||||
"values": [
|
||||
[1634741764.961, "0.9"],
|
||||
[1634741824.961, "0.9"],
|
||||
[1634741884.961, "0.8666666666666667"],
|
||||
[1634741944.961, "1"],
|
||||
[1634742004.961, "0.9166666666666666"],
|
||||
[1634742064.961, "0.95"],
|
||||
[1634742124.961, "0.9333333333333333"],
|
||||
[1634742184.961, "0.95"],
|
||||
[1634742244.961, "1.0333333333333334"],
|
||||
[1634742304.961, "0.9333333333333333"],
|
||||
[1634742364.961, "0.9166666666666666"],
|
||||
[1634742424.961, "0.9"],
|
||||
[1634742484.961, "1.0166666666666666"],
|
||||
[1634742544.961, "0.8333333333333334"],
|
||||
[1634742604.961, "0.9166666666666666"],
|
||||
[1634742664.961, "0.95"]
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
62
frontend/cypress/fixtures/serviceOverview.json
Normal file
62
frontend/cypress/fixtures/serviceOverview.json
Normal file
@@ -0,0 +1,62 @@
|
||||
[
|
||||
{
|
||||
"timestamp": 1634742600000000000,
|
||||
"p50": 720048500,
|
||||
"p95": 924409540,
|
||||
"p99": 974744300,
|
||||
"numCalls": 48,
|
||||
"callRate": 0.8,
|
||||
"numErrors": 0,
|
||||
"errorRate": 0
|
||||
},
|
||||
{
|
||||
"timestamp": 1634742540000000000,
|
||||
"p50": 712614000,
|
||||
"p95": 955580700,
|
||||
"p99": 1045595400,
|
||||
"numCalls": 59,
|
||||
"callRate": 0.98333335,
|
||||
"numErrors": 0,
|
||||
"errorRate": 0
|
||||
},
|
||||
{
|
||||
"timestamp": 1634742480000000000,
|
||||
"p50": 720842000,
|
||||
"p95": 887187600,
|
||||
"p99": 943676860,
|
||||
"numCalls": 53,
|
||||
"callRate": 0.8833333,
|
||||
"numErrors": 0,
|
||||
"errorRate": 0
|
||||
},
|
||||
{
|
||||
"timestamp": 1634742420000000000,
|
||||
"p50": 712287000,
|
||||
"p95": 908505540,
|
||||
"p99": 976507650,
|
||||
"numCalls": 58,
|
||||
"callRate": 0.96666664,
|
||||
"numErrors": 0,
|
||||
"errorRate": 0
|
||||
},
|
||||
{
|
||||
"timestamp": 1634742360000000000,
|
||||
"p50": 697125500,
|
||||
"p95": 975581800,
|
||||
"p99": 1190121900,
|
||||
"numCalls": 54,
|
||||
"callRate": 0.9,
|
||||
"numErrors": 0,
|
||||
"errorRate": 0
|
||||
},
|
||||
{
|
||||
"timestamp": 1634742300000000000,
|
||||
"p50": 711592500,
|
||||
"p95": 880559900,
|
||||
"p99": 1100105500,
|
||||
"numCalls": 40,
|
||||
"callRate": 0.6666667,
|
||||
"numErrors": 0,
|
||||
"errorRate": 0
|
||||
}
|
||||
]
|
||||
9
frontend/cypress/fixtures/topEndPoints.json
Normal file
9
frontend/cypress/fixtures/topEndPoints.json
Normal file
@@ -0,0 +1,9 @@
|
||||
[
|
||||
{
|
||||
"p50": 710824000,
|
||||
"p95": 1003231400,
|
||||
"p99": 1231265500,
|
||||
"numCalls": 299,
|
||||
"name": "HTTP GET /dispatch"
|
||||
}
|
||||
]
|
||||
24
frontend/cypress/integration/appLayout/index.spec.ts
Normal file
24
frontend/cypress/integration/appLayout/index.spec.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
/// <reference types="cypress" />
|
||||
import ROUTES from 'constants/routes';
|
||||
|
||||
describe('App Layout', () => {
|
||||
beforeEach(() => {
|
||||
cy.visit(Cypress.env('baseUrl'));
|
||||
});
|
||||
|
||||
it('Check the user is in Logged Out State', async () => {
|
||||
cy.location('pathname').then((e) => {
|
||||
expect(e).to.be.equal(ROUTES.SIGN_UP);
|
||||
});
|
||||
});
|
||||
|
||||
it('Logged In State', () => {
|
||||
const testEmail = 'test@test.com';
|
||||
const firstName = 'Test';
|
||||
|
||||
cy.login({
|
||||
email: testEmail,
|
||||
name: firstName,
|
||||
});
|
||||
});
|
||||
});
|
||||
52
frontend/cypress/integration/channels/index.spec.ts
Normal file
52
frontend/cypress/integration/channels/index.spec.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
/// <reference types="cypress" />
|
||||
|
||||
import ROUTES from 'constants/routes';
|
||||
|
||||
import defaultAllChannels from '../../fixtures/defaultAllChannels.json';
|
||||
|
||||
describe('Channels', () => {
|
||||
beforeEach(() => {
|
||||
window.localStorage.setItem('isLoggedIn', 'yes');
|
||||
|
||||
cy.visit(Cypress.env('baseUrl') + ROUTES.ALL_CHANNELS);
|
||||
});
|
||||
|
||||
it('Channels', () => {
|
||||
cy
|
||||
.intercept('**channels**', {
|
||||
statusCode: 200,
|
||||
fixture: 'defaultAllChannels',
|
||||
})
|
||||
.as('All Channels');
|
||||
|
||||
cy.wait('@All Channels');
|
||||
|
||||
cy
|
||||
.get('.ant-tabs-tab')
|
||||
.children()
|
||||
.then((e) => {
|
||||
const child = e.get();
|
||||
|
||||
const secondChild = child[1];
|
||||
|
||||
expect(secondChild.outerText).to.be.equals('Alert Channels');
|
||||
|
||||
expect(secondChild.ariaSelected).to.be.equals('true');
|
||||
});
|
||||
|
||||
cy
|
||||
.get('tbody')
|
||||
.should('be.visible')
|
||||
.then((e) => {
|
||||
const allChildren = e.children().get();
|
||||
expect(allChildren.length).to.be.equals(defaultAllChannels.data.length);
|
||||
|
||||
allChildren.forEach((e, index) => {
|
||||
expect(e.firstChild?.textContent).not.null;
|
||||
expect(e.firstChild?.textContent).to.be.equals(
|
||||
defaultAllChannels.data[index].name,
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
44
frontend/cypress/integration/globalTime/default.spec.ts
Normal file
44
frontend/cypress/integration/globalTime/default.spec.ts
Normal file
@@ -0,0 +1,44 @@
|
||||
/// <reference types="cypress" />
|
||||
import ROUTES from 'constants/routes';
|
||||
|
||||
describe('default time', () => {
|
||||
beforeEach(() => {
|
||||
window.localStorage.setItem('isLoggedIn', 'yes');
|
||||
});
|
||||
|
||||
it('Metrics Page default time', () => {
|
||||
cy.checkDefaultGlobalOption({
|
||||
route: ROUTES.APPLICATION,
|
||||
});
|
||||
});
|
||||
|
||||
it('Dashboard Page default time', () => {
|
||||
cy.checkDefaultGlobalOption({
|
||||
route: ROUTES.ALL_DASHBOARD,
|
||||
});
|
||||
});
|
||||
|
||||
it('Trace Page default time', () => {
|
||||
cy.checkDefaultGlobalOption({
|
||||
route: ROUTES.TRACE,
|
||||
});
|
||||
});
|
||||
|
||||
it('Instrumentation Page default time', () => {
|
||||
cy.checkDefaultGlobalOption({
|
||||
route: ROUTES.INSTRUMENTATION,
|
||||
});
|
||||
});
|
||||
|
||||
it('Service Page default time', () => {
|
||||
cy.checkDefaultGlobalOption({
|
||||
route: ROUTES.SERVICE_MAP,
|
||||
});
|
||||
});
|
||||
|
||||
it('Settings Page default time', () => {
|
||||
cy.checkDefaultGlobalOption({
|
||||
route: ROUTES.SETTINGS,
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,126 @@
|
||||
/// <reference types="cypress" />
|
||||
import getGlobalDropDownFormatedDate from 'lib/getGlobalDropDownFormatedDate';
|
||||
import { AppState } from 'store/reducers';
|
||||
|
||||
import topEndPoints from '../../fixtures/topEndPoints.json';
|
||||
|
||||
describe('Global Time Metrics Application', () => {
|
||||
beforeEach(() => {
|
||||
cy.visit(Cypress.env('baseUrl'));
|
||||
|
||||
const testEmail = 'test@test.com';
|
||||
const firstName = 'Test';
|
||||
|
||||
cy.login({
|
||||
email: testEmail,
|
||||
name: firstName,
|
||||
});
|
||||
});
|
||||
|
||||
it('Metrics Application', async () => {
|
||||
cy
|
||||
.intercept('GET', '/api/v1/services*', {
|
||||
fixture: 'defaultApp.json',
|
||||
})
|
||||
.as('defaultApps');
|
||||
|
||||
cy.wait('@defaultApps');
|
||||
|
||||
//clicking on frontend
|
||||
cy.get('tr:nth-child(1) > td:first-child').click();
|
||||
|
||||
cy
|
||||
.intercept('GET', '/api/v1/service/top_endpoints*', {
|
||||
fixture: 'topEndPoints.json',
|
||||
})
|
||||
.as('topEndPoints');
|
||||
|
||||
cy
|
||||
.intercept('GET', '/api/v1/service/overview?*', {
|
||||
fixture: 'serviceOverview.json',
|
||||
})
|
||||
.as('serviceOverview');
|
||||
|
||||
cy
|
||||
.intercept(
|
||||
'GET',
|
||||
`/api/v1/query_range?query=sum(rate(signoz_latency_count*`,
|
||||
{
|
||||
fixture: 'requestPerSecond.json',
|
||||
},
|
||||
)
|
||||
.as('requestPerSecond');
|
||||
|
||||
cy
|
||||
.window()
|
||||
.its('store')
|
||||
.invoke('getState')
|
||||
.then((e: AppState) => {
|
||||
const { globalTime } = e;
|
||||
|
||||
const { maxTime, minTime } = globalTime;
|
||||
|
||||
// intercepting metrics application call
|
||||
|
||||
cy.wait('@topEndPoints');
|
||||
cy.wait('@serviceOverview');
|
||||
//TODO add errorPercentage also
|
||||
// cy.wait('@errorPercentage');
|
||||
cy.wait('@requestPerSecond');
|
||||
|
||||
cy
|
||||
.get('tbody tr:first-child td:first-child')
|
||||
.then((el) => {
|
||||
const elements = el.get();
|
||||
|
||||
expect(elements.length).to.be.equals(1);
|
||||
|
||||
const element = elements[0];
|
||||
|
||||
expect(element.innerText).to.be.equals(topEndPoints[0].name);
|
||||
})
|
||||
.click();
|
||||
|
||||
cy
|
||||
.findAllByTestId('dropDown')
|
||||
.find('span.ant-select-selection-item')
|
||||
.then((e) => {
|
||||
const elements = e;
|
||||
|
||||
const element = elements[0];
|
||||
|
||||
const customSelectedTime = element.innerText;
|
||||
|
||||
const startTime = new Date(minTime / 1000000);
|
||||
const endTime = new Date(maxTime / 1000000);
|
||||
|
||||
const startString = getGlobalDropDownFormatedDate(startTime);
|
||||
const endString = getGlobalDropDownFormatedDate(endTime);
|
||||
|
||||
const result = `${startString} - ${endString}`;
|
||||
|
||||
expect(customSelectedTime).to.be.equals(result);
|
||||
});
|
||||
|
||||
cy
|
||||
.findByTestId('dropDown')
|
||||
.click()
|
||||
.then(() => {
|
||||
cy.findByTitle('Last 30 min').click();
|
||||
});
|
||||
|
||||
cy
|
||||
.findByTestId('dropDown')
|
||||
.find('span.ant-select-selection-item')
|
||||
.then((e) => {
|
||||
const elements = e;
|
||||
|
||||
const element = elements[0];
|
||||
|
||||
const selectedTime = element.innerText;
|
||||
|
||||
expect(selectedTime).to.be.equals('Last 30 min');
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
67
frontend/cypress/integration/metrics/index.spec.ts
Normal file
67
frontend/cypress/integration/metrics/index.spec.ts
Normal file
@@ -0,0 +1,67 @@
|
||||
/// <reference types="cypress" />
|
||||
import ROUTES from 'constants/routes';
|
||||
import convertToNanoSecondsToSecond from 'lib/convertToNanoSecondsToSecond';
|
||||
|
||||
import defaultApps from '../../fixtures/defaultApp.json';
|
||||
|
||||
describe('Metrics', () => {
|
||||
beforeEach(() => {
|
||||
cy.visit(Cypress.env('baseUrl'));
|
||||
|
||||
const testEmail = 'test@test.com';
|
||||
const firstName = 'Test';
|
||||
|
||||
cy.login({
|
||||
email: testEmail,
|
||||
name: firstName,
|
||||
});
|
||||
});
|
||||
|
||||
it('Default Apps', () => {
|
||||
cy
|
||||
.intercept('GET', '/api/v1/services*', {
|
||||
fixture: 'defaultApp.json',
|
||||
})
|
||||
.as('defaultApps');
|
||||
|
||||
cy.wait('@defaultApps');
|
||||
|
||||
cy.location().then((e) => {
|
||||
expect(e.pathname).to.be.equals(ROUTES.APPLICATION);
|
||||
|
||||
cy.get('tbody').then((elements) => {
|
||||
const trElements = elements.children();
|
||||
expect(trElements.length).to.be.equal(defaultApps.length);
|
||||
const getChildren = (row: Element): Element => {
|
||||
if (row.children.length === 0) {
|
||||
return row;
|
||||
}
|
||||
return getChildren(row.children[0]);
|
||||
};
|
||||
|
||||
// this is row element
|
||||
trElements.map((index, element) => {
|
||||
const [
|
||||
applicationElement,
|
||||
p99Element,
|
||||
errorRateElement,
|
||||
rpsElement,
|
||||
] = element.children;
|
||||
const applicationName = getChildren(applicationElement).innerHTML;
|
||||
const p99Name = getChildren(p99Element).innerHTML;
|
||||
const errorRateName = getChildren(errorRateElement).innerHTML;
|
||||
const rpsName = getChildren(rpsElement).innerHTML;
|
||||
const { serviceName, p99, errorRate, callRate } = defaultApps[index];
|
||||
expect(applicationName).to.be.equal(serviceName);
|
||||
expect(p99Name).to.be.equal(convertToNanoSecondsToSecond(p99).toString());
|
||||
expect(errorRateName).to.be.equals(
|
||||
parseFloat(errorRate.toString()).toFixed(2),
|
||||
);
|
||||
expect(rpsName).to.be.equals(callRate.toString());
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
export {};
|
||||
128
frontend/cypress/integration/rules/index.spec.ts
Normal file
128
frontend/cypress/integration/rules/index.spec.ts
Normal file
@@ -0,0 +1,128 @@
|
||||
/// <reference types="cypress" />
|
||||
|
||||
import ROUTES from 'constants/routes';
|
||||
|
||||
import defaultRules from '../../fixtures/defaultRules.json';
|
||||
|
||||
describe('Alerts', () => {
|
||||
beforeEach(() => {
|
||||
window.localStorage.setItem('isLoggedIn', 'yes');
|
||||
|
||||
cy
|
||||
.intercept('get', '*rules*', {
|
||||
fixture: 'defaultRules',
|
||||
})
|
||||
.as('defaultRules');
|
||||
|
||||
cy.visit(Cypress.env('baseUrl') + `${ROUTES.LIST_ALL_ALERT}`);
|
||||
|
||||
cy.wait('@defaultRules');
|
||||
});
|
||||
|
||||
it('Edit Rules Page Failure', async () => {
|
||||
cy
|
||||
.intercept('**/rules/**', {
|
||||
statusCode: 500,
|
||||
})
|
||||
.as('Get Rules Error');
|
||||
|
||||
cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
|
||||
const firstDelete = e[0];
|
||||
firstDelete.click();
|
||||
|
||||
cy.waitFor('@Get Rules Error');
|
||||
|
||||
cy
|
||||
.window()
|
||||
.location()
|
||||
.then((e) => {
|
||||
expect(e.pathname).to.be.equals(`/alerts/edit/1`);
|
||||
});
|
||||
|
||||
cy.findByText('Something went wrong').then((e) => {
|
||||
expect(e.length).to.be.equals(1);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Edit Rules Page Success', async () => {
|
||||
const text = 'this is the sample value';
|
||||
|
||||
cy
|
||||
.intercept('**/rules/**', {
|
||||
statusCode: 200,
|
||||
body: {
|
||||
data: {
|
||||
data: text,
|
||||
},
|
||||
},
|
||||
})
|
||||
.as('Get Rules Success');
|
||||
|
||||
cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
|
||||
const firstDelete = e[0];
|
||||
firstDelete.click();
|
||||
|
||||
cy.waitFor('@Get Rules Success');
|
||||
|
||||
cy.wait(1000);
|
||||
|
||||
cy.findByText('Save').then((e) => {
|
||||
const [el] = e.get();
|
||||
|
||||
el.click();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('All Rules are rendered correctly', async () => {
|
||||
cy
|
||||
.window()
|
||||
.location()
|
||||
.then(({ pathname }) => {
|
||||
expect(pathname).to.be.equals(ROUTES.LIST_ALL_ALERT);
|
||||
|
||||
cy.get('tbody').then((e) => {
|
||||
const tarray = e.children().get();
|
||||
|
||||
expect(tarray.length).to.be.equals(3);
|
||||
|
||||
tarray.forEach(({ children }, index) => {
|
||||
const name = children[1]?.textContent;
|
||||
const label = children[2]?.textContent;
|
||||
|
||||
expect(name).to.be.equals(defaultRules.data.rules[index].name);
|
||||
|
||||
const defaultLabels = defaultRules.data.rules[index].labels;
|
||||
|
||||
expect(label).to.be.equals(defaultLabels['severity']);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Rules are Deleted', async () => {
|
||||
cy
|
||||
.intercept('**/rules/**', {
|
||||
body: {
|
||||
data: 'Deleted',
|
||||
message: 'Success',
|
||||
},
|
||||
statusCode: 200,
|
||||
})
|
||||
.as('deleteRules');
|
||||
|
||||
cy.get('button.ant-btn.ant-btn-link:first-child').then((e) => {
|
||||
const firstDelete = e[0];
|
||||
|
||||
firstDelete.click();
|
||||
});
|
||||
|
||||
cy.wait('@deleteRules');
|
||||
|
||||
cy.get('tbody').then((e) => {
|
||||
const trray = e.children().get();
|
||||
expect(trray.length).to.be.equals(2);
|
||||
});
|
||||
});
|
||||
});
|
||||
26
frontend/cypress/plugins/index.ts
Normal file
26
frontend/cypress/plugins/index.ts
Normal file
@@ -0,0 +1,26 @@
|
||||
/// <reference types="cypress" />
|
||||
// ***********************************************************
|
||||
// This example plugins/index.js can be used to load plugins
|
||||
//
|
||||
// You can change the location of this file or turn off loading
|
||||
// the plugins file with the 'pluginsFile' configuration option.
|
||||
//
|
||||
// You can read more here:
|
||||
// https://on.cypress.io/plugins-guide
|
||||
// ***********************************************************
|
||||
|
||||
// This function is called when a project is opened or re-opened (e.g. due to
|
||||
// the project's config changing)
|
||||
|
||||
// cypress/plugins/index.ts
|
||||
|
||||
/// <reference types="cypress" />
|
||||
|
||||
/**
|
||||
* @type {Cypress.PluginConfig}
|
||||
*/
|
||||
module.exports = (): void => {
|
||||
return undefined;
|
||||
};
|
||||
|
||||
export {};
|
||||
24
frontend/cypress/support/commands.ts
Normal file
24
frontend/cypress/support/commands.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import '@testing-library/cypress/add-commands';
|
||||
|
||||
import CheckRouteDefaultGlobalTimeOptions, {
|
||||
CheckRouteDefaultGlobalTimeOptionsProps,
|
||||
} from '../CustomFunctions/checkRouteDefaultGlobalTimeOptions';
|
||||
import Login, { LoginProps } from '../CustomFunctions/Login';
|
||||
|
||||
Cypress.Commands.add('login', Login);
|
||||
Cypress.Commands.add(
|
||||
'checkDefaultGlobalOption',
|
||||
CheckRouteDefaultGlobalTimeOptions,
|
||||
);
|
||||
|
||||
declare global {
|
||||
// eslint-disable-next-line @typescript-eslint/no-namespace
|
||||
namespace Cypress {
|
||||
interface Chainable {
|
||||
login(props: LoginProps): void;
|
||||
checkDefaultGlobalOption(
|
||||
props: CheckRouteDefaultGlobalTimeOptionsProps,
|
||||
): void;
|
||||
}
|
||||
}
|
||||
}
|
||||
20
frontend/cypress/support/index.ts
Normal file
20
frontend/cypress/support/index.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
// ***********************************************************
|
||||
// This example support/index.js is processed and
|
||||
// loaded automatically before your test files.
|
||||
//
|
||||
// This is a great place to put global configuration and
|
||||
// behavior that modifies Cypress.
|
||||
//
|
||||
// You can change the location of this file or turn off
|
||||
// automatically serving support files with the
|
||||
// 'supportFile' configuration option.
|
||||
//
|
||||
// You can read more here:
|
||||
// https://on.cypress.io/configuration
|
||||
// ***********************************************************
|
||||
|
||||
// Import commands.js using ES2015 syntax:
|
||||
import './commands';
|
||||
|
||||
// Alternatively you can use CommonJS syntax:
|
||||
// require('./commands')
|
||||
13
frontend/cypress/tsconfig.json
Normal file
13
frontend/cypress/tsconfig.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"extends": "../tsconfig.json",
|
||||
"target": "es5",
|
||||
"lib": ["es5", "dom"],
|
||||
"compilerOptions": {
|
||||
"noEmit": true,
|
||||
// be explicit about types included
|
||||
// to avoid clashing with Jest types
|
||||
"types": ["cypress", "@testing-library/cypress", "node"],
|
||||
"isolatedModules": false
|
||||
},
|
||||
"include": ["../node_modules/cypress", "./**/*.ts"]
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
module.exports = {
|
||||
files: ["**/*.{ts,tsx}"],
|
||||
parser: "@typescript-eslint/parser",
|
||||
plugins: ["@typescript-eslint/eslint-plugin"],
|
||||
rules: {
|
||||
"react/jsx-filename-extension": [
|
||||
"error",
|
||||
{
|
||||
extensions: [".tsx"],
|
||||
},
|
||||
],
|
||||
"react/prop-types": "off",
|
||||
"@typescript-eslint/explicit-function-return-type": "error",
|
||||
},
|
||||
extends: [
|
||||
"plugin:@typescript-eslint/recommended",
|
||||
"plugin:@typescript-eslint/eslint-recommended",
|
||||
"prettier/@typescript-eslint",
|
||||
],
|
||||
env: {
|
||||
browser: true,
|
||||
jest: true,
|
||||
},
|
||||
};
|
||||
@@ -1,28 +0,0 @@
|
||||
const gulp = require("gulp");
|
||||
const gulpless = require("gulp-less");
|
||||
const postcss = require("gulp-postcss");
|
||||
const debug = require("gulp-debug");
|
||||
var csso = require("gulp-csso");
|
||||
const autteoprefixer = require("autoprefixer");
|
||||
const NpmImportPlugin = require("less-plugin-npm-import");
|
||||
|
||||
gulp.task("less", function () {
|
||||
const plugins = [autoprefixer()];
|
||||
|
||||
return gulp
|
||||
.src("src/themes/*-theme.less")
|
||||
.pipe(debug({ title: "Less files:" }))
|
||||
.pipe(
|
||||
gulpless({
|
||||
javascriptEnabled: true,
|
||||
plugins: [new NpmImportPlugin({ prefix: "~" })],
|
||||
}),
|
||||
)
|
||||
.pipe(postcss(plugins))
|
||||
.pipe(
|
||||
csso({
|
||||
debug: true,
|
||||
}),
|
||||
)
|
||||
.pipe(gulp.dest("./public"));
|
||||
});
|
||||
23
frontend/jest.config.ts
Normal file
23
frontend/jest.config.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
import type { Config } from '@jest/types';
|
||||
|
||||
const config: Config.InitialOptions = {
|
||||
clearMocks: true,
|
||||
coverageDirectory: 'coverage',
|
||||
coverageReporters: ['text', 'cobertura', 'html', 'json-summary'],
|
||||
moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
|
||||
modulePathIgnorePatterns: ['dist'],
|
||||
moduleNameMapper: {
|
||||
'\\.(css|less)$': '<rootDir>/__mocks__/cssMock.ts',
|
||||
},
|
||||
notify: true,
|
||||
notifyMode: 'always',
|
||||
testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
|
||||
transform: {
|
||||
'\\.(js|jsx|ts|tsx)?$': 'babel-jest',
|
||||
},
|
||||
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
|
||||
testPathIgnorePatterns: ['/node_modules/', '/public/'],
|
||||
moduleDirectories: ['node_modules', 'src'],
|
||||
};
|
||||
|
||||
export default config;
|
||||
4
frontend/jest.setup.ts
Normal file
4
frontend/jest.setup.ts
Normal file
@@ -0,0 +1,4 @@
|
||||
/**
|
||||
* Adds custom matchers from the react testing library to all tests
|
||||
*/
|
||||
import '@testing-library/jest-dom';
|
||||
21590
frontend/package-lock.json
generated
21590
frontend/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -5,10 +5,16 @@
	"main": "webpack.config.js",
	"scripts": {
		"dev": "NODE_ENV=development webpack serve --progress",
		"start": "node scripts/start.js",
		"build": "webpack --config=webpack.config.prod.js --progress",
		"prettify": "prettier --write .",
		"lint": "eslint src"
		"lint": "eslint . --debug",
		"lint:fix": "eslint . --fix --debug",
		"cypress:open": "cypress open",
		"cypress:run": "cypress run",
		"jest": "jest",
		"jest:coverage": "jest --coverage",
		"jest:watch": "jest --watch",
		"bundle:size": "bundlesize"
	},
	"engines": {
		"node": ">=12.13.0"
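These scripts are invoked through the package manager during development, for example with yarn:

cd frontend
yarn lint          # runs eslint . --debug
yarn jest:coverage # unit tests with coverage report
yarn cypress:open  # interactive e2e runner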
@@ -17,27 +23,10 @@
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@ant-design/icons": "^4.6.2",
|
||||
"@auth0/auth0-react": "^1.2.0",
|
||||
"@babel/core": "7.12.3",
|
||||
"@pmmmwh/react-refresh-webpack-plugin": "0.4.2",
|
||||
"@svgr/webpack": "5.4.0",
|
||||
"@testing-library/jest-dom": "^5.11.4",
|
||||
"@testing-library/react": "^11.1.0",
|
||||
"@testing-library/user-event": "^12.1.10",
|
||||
"@types/chart.js": "^2.9.28",
|
||||
"@types/d3": "^6.2.0",
|
||||
"@types/jest": "^26.0.15",
|
||||
"@types/node": "^14.14.7",
|
||||
"@types/react": "^17.0.0",
|
||||
"@types/react-dom": "^16.9.9",
|
||||
"@types/react-redux": "^7.1.11",
|
||||
"@types/react-router-dom": "^5.1.6",
|
||||
"@types/redux": "^3.6.0",
|
||||
"@types/styled-components": "^5.1.4",
|
||||
"@types/vis": "^4.21.21",
|
||||
"@typescript-eslint/eslint-plugin": "^4.5.0",
|
||||
"@typescript-eslint/parser": "^4.5.0",
|
||||
"antd": "^4.8.0",
|
||||
"antd": "^4.16.13",
|
||||
"axios": "^0.21.0",
|
||||
"babel-eslint": "^10.1.0",
|
||||
"babel-jest": "^26.6.0",
|
||||
@@ -45,70 +34,40 @@
|
||||
"babel-plugin-named-asset-import": "^0.3.7",
|
||||
"babel-preset-minify": "^0.5.1",
|
||||
"babel-preset-react-app": "^10.0.0",
|
||||
"bfj": "^7.0.2",
|
||||
"camelcase": "^6.1.0",
|
||||
"case-sensitive-paths-webpack-plugin": "2.3.0",
|
||||
"chart.js": "^2.9.4",
|
||||
"chart.js": "^3.4.0",
|
||||
"chartjs-adapter-date-fns": "^2.0.0",
|
||||
"css-loader": "4.3.0",
|
||||
"css-minimizer-webpack-plugin": "^3.2.0",
|
||||
"d3": "^6.2.0",
|
||||
"d3-flame-graph": "^3.1.1",
|
||||
"d3-tip": "^0.9.1",
|
||||
"dotenv": "8.2.0",
|
||||
"eslint": "^7.29.0",
|
||||
"eslint-config-react-app": "^6.0.0",
|
||||
"eslint-plugin-flowtype": "^5.2.0",
|
||||
"eslint-plugin-import": "^2.22.1",
|
||||
"eslint-plugin-jest": "^24.1.0",
|
||||
"eslint-plugin-jsx-a11y": "^6.3.1",
|
||||
"eslint-plugin-react": "^7.21.5",
|
||||
"eslint-plugin-react-hooks": "^4.2.0",
|
||||
"eslint-plugin-testing-library": "^3.9.2",
|
||||
"eslint-webpack-plugin": "^2.1.0",
|
||||
"file-loader": "6.1.1",
|
||||
"fs-extra": "^9.0.1",
|
||||
"history": "4.10.1",
|
||||
"html-webpack-plugin": "5.1.0",
|
||||
"identity-obj-proxy": "3.0.0",
|
||||
"jest": "26.6.0",
|
||||
"jest-circus": "26.6.0",
|
||||
"jest-resolve": "26.6.0",
|
||||
"jest-watch-typeahead": "0.6.1",
|
||||
"pnp-webpack-plugin": "1.6.4",
|
||||
"postcss-loader": "3.0.0",
|
||||
"postcss-normalize": "8.0.1",
|
||||
"postcss-preset-env": "6.7.0",
|
||||
"postcss-safe-parser": "5.0.2",
|
||||
"prop-types": "^15.6.2",
|
||||
"mini-css-extract-plugin": "^2.4.5",
|
||||
"monaco-editor": "^0.30.0",
|
||||
"react": "17.0.0",
|
||||
"react-app-polyfill": "^2.0.0",
|
||||
"react-chartjs-2": "^2.11.1",
|
||||
"react-chips": "^0.8.0",
|
||||
"react-css-theme-switcher": "^0.1.6",
|
||||
"react-dev-utils": "^11.0.0",
|
||||
"react-dom": "17.0.0",
|
||||
"react-force-graph": "^1.41.0",
|
||||
"react-graph-vis": "^1.0.5",
|
||||
"react-modal": "^3.12.1",
|
||||
"react-grid-layout": "^1.2.5",
|
||||
"react-redux": "^7.2.2",
|
||||
"react-refresh": "^0.8.3",
|
||||
"react-router-dom": "^5.2.0",
|
||||
"react-vis": "^1.11.7",
|
||||
"redux": "^4.0.5",
|
||||
"redux-thunk": "^2.3.0",
|
||||
"resolve": "1.18.1",
|
||||
"resolve-url-loader": "^3.1.2",
|
||||
"sass-loader": "8.0.2",
|
||||
"semver": "7.3.2",
|
||||
"style-loader": "1.3.0",
|
||||
"styled-components": "^5.2.1",
|
||||
"terser-webpack-plugin": "4.2.3",
|
||||
"ts-pnp": "1.2.0",
|
||||
"terser-webpack-plugin": "^5.2.5",
|
||||
"ts-node": "^10.2.1",
|
||||
"tsconfig-paths-webpack-plugin": "^3.5.1",
|
||||
"typescript": "^4.0.5",
|
||||
"url-loader": "4.1.1",
|
||||
"uuid": "^8.3.2",
|
||||
"web-vitals": "^0.2.4",
|
||||
"webpack": "^5.23.0",
|
||||
"webpack-dev-server": "^3.11.2",
|
||||
"webpack-manifest-plugin": "2.2.0",
|
||||
"workbox-webpack-plugin": "5.1.4"
|
||||
"webpack-dev-server": "^4.3.1"
|
||||
},
|
||||
"browserslist": {
|
||||
"production": [
|
||||
@@ -129,24 +88,50 @@
|
||||
"@babel/preset-env": "^7.12.17",
|
||||
"@babel/preset-react": "^7.12.13",
|
||||
"@babel/preset-typescript": "^7.12.17",
|
||||
"@testing-library/cypress": "^8.0.0",
|
||||
"@types/compression-webpack-plugin": "^9.0.0",
|
||||
"@types/copy-webpack-plugin": "^8.0.1",
|
||||
"@types/d3-tip": "^3.5.5",
|
||||
"@types/lodash-es": "^4.17.4",
|
||||
"@types/node": "^16.10.3",
|
||||
"@types/react-grid-layout": "^1.1.2",
|
||||
"@types/uuid": "^8.3.1",
|
||||
"@types/webpack": "^5.28.0",
|
||||
"@types/webpack-dev-server": "^4.3.0",
|
||||
"@typescript-eslint/eslint-plugin": "^4.28.2",
|
||||
"@typescript-eslint/parser": "^4.28.2",
|
||||
"@types/d3": "^6.2.0",
|
||||
"@types/jest": "^26.0.15",
|
||||
"@types/react": "^17.0.0",
|
||||
"@types/react-dom": "^16.9.9",
|
||||
"@types/react-redux": "^7.1.11",
|
||||
"@types/react-router-dom": "^5.1.6",
|
||||
"@types/redux": "^3.6.0",
|
||||
"@types/styled-components": "^5.1.4",
|
||||
"@types/vis": "^4.21.21",
|
||||
"@welldone-software/why-did-you-render": "^6.2.1",
|
||||
"autoprefixer": "^9.0.0",
|
||||
"babel-plugin-styled-components": "^1.12.0",
|
||||
"compression-webpack-plugin": "^8.0.0",
|
||||
"copy-webpack-plugin": "^7.0.0",
|
||||
"gulp": "^4.0.2",
|
||||
"gulp-csso": "^4.0.1",
|
||||
"gulp-debug": "^4.0.0",
|
||||
"gulp-less": "^4.0.1",
|
||||
"gulp-postcss": "^9.0.0",
|
||||
"bundlesize": "^0.18.1",
|
||||
"compression-webpack-plugin": "^9.0.0",
|
||||
"copy-webpack-plugin": "^8.1.0",
|
||||
"cypress": "^8.3.0",
|
||||
"eslint": "^7.30.0",
|
||||
"eslint-config-prettier": "^8.3.0",
|
||||
"eslint-config-standard": "^16.0.3",
|
||||
"eslint-plugin-import": "^2.23.4",
|
||||
"eslint-plugin-node": "^11.1.0",
|
||||
"eslint-plugin-prettier": "^4.0.0",
|
||||
"eslint-plugin-promise": "^5.1.0",
|
||||
"eslint-plugin-react": "^7.24.0",
|
||||
"eslint-plugin-simple-import-sort": "^7.0.0",
|
||||
"husky": "4.3.8",
|
||||
"less-plugin-npm-import": "^2.1.0",
|
||||
"lint-staged": "10.5.3",
|
||||
"lodash-es": "^4.17.21",
|
||||
"portfinder-sync": "^0.0.2",
|
||||
"prettier": "2.2.1",
|
||||
"react-hot-loader": "^4.13.0",
|
||||
"react-is": "^17.0.1",
|
||||
"ts-node": "^10.2.1",
|
||||
"webpack-cli": "^4.5.0"
|
||||
}
|
||||
}
|
||||
|
||||
10 frontend/public/css/antd.dark.min.css vendored Normal file
File diff suppressed because one or more lines are too long
10 frontend/public/css/antd.min.css vendored Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because it is too large
Binary file not shown. (Before: 5.2 KiB)
Binary file not shown. (Before: 9.4 KiB)
@@ -1,5 +1,5 @@
[SigNoz logo SVG updated: 320x120 and 2130x600 wordmark variants; raw path data omitted. Before: 5.4 KiB, After: 5.6 KiB]
6 frontend/sonar-project.properties Normal file
@@ -0,0 +1,6 @@
sonar.organization=signoz
sonar.projectKey=SigNoz_signoz

# relative paths to source directories. More details and properties are described
# in https://sonarcloud.io/documentation/project-administration/narrowing-the-focus/
sonar.sources=./src
46 frontend/src/AppRoutes/index.tsx Normal file
@@ -0,0 +1,46 @@
import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import ROUTES from 'constants/routes';
import AppLayout from 'container/AppLayout';
import history from 'lib/history';
import React, { Suspense } from 'react';
import { useSelector } from 'react-redux';
import { Redirect, Route, Router, Switch } from 'react-router-dom';
import { AppState } from 'store/reducers';
import AppReducer from 'types/reducer/app';

import routes from './routes';

const App = (): JSX.Element => {
	const { isLoggedIn } = useSelector<AppState, AppReducer>((state) => state.app);

	return (
		<Router history={history}>
			<AppLayout>
				<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
					<Switch>
						{routes.map(({ path, component, exact }, index) => (
							<Route key={index} exact={exact} path={path} component={component} />
						))}
						<Route
							path="/"
							exact
							render={(): JSX.Element =>
								isLoggedIn ? (
									<Redirect to={ROUTES.APPLICATION} />
								) : (
									<Redirect to={ROUTES.SIGN_UP} />
								)
							}
						/>
						<Route path="*" component={NotFound} />
					</Switch>
				</Suspense>
			</AppLayout>
		</Router>
	);
};

export default App;
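index.tsx routes through a shared history object imported from 'lib/history'. A minimal sketch of that module, assuming it simply exposes a browser history instance (the history package is already a dependency):

// src/lib/history.ts — sketch, assuming no extra configuration
import { createBrowserHistory } from 'history';

const history = createBrowserHistory();

export default history;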
90
frontend/src/AppRoutes/pageComponents.ts
Normal file
90
frontend/src/AppRoutes/pageComponents.ts
Normal file
@@ -0,0 +1,90 @@
|
||||
import Loadable from 'components/Loadable';
|
||||
|
||||
export const ServicesTablePage = Loadable(
|
||||
() => import(/* webpackChunkName: "ServicesTablePage" */ 'pages/Metrics'),
|
||||
);
|
||||
|
||||
export const ServiceMetricsPage = Loadable(
|
||||
() =>
|
||||
import(
|
||||
/* webpackChunkName: "ServiceMetricsPage" */ 'pages/MetricApplication'
|
||||
),
|
||||
);
|
||||
|
||||
export const ServiceMapPage = Loadable(
|
||||
() =>
|
||||
import(
|
||||
/* webpackChunkName: "ServiceMapPage" */ 'modules/Servicemap/ServiceMap'
|
||||
),
|
||||
);
|
||||
|
||||
export const TraceDetailPages = Loadable(
|
||||
() => import(/* webpackChunkName: "TraceDetailPage" */ 'pages/TraceDetails'),
|
||||
);
|
||||
|
||||
export const TraceGraphPage = Loadable(
|
||||
() =>
|
||||
import(
|
||||
/* webpackChunkName: "TraceGraphPage" */ 'modules/Traces/TraceGraphDef'
|
||||
),
|
||||
);
|
||||
|
||||
export const UsageExplorerPage = Loadable(
|
||||
() =>
|
||||
import(
|
||||
/* webpackChunkName: "UsageExplorerPage" */ 'modules/Usage/UsageExplorerDef'
|
||||
),
|
||||
);
|
||||
|
||||
export const SignupPage = Loadable(
|
||||
() => import(/* webpackChunkName: "SignupPage" */ 'pages/SignUp'),
|
||||
);
|
||||
|
||||
export const SettingsPage = Loadable(
|
||||
() => import(/* webpackChunkName: "SettingsPage" */ 'pages/Settings'),
|
||||
);
|
||||
|
||||
export const InstrumentationPage = Loadable(
|
||||
() =>
|
||||
import(
|
||||
/* webpackChunkName: "InstrumentationPage" */ 'pages/AddInstrumentation'
|
||||
),
|
||||
);
|
||||
|
||||
export const DashboardPage = Loadable(
|
||||
() => import(/* webpackChunkName: "DashboardPage" */ 'pages/Dashboard'),
|
||||
);
|
||||
|
||||
export const NewDashboardPage = Loadable(
|
||||
() => import(/* webpackChunkName: "New DashboardPage" */ 'pages/NewDashboard'),
|
||||
);
|
||||
|
||||
export const DashboardWidget = Loadable(
|
||||
() =>
|
||||
import(/* webpackChunkName: "DashboardWidgetPage" */ 'pages/DashboardWidget'),
|
||||
);
|
||||
|
||||
export const EditRulesPage = Loadable(
|
||||
() => import(/* webpackChunkName: "Alerts Edit Page" */ 'pages/EditRules'),
|
||||
);
|
||||
|
||||
export const ListAllALertsPage = Loadable(
|
||||
() => import(/* webpackChunkName: "All Alerts Page" */ 'pages/AlertList'),
|
||||
);
|
||||
|
||||
export const CreateNewAlerts = Loadable(
|
||||
() => import(/* webpackChunkName: "Create Alerts" */ 'pages/CreateAlert'),
|
||||
);
|
||||
|
||||
export const CreateAlertChannelAlerts = Loadable(
|
||||
() =>
|
||||
import(/* webpackChunkName: "Create Channels" */ 'pages/AlertChannelCreate'),
|
||||
);
|
||||
|
||||
export const EditAlertChannelsAlerts = Loadable(
|
||||
() => import(/* webpackChunkName: "Edit Channels" */ 'pages/ChannelsEdit'),
|
||||
);
|
||||
|
||||
export const AllAlertChannels = Loadable(
|
||||
() => import(/* webpackChunkName: "All Channels" */ 'pages/AllAlertChannels'),
|
||||
);
|
||||
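Each page above is wrapped by Loadable from 'components/Loadable'. A plausible minimal implementation, assuming it is a thin wrapper around React.lazy (the real component may add preloading or error handling):

// src/components/Loadable/index.ts — sketch only
import { ComponentType, lazy, LazyExoticComponent } from 'react';

function Loadable<T extends ComponentType<any>>(
	importFn: () => Promise<{ default: T }>,
): LazyExoticComponent<T> {
	// React.lazy defers fetching the webpack chunk until the page first renders
	return lazy(importFn);
}

export default Loadable;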
125
frontend/src/AppRoutes/routes.ts
Normal file
125
frontend/src/AppRoutes/routes.ts
Normal file
@@ -0,0 +1,125 @@
|
||||
import ROUTES from 'constants/routes';
|
||||
import DashboardWidget from 'pages/DashboardWidget';
|
||||
import { RouteProps } from 'react-router-dom';
|
||||
|
||||
import {
|
||||
AllAlertChannels,
|
||||
CreateAlertChannelAlerts,
|
||||
CreateNewAlerts,
|
||||
DashboardPage,
|
||||
EditAlertChannelsAlerts,
|
||||
EditRulesPage,
|
||||
InstrumentationPage,
|
||||
ListAllALertsPage,
|
||||
NewDashboardPage,
|
||||
ServiceMapPage,
|
||||
ServiceMetricsPage,
|
||||
ServicesTablePage,
|
||||
SettingsPage,
|
||||
SignupPage,
|
||||
TraceDetailPages,
|
||||
TraceGraphPage,
|
||||
UsageExplorerPage,
|
||||
} from './pageComponents';
|
||||
|
||||
const routes: AppRoutes[] = [
|
||||
{
|
||||
component: SignupPage,
|
||||
path: ROUTES.SIGN_UP,
|
||||
exact: true,
|
||||
},
|
||||
{
|
||||
component: ServicesTablePage,
|
||||
path: ROUTES.APPLICATION,
|
||||
exact: true,
|
||||
},
|
||||
{
|
||||
path: ROUTES.SERVICE_METRICS,
|
||||
exact: true,
|
||||
component: ServiceMetricsPage,
|
||||
},
|
||||
{
|
||||
path: ROUTES.SERVICE_MAP,
|
||||
component: ServiceMapPage,
|
||||
exact: true,
|
||||
},
|
||||
{
|
||||
path: ROUTES.TRACE_GRAPH,
|
||||
exact: true,
|
||||
component: TraceGraphPage,
|
||||
},
|
||||
{
|
||||
path: ROUTES.SETTINGS,
|
||||
exact: true,
|
||||
component: SettingsPage,
|
||||
},
|
||||
{
|
||||
path: ROUTES.USAGE_EXPLORER,
|
||||
exact: true,
|
||||
component: UsageExplorerPage,
|
||||
},
|
||||
{
|
||||
path: ROUTES.INSTRUMENTATION,
|
||||
exact: true,
|
||||
component: InstrumentationPage,
|
||||
},
|
||||
{
|
||||
path: ROUTES.ALL_DASHBOARD,
|
||||
exact: true,
|
||||
component: DashboardPage,
|
||||
},
|
||||
{
|
||||
path: ROUTES.DASHBOARD,
|
||||
exact: true,
|
||||
component: NewDashboardPage,
|
||||
},
|
||||
{
|
||||
path: ROUTES.DASHBOARD_WIDGET,
|
||||
exact: true,
|
||||
component: DashboardWidget,
|
||||
},
|
||||
{
|
||||
path: ROUTES.EDIT_ALERTS,
|
||||
exact: true,
|
||||
component: EditRulesPage,
|
||||
},
|
||||
{
|
||||
path: ROUTES.LIST_ALL_ALERT,
|
||||
exact: true,
|
||||
component: ListAllALertsPage,
|
||||
},
|
||||
{
|
||||
path: ROUTES.ALERTS_NEW,
|
||||
exact: true,
|
||||
component: CreateNewAlerts,
|
||||
},
|
||||
{
|
||||
path: ROUTES.TRACE,
|
||||
exact: true,
|
||||
component: TraceDetailPages,
|
||||
},
|
||||
{
|
||||
path: ROUTES.CHANNELS_NEW,
|
||||
exact: true,
|
||||
component: CreateAlertChannelAlerts,
|
||||
},
|
||||
{
|
||||
path: ROUTES.CHANNELS_EDIT,
|
||||
exact: true,
|
||||
component: EditAlertChannelsAlerts,
|
||||
},
|
||||
{
|
||||
path: ROUTES.ALL_CHANNELS,
|
||||
exact: true,
|
||||
component: AllAlertChannels,
|
||||
},
|
||||
];
|
||||
|
||||
interface AppRoutes {
|
||||
component: RouteProps['component'];
|
||||
path: RouteProps['path'];
|
||||
exact: RouteProps['exact'];
|
||||
isPrivate?: boolean;
|
||||
}
|
||||
|
||||
export default routes;
|
||||
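Every route is keyed off ROUTES from 'constants/routes'. A partial sketch of that map; the path strings below are illustrative assumptions, not the project's actual values:

// src/constants/routes.ts — hypothetical values for illustration
const ROUTES = {
	SIGN_UP: '/signup',
	APPLICATION: '/application',
	SERVICE_METRICS: '/application/:servicename',
	SERVICE_MAP: '/service-map',
	// remaining keys (TRACE, SETTINGS, DASHBOARD, alert/channel routes) follow the same pattern
};

export default ROUTES;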
57
frontend/src/api/ErrorResponseHandler.ts
Normal file
57
frontend/src/api/ErrorResponseHandler.ts
Normal file
@@ -0,0 +1,57 @@
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse } from 'types/api';
|
||||
import { ErrorStatusCode } from 'types/common';
|
||||
|
||||
export const ErrorResponseHandler = (error: AxiosError): ErrorResponse => {
|
||||
if (error.response) {
|
||||
// client received an error response (5xx, 4xx)
|
||||
// making the error status code as standard Error Status Code
|
||||
const statusCode = error.response.status as ErrorStatusCode;
|
||||
|
||||
if (statusCode >= 400 && statusCode < 500) {
|
||||
const { data } = error.response;
|
||||
|
||||
if (statusCode === 404) {
|
||||
return {
|
||||
statusCode,
|
||||
payload: null,
|
||||
error: 'Not Found',
|
||||
message: null,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
statusCode,
|
||||
payload: null,
|
||||
error: data.error,
|
||||
message: null,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
statusCode,
|
||||
payload: null,
|
||||
error: 'Something went wrong',
|
||||
message: null,
|
||||
};
|
||||
}
|
||||
if (error.request) {
|
||||
// client never received a response, or request never left
|
||||
console.error('client never received a response, or request never left');
|
||||
|
||||
return {
|
||||
statusCode: 500,
|
||||
payload: null,
|
||||
error: 'Something went wrong',
|
||||
message: null,
|
||||
};
|
||||
}
|
||||
// anything else
|
||||
console.error('any');
|
||||
return {
|
||||
statusCode: 500,
|
||||
payload: null,
|
||||
error: error.toString(),
|
||||
message: null,
|
||||
};
|
||||
};
|
||||
26
frontend/src/api/alerts/create.ts
Normal file
26
frontend/src/api/alerts/create.ts
Normal file
@@ -0,0 +1,26 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps, Props } from 'types/api/alerts/create';
|
||||
|
||||
const create = async (
|
||||
props: Props,
|
||||
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
|
||||
try {
|
||||
const response = await axios.post('/rules', {
|
||||
data: props.query,
|
||||
});
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: response.data.status,
|
||||
payload: response.data.data,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default create;
|
||||
24
frontend/src/api/alerts/delete.ts
Normal file
24
frontend/src/api/alerts/delete.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps, Props } from 'types/api/alerts/delete';
|
||||
|
||||
const deleteAlerts = async (
|
||||
props: Props,
|
||||
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
|
||||
try {
|
||||
const response = await axios.delete(`/rules/${props.id}`);
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: response.data.status,
|
||||
payload: response.data.data.rules,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default deleteAlerts;
|
||||
24
frontend/src/api/alerts/get.ts
Normal file
24
frontend/src/api/alerts/get.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps, Props } from 'types/api/alerts/get';
|
||||
|
||||
const get = async (
|
||||
props: Props,
|
||||
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
|
||||
try {
|
||||
const response = await axios.get(`/rules/${props.id}`);
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: response.data.status,
|
||||
payload: response.data.data,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default get;
|
||||
24
frontend/src/api/alerts/getAll.ts
Normal file
24
frontend/src/api/alerts/getAll.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps } from 'types/api/alerts/getAll';
|
||||
|
||||
const getAll = async (): Promise<
|
||||
SuccessResponse<PayloadProps> | ErrorResponse
|
||||
> => {
|
||||
try {
|
||||
const response = await axios.get('/rules');
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: response.data.status,
|
||||
payload: response.data.data.rules,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default getAll;
|
||||
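Each helper resolves to SuccessResponse<PayloadProps> | ErrorResponse rather than throwing. A hedged usage sketch for getAll; the field access assumes the response shapes returned above:

import getAllAlerts from 'api/alerts/getAll';

async function loadRules(): Promise<void> {
	const response = await getAllAlerts();

	if (response.error === null) {
		// success path: payload carries the rules returned by /rules
		console.log(response.payload);
	} else {
		console.error(response.statusCode, response.error);
	}
}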
30
frontend/src/api/alerts/getGroup.ts
Normal file
30
frontend/src/api/alerts/getGroup.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import { AxiosAlertManagerInstance } from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps, Props } from 'types/api/alerts/getGroups';
|
||||
|
||||
const getGroups = async (
|
||||
props: Props,
|
||||
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
|
||||
try {
|
||||
const queryParams = Object.keys(props)
|
||||
.map((e) => `${e}=${props[e]}`)
|
||||
.join('&');
|
||||
|
||||
const response = await AxiosAlertManagerInstance.get(
|
||||
`/alerts/groups?${queryParams}`,
|
||||
);
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: response.data.status,
|
||||
payload: response.data,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default getGroups;
|
||||
26
frontend/src/api/alerts/put.ts
Normal file
26
frontend/src/api/alerts/put.ts
Normal file
@@ -0,0 +1,26 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps, Props } from 'types/api/alerts/put';
|
||||
|
||||
const put = async (
|
||||
props: Props,
|
||||
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
|
||||
try {
|
||||
const response = await axios.put(`/rules/${props.id}`, {
|
||||
data: props.data,
|
||||
});
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: response.data.status,
|
||||
payload: response.data.data,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default put;
|
||||
@@ -1,3 +1,4 @@
const apiV1 = "/api/v1/";
const apiV1 = '/api/v1/';
export const apiV2 = '/api/alertmanager';

export default apiV1;
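All of the api modules above import a default axios instance plus AxiosAlertManagerInstance from 'api'. A minimal sketch of how that module could wire apiV1 and apiV2 as base URLs; the file path and the absence of interceptors are assumptions:

// src/api/index.ts — sketch only
import axios from 'axios';

import apiV1, { apiV2 } from '../constants/api'; // assumed location of the constants above

const ApiInstance = axios.create({ baseURL: apiV1 });

export const AxiosAlertManagerInstance = axios.create({ baseURL: apiV2 });

export default ApiInstance;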
5 frontend/src/api/browser/localstorage/get.ts Normal file
@@ -0,0 +1,5 @@
const get = (key: string): string | null => {
	return localStorage.getItem(key);
};

export default get;
5 frontend/src/api/browser/localstorage/remove.ts Normal file
@@ -0,0 +1,5 @@
const remove = (key: string): void => {
	window.localStorage.removeItem(key);
};

export default remove;
5 frontend/src/api/browser/localstorage/set.ts Normal file
@@ -0,0 +1,5 @@
const set = (key: string, value: string): void => {
	localStorage.setItem(key, value);
};

export default set;
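A short usage sketch for the localStorage helpers; the 'theme' key is hypothetical:

import get from 'api/browser/localstorage/get';
import remove from 'api/browser/localstorage/remove';
import set from 'api/browser/localstorage/set';

set('theme', 'dark');        // persist a value
const theme = get('theme');  // 'dark', or null if never set
remove('theme');             // clear it again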
35
frontend/src/api/channels/createSlack.ts
Normal file
35
frontend/src/api/channels/createSlack.ts
Normal file
@@ -0,0 +1,35 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps, Props } from 'types/api/channels/createSlack';
|
||||
|
||||
const create = async (
|
||||
props: Props,
|
||||
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
|
||||
try {
|
||||
const response = await axios.post('/channels', {
|
||||
name: props.name,
|
||||
slack_configs: [
|
||||
{
|
||||
send_resolved: true,
|
||||
api_url: props.api_url,
|
||||
channel: props.channel,
|
||||
title: props.title,
|
||||
text: props.text,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: 'Success',
|
||||
payload: response.data.data,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default create;
|
||||
24
frontend/src/api/channels/delete.ts
Normal file
24
frontend/src/api/channels/delete.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps, Props } from 'types/api/channels/delete';
|
||||
|
||||
const deleteChannel = async (
|
||||
props: Props,
|
||||
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
|
||||
try {
|
||||
const response = await axios.delete(`/channels/${props.id}`);
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: 'Success',
|
||||
payload: response.data.data,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default deleteChannel;
|
||||
35
frontend/src/api/channels/editSlack.ts
Normal file
35
frontend/src/api/channels/editSlack.ts
Normal file
@@ -0,0 +1,35 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps, Props } from 'types/api/channels/editSlack';
|
||||
|
||||
const editSlack = async (
|
||||
props: Props,
|
||||
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
|
||||
try {
|
||||
const response = await axios.put(`/channels/${props.id}`, {
|
||||
name: props.name,
|
||||
slack_configs: [
|
||||
{
|
||||
send_resolved: true,
|
||||
api_url: props.api_url,
|
||||
channel: props.channel,
|
||||
title: props.title,
|
||||
text: props.text,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: 'Success',
|
||||
payload: response.data.data,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default editSlack;
|
||||
24
frontend/src/api/channels/get.ts
Normal file
24
frontend/src/api/channels/get.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps, Props } from 'types/api/channels/get';
|
||||
|
||||
const get = async (
|
||||
props: Props,
|
||||
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
|
||||
try {
|
||||
const response = await axios.get(`/channels/${props.id}`);
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: 'Success',
|
||||
payload: response.data.data,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default get;
|
||||
24
frontend/src/api/channels/getAll.ts
Normal file
24
frontend/src/api/channels/getAll.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import axios from 'api';
|
||||
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
|
||||
import { AxiosError } from 'axios';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
import { PayloadProps } from 'types/api/channels/getAll';
|
||||
|
||||
const getAll = async (): Promise<
|
||||
SuccessResponse<PayloadProps> | ErrorResponse
|
||||
> => {
|
||||
try {
|
||||
const response = await axios.get('/channels');
|
||||
|
||||
return {
|
||||
statusCode: 200,
|
||||
error: null,
|
||||
message: 'Success',
|
||||
payload: response.data.data,
|
||||
};
|
||||
} catch (error) {
|
||||
return ErrorResponseHandler(error as AxiosError);
|
||||
}
|
||||
};
|
||||
|
||||
export default getAll;
|
||||
Some files were not shown because too many files have changed in this diff.