Compare commits
314 Commits
Author, date, and message columns are not preserved here; the abbreviated SHA1s of the 314 commits in this range, oldest group first:

```
87499d1ead 5fa8686fcf dc2db524c7 b3545b767a 55f653d92e 35f8e133a9
58d6487f77 708158f50f 0feab5aa93 b49ed913c7 419d2da363 df2844ea74
5e5f0f167f a6b05f0a3d f69aaa2cfb 3866f89d3e f9ac41b865 c5b5bfe540
f3c01a5155 033b64a62a 4aabfe7cf5 0218f701b2 f6d3f95768 cb1cd3555b
ced72f86a4 54d5666b92 4edc6dbeae e203276678 8eb2cf144e 2f7d208eb5
70fb5af19f fc7a94fa66 0077714cb0 723c31f6c5 1024483e58 cbcef2c880
0711c8855e 72cbc1a9e7 a9841755a7 03e6c33f82 3c5aa86ee2 06a89b21da
8c891f0e87 49dd5f2ef7 83d01e7a0d f8e97c9c5c b78ade2cf2 1b59719891
481c4e1271 fe0d2a967f e77a6f4d7a a023a7514e 3573c0863c b444c1e6b1
5698628839 3596f73fb1 5b22490d6d 39f9fc6900 f854cdd9d3 011b2167ba
a5f3a189f8 3fdfb51e02 43577c7ead 6661aa7686 8d54e3b766 6c446226eb
90b5f88413 381a4de88a 10ebd0cad6 6e7f04b492 20ac75e3d2 d6b75d76ca
41d3342a42 f3cb3b9840 08f3b089f4 1d8e5b6c0f 0dcded59e5 4799d3147b
b60b26189f c79520c874 2cc2a43e17 47d42e6a57 573d369d4b 3c151e3adb
ee1e2b824f 6f0cf03371 b8d228a339 c6ba2b4598 36adc17a34 3e32dabf46
74c994fbab 7844522691 12f2f80958 7b5ff54f47 afc97511af ae857d3fcd
0db2784d6b 47d1caf078 292b3f418e 4eb533fff8 7a10fe2b8c 4214e36d22
b9ab6d3fd4 23704b00ce 266894b0f8 4a9847abdd ba95ca682b 5942c758f0
e97d0ea51c 6019b38da5 3544ffdcc6 be7a687088 1066b217cb 709c286086
e4753e6b44 6f7999acb2 16738ea7e3 6b096576ee 262beef8f9 5dc5b2e366
43cc6dea92 6684640abe 363fb7bc34 dde4485839 44598e304d 4295a2756a
0a146910d6 690ed0f7f1 2f0d98ae51 fb92ddc822 15b0569b56 140533b790
532f274bd6 3200fd054e 8468cc863e 71911687bf 9644297d28 faa6fdfcde
aabf364cc6 4b861b2169 8d655bf419 90cb8ba9a1 f508ee7521 413caad0d8
666f601ecd 5cdcbef00c c2f607ab6b 2ca10bb87c 6fb2a6d4c9 464589e0ca
3b94dab3ce 9f481aacff 22f2e68db2 706f967246 1685f0e74f 74162456e5
b798518aa9 d7fd1d032b a2ac49bfc2 33541a2ac0 947b5bdefb bd7d14b1ca
43ed49f9d9 758b10f1bf ab1caf13fc 96b81817e0 bfeceb0ed2 c322fc72d9
e7b5410c5b 072693d57d a20794040a ab4a8dfbea fa0a065b95 abc8096a39
7cff07333f 5796d6cb8c 98367fd054 ff8df5dc36 f0c9f12897 5bcf7de440
79e96e544f 871e5ada9e 0401c27dbc 57c45f22d6 29f1883edd 5d903b5487
1b9683d699 65280cf4e1 703983a5f9 766a2123c5 1308f0f15f 6c634b99d0
9856335840 e85b405396 e2e965bc7f 7811fdd17a 0dca1237b9 f3d73f6d44
187927403a 0157b47424 156905afc7 a4878f6430 4489df6f39 06c075466b
62be3e7c13 bb84960442 52199361d5 f031845300 6f73bb6eca fe398bcc49
6781c29082 eb146491f2 ae325ec1ca fd6f0574f5 b819a90c80 a6848f6abd
abe65975c9 5cedd57aa2 80a7b9d16d 9f7b2542ec 4a4c9f26a2 c957c0f757
3ff0aa4b4b 063c9adba6 5c3ce146fa 481bb6e8b8 61e6316736 f9d1494657
0021b4d784 a5d5800871 16dc90bbd1 fff61379fe 08a415032c 3783ffdd4c
a8e4359d95 d9e94a4067 ae19eaa76a fff9954da2 a476c68f7e fc15aa6f1c
220edd139a 4192fd573d ca13d80205 59121bd932 aef935a817 f300518d61
18b608a1d8 738d62c9cf 38e694cd36 1281330c52 7b7cca7db7 3134e8c1cf
d00024b64a 4360cd0397 a688b6c60e 522e73b48e ba7e6fcf23 eefccafa5b
05bd6d52f1 d60daef171 d50530f58c 6957bd71ca ef8b50c19e 1585065fff
99c68ddbcd b08e859426 89fd3e4f55 a2492b0135 eb8ca5a7ca 80133240ca
7d7d112f40 add2d19614 adfe20e88a 8d84ce8f06 09ea7b9eb5 d3b83f5a41
77eba9a558 43e73e06fe 840d8b2e49 df751c7f38 cd07c743b6 46e6c34e51
42f7905b3b a6e68c6519 c7e3e6dc4e 9194ab08b6 3ecb2e35ef 9844dcdfb7
ddf5569ce9 83455e614e 831de18464 3b2a811f7b 2c7a5126fd 87f1597d4e
916663b4d5 b0e355eb64 69a39531f0 9c9ed741b2 e6eaaa660a 79eef5bb91
4d64f1dede bf177882e6 f6b29999c9 75815897b0 c9309eecaa 4264fc0f3a
ef854910db 6b8b2ae761 a48340a2ea e542d2ee09 08431131a9 1b0ec8ac43
2e0ddc7c7f 858a0cb0de
```
.github/CODEOWNERS (vendored, 6 changes)

```diff
@@ -5,6 +5,6 @@
 /frontend/ @YounixM
 /frontend/src/container/MetricsApplication @srikanthccv
 /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
-/deploy/ @prashant-shahi
-/sample-apps/ @prashant-shahi
-.github @prashant-shahi
+/deploy/ @SigNoz/devops
+/sample-apps/ @SigNoz/devops
+.github @SigNoz/devops
```
.github/ISSUE_TEMPLATE/request_dashboard.md (new file, 49 lines, vendored)

```markdown
---
name: Request Dashboard
about: Request a new dashboard for the SigNoz Dashboards repository
title: '[Dashboard Request] '
labels: 'dashboard-template'
assignees: ''

---

<!-- Use this template to request a new dashboard for the SigNoz Dashboards repository. Providing detailed information will help us understand your needs better and speed up the dashboard creation process. -->

## Dashboard Name

<!-- Provide the name for the requested dashboard. Be specific (e.g., "MySQL Monitoring Dashboard"). -->

## Expected Dashboard Sections and Panels

(Can be tweaked (add or remove panels/sections) according to available metrics)

### Section Name

<!-- Brief description of what this section should display (e.g., "Resource usage metrics for MySQL database"). -->

### Panel Name

<!-- Description of the panel (e.g., "Displays current CPU usage, memory usage, etc."). -->

<!-- - **Example:**
  - **Section**: Resource Metrics
  - **Panel**: CPU Usage - Displays the current CPU usage across all database instances.
  - **Panel**: Memory Usage - Displays the total memory used by the MySQL process. -->

<!-- Repeat this format for any additional sections or panels. -->

## Expected Dashboard Variables

<!-- List any dashboard variables that should be included in the dashboard. Examples could be `deployment.environment`, `hostname`, `region`, etc. -->

## Additional Comments or Requirements

<!-- Include any other details, special requirements, or specific visualizations you'd like to request for this dashboard. -->

## References or Screenshots

<!-- Add any references or screenshots of requested dashboard if available. -->

## 📋 Notes

Please review the [CONTRIBUTING.md](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md) for guidelines on dashboard structure, naming conventions, and how to submit a pull request.
```
.github/workflows/build.yaml (vendored, 8 changes)

```diff
@@ -8,6 +8,13 @@ on:
       - release/v*

 jobs:
+  check-no-ee-references:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Run check
+        run: make check-no-ee-references
+
   build-frontend:
     runs-on: ubuntu-latest
     steps:
@@ -36,7 +43,6 @@ jobs:
         run: |
           echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
           echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
-          echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
       - name: Install dependencies
         run: cd frontend && yarn install
       - name: Run ESLint
```
.github/workflows/push.yaml (vendored, 2 changes)

```diff
@@ -9,7 +9,6 @@ on:
       - v*

 jobs:
-
   image-build-and-push-query-service:
     runs-on: ubuntu-latest
     steps:
@@ -151,7 +150,6 @@ jobs:
         run: |
           echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
           echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
-          echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
           echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
           echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
           echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
```
.github/workflows/staging-deployment.yaml (vendored, 3 changes)

```diff
@@ -30,6 +30,7 @@ jobs:
           GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
           GCP_ZONE: ${{ secrets.GCP_ZONE }}
           GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
+          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
         run: |
           read -r -d '' COMMAND <<EOF || true
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
@@ -51,4 +52,4 @@ jobs:
           make build-frontend-amd64
           make run-testing
           EOF
-          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
+          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
```
.github/workflows/testing-deployment.yaml (vendored, 3 changes)

```diff
@@ -30,6 +30,7 @@ jobs:
           GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
           GCP_ZONE: ${{ secrets.GCP_ZONE }}
           GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
+          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
         run: |
           read -r -d '' COMMAND <<EOF || true
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
@@ -52,4 +53,4 @@ jobs:
           make build-frontend-amd64
           make run-testing
           EOF
-          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
+          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
```
.gitignore (vendored, 3 changes)

```diff
@@ -67,3 +67,6 @@ e2e/.auth
 # go
 vendor/
 **/main/**
+
+# git-town
+.git-branches.toml
```
.scripts/commentLinesForSetup.sh (deleted)

```diff
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# It Comments out the Line Query-Service & Frontend Section of deploy/docker/clickhouse-setup/docker-compose.yaml
-# Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml chnages.
-# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz
-
-sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
```
CONTRIBUTING.md

````diff
@@ -30,6 +30,7 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s
   - [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
 - [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
   - [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
+- [Contribute to Dashboards](#6-contribute-to-dashboards-)
 - [Other Ways to Contribute](#other-ways-to-contribute)

 # 1. General Instructions 📝
@@ -37,7 +38,7 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s
 ## 1.1 For Creating Issue(s)
 Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.

-**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
+**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)

 #### Details like these are incredibly useful:
@@ -56,7 +57,7 @@ Before making any significant changes and before filing a new issue, please chec
 Discussing your proposed changes ahead of time will make the contribution
 process smooth for everyone 🙌.

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -97,13 +98,14 @@ GitHub provides additional document on [forking a repository](https://help.githu
 stability and quality of the component.


-You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [SLACK](https://signoz.io/slack).
+You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack community](https://signoz.io/slack).

 ### Pointers:
 - If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
 - If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
 - If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
 - If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
+- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).

 <hr>
@@ -117,7 +119,7 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be

 - Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -127,14 +129,13 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be

 - [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
 - [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
+- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz)

 Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.

-**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
-
-⚠️ If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
+**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it.

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -188,7 +189,7 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
 ### Important Notes:
 The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 ## 3.2 Contribute to Frontend without installing SigNoz backend
@@ -209,7 +210,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi

 **Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -309,7 +310,7 @@ Click the button below. A workspace with all required environments will be creat

 > To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -365,10 +366,21 @@ curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-
   | HOTROD_NAMESPACE=sample-application bash
 ```

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 ---

+# 6. Contribute to Dashboards 📈
+
+**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)**
+
+To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief:
+
+1. Create a dashboard JSON file.
+2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed.
+3. Include screenshots of the dashboard in the `assets/` directory.
+4. Submit a pull request for review.
+
 ## Other Ways to Contribute
@@ -379,7 +391,6 @@ There are many other ways to get involved with the community and to participate
 - Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
 - Tell others about the project on Twitter, your blog, etc.

-
 Again, Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

 Thank You!
````
Makefile (9 changes)

```diff
@@ -178,6 +178,15 @@ clear-swarm-ch:
 	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
 		sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"

+check-no-ee-references:
+	@echo "Checking for 'ee' package references in 'pkg' directory..."
+	@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
+		echo "Error: Found references to 'ee' packages in 'pkg' directory"; \
+		exit 1; \
+	else \
+		echo "No references to 'ee' packages found in 'pkg' directory"; \
+	fi
+
 test:
 	go test ./pkg/query-service/app/metrics/...
 	go test ./pkg/query-service/cache/...
```
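The Makefile target greps raw source text, so any line mentioning an `/ee/` path trips it. A hypothetical narrower variant, shown only as an illustration and not part of this change set, would parse import declarations instead; everything below (the `main` package, the hard-coded `pkg` root) is an assumption for the sketch:

```go
// Hypothetical, import-only version of the check-no-ee-references guard.
// Unlike the grep in the Makefile, it flags only real import paths that
// contain "/ee/", not comments or strings that happen to mention them.
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	bad := false
	filepath.Walk("pkg", func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() || !strings.HasSuffix(path, ".go") {
			return err
		}
		// Parse only the import section of each file; bodies are skipped.
		f, perr := parser.ParseFile(token.NewFileSet(), path, nil, parser.ImportsOnly)
		if perr != nil {
			return nil // skip unparsable files rather than failing the walk
		}
		for _, imp := range f.Imports {
			if strings.Contains(imp.Path.Value, "/ee/") {
				fmt.Printf("%s imports %s\n", path, imp.Path.Value)
				bad = true
			}
		}
		return nil
	})
	if bad {
		os.Exit(1) // same contract as the Makefile target: nonzero on violation
	}
}
```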
```diff
@@ -198,14 +198,14 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu

 #### Frontend

-- [Palash Gupta](https://github.com/palashgdev)
 - [Yunus M](https://github.com/YounixM)
 - [Rajat Dabade](https://github.com/Rajat-Dabade)
+- [Vikrant Gupta](https://github.com/vikrantgupta25)
+- [Sagar Rajput](https://github.com/SagarRajput-7)

 #### DevOps

 - [Prashant Shahi](https://github.com/prashant-shahi)
 - [Dhawal Sanghvi](https://github.com/dhawal1248)
+- [Vibhu Pandey](https://github.com/grandwizard28)

 <br /><br />
```
```diff
@@ -23,6 +23,9 @@
     [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
     -->
     <level>information</level>
+    <formatting>
+        <type>json</type>
+    </formatting>
     <log>/var/log/clickhouse-server/clickhouse-server.log</log>
     <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
     <!-- Rotation policy
@@ -649,12 +652,12 @@

          See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
     -->
     <!--
     <macros>
         <shard>01</shard>
         <replica>example01-01-1</replica>
     </macros>
     -->




     <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
```
```diff
@@ -146,11 +146,11 @@ services:
         condition: on-failure

   query-service:
-    image: signoz/query-service:0.49.0
+    image: signoz/query-service:0.55.0
     command:
       [
         "-config=/root/config/prometheus.yml",
-        # "--prefer-delta=true"
+        "--use-logs-new-schema=true"
       ]
     # ports:
     #   - "6060:6060" # pprof port
@@ -186,7 +186,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:0.48.0
+    image: signoz/frontend:0.55.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -199,7 +199,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/signoz-otel-collector:0.102.1
+    image: signoz/signoz-otel-collector:0.102.10
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -211,6 +211,7 @@ services:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
+      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
       - DOCKER_MULTI_NODE_CLUSTER=false
@@ -237,7 +238,7 @@ services:
       - query-service

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.102.1
+    image: signoz/signoz-schema-migrator:0.102.10
     deploy:
       restart_policy:
         condition: on-failure
```
```diff
@@ -36,6 +36,7 @@ receivers:
   #      endpoint: 0.0.0.0:6832
   hostmetrics:
     collection_interval: 30s
+    root_path: /hostfs
     scrapers:
       cpu: {}
       load: {}
@@ -143,6 +144,7 @@ exporters:
     dsn: tcp://clickhouse:9000/signoz_logs
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
     timeout: 10s
+    use_new_schema: true
 extensions:
   health_check:
     endpoint: 0.0.0.0:13133
@@ -153,6 +155,8 @@ extensions:

 service:
   telemetry:
+    logs:
+      encoding: json
     metrics:
       address: 0.0.0.0:8888
   extensions: [health_check, zpages, pprof]
```
```diff
@@ -23,6 +23,9 @@
     [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
     -->
     <level>information</level>
+    <formatting>
+        <type>json</type>
+    </formatting>
     <log>/var/log/clickhouse-server/clickhouse-server.log</log>
     <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
     <!-- Rotation policy
@@ -649,12 +652,12 @@

          See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
     -->
     <!--
     <macros>
         <shard>01</shard>
         <replica>example01-01-1</replica>
     </macros>
     -->




     <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
```
```diff
@@ -66,7 +66,7 @@ services:
       - --storage.path=/data

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.1}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -81,7 +81,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.102.1
+    image: signoz/signoz-otel-collector:0.102.10
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -93,6 +93,8 @@ services:
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
+      - /var/lib/docker/containers:/var/lib/docker/containers:ro
+      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
     ports:
```
```diff
@@ -25,7 +25,7 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        # "--prefer-delta=true"
+        "--use-logs-new-schema=true"
       ]
     ports:
       - "6060:6060"
```
```diff
@@ -164,13 +164,13 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.49.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.55.0}
     container_name: signoz-query-service
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "-gateway-url=https://api.staging.signoz.cloud"
-        # "--prefer-delta=true"
+        "-gateway-url=https://api.staging.signoz.cloud",
+        "--use-logs-new-schema=true"
       ]
     # ports:
     #   - "6060:6060" # pprof port
@@ -204,7 +204,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.49.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.55.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -216,7 +216,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.1}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -230,7 +230,7 @@ services:


   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.1}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.10}
     container_name: signoz-otel-collector
     command:
       [
@@ -244,6 +244,7 @@ services:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
+      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
       - DOCKER_MULTI_NODE_CLUSTER=false
```
```diff
@@ -164,12 +164,12 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.49.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.55.0}
     container_name: signoz-query-service
     command:
       [
-        "-config=/root/config/prometheus.yml"
-        # "--prefer-delta=true"
+        "-config=/root/config/prometheus.yml",
+        "--use-logs-new-schema=true"
       ]
     # ports:
     #   - "6060:6060" # pprof port
@@ -203,7 +203,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.49.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.55.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -215,7 +215,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.1}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -229,7 +229,7 @@ services:


   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.1}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.10}
     container_name: signoz-otel-collector
     command:
       [
@@ -243,6 +243,7 @@ services:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
+      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
       - DOCKER_MULTI_NODE_CLUSTER=false
```
```diff
@@ -36,6 +36,7 @@ receivers:
   #      endpoint: 0.0.0.0:6832
   hostmetrics:
     collection_interval: 30s
+    root_path: /hostfs
     scrapers:
       cpu: {}
       load: {}
@@ -153,10 +154,13 @@ exporters:
     dsn: tcp://clickhouse:9000/signoz_logs
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
     timeout: 10s
+    use_new_schema: true
   # logging: {}

 service:
   telemetry:
+    logs:
+      encoding: json
     metrics:
       address: 0.0.0.0:8888
 extensions:
```
nginx-config.conf

```diff
@@ -1,3 +1,8 @@
+map $http_upgrade $connection_upgrade {
+    default upgrade;
+    '' close;
+}
+
 server {
     listen 3301;
     server_name _;
@@ -42,6 +47,14 @@ server {
         proxy_read_timeout 600s;
     }

+    location /ws {
+        proxy_pass http://query-service:8080/ws;
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade "websocket";
+        proxy_set_header Connection "upgrade";
+        proxy_read_timeout 86400;
+    }
+
     # redirect server error pages to the static page /50x.html
     #
     error_page 500 502 503 504 /50x.html;
```
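The new `/ws` location forwards the HTTP/1.1 Upgrade handshake to the query service. A hedged way to verify the route, assuming the SigNoz frontend nginx is reachable on `localhost:3301` (the address CONTRIBUTING.md uses for local development); the probe below is hypothetical tooling, not part of this change set, and only checks that nginx answers with `101 Switching Protocols`:

```go
// Minimal WebSocket-handshake probe for the new /ws route. It is not a real
// WebSocket client; it sends one Upgrade request over a raw TCP connection
// and prints the status line nginx returns.
package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:3301") // assumed local frontend address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// RFC 6455 sample key; fine for a handshake smoke test.
	fmt.Fprint(conn, "GET /ws HTTP/1.1\r\n"+
		"Host: localhost:3301\r\n"+
		"Connection: Upgrade\r\n"+
		"Upgrade: websocket\r\n"+
		"Sec-WebSocket-Version: 13\r\n"+
		"Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n\r\n")

	status, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		panic(err)
	}
	fmt.Print(status) // expect: HTTP/1.1 101 Switching Protocols
}
```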
ee/query-service/anomaly/daily.go (new file, 44 lines)

```go
package anomaly

import (
	"context"

	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type DailyProvider struct {
	BaseSeasonalProvider
}

var _ BaseProvider = (*DailyProvider)(nil)

func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
	return &dp.BaseSeasonalProvider
}

// NewDailyProvider uses the same generic option type
func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
	dp := &DailyProvider{
		BaseSeasonalProvider: BaseSeasonalProvider{},
	}

	for _, opt := range opts {
		opt(dp)
	}

	dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
		Reader:        dp.reader,
		Cache:         dp.cache,
		KeyGenerator:  queryBuilder.NewKeyGenerator(),
		FluxInterval:  dp.fluxInterval,
		FeatureLookup: dp.ff,
	})

	return dp
}

func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
	req.Seasonality = SeasonalityDaily
	return p.getAnomalies(ctx, req)
}
```
ee/query-service/anomaly/hourly.go (new file, 44 lines)

```go
package anomaly

import (
	"context"

	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type HourlyProvider struct {
	BaseSeasonalProvider
}

var _ BaseProvider = (*HourlyProvider)(nil)

func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
	return &hp.BaseSeasonalProvider
}

// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
	hp := &HourlyProvider{
		BaseSeasonalProvider: BaseSeasonalProvider{},
	}

	for _, opt := range opts {
		opt(hp)
	}

	hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
		Reader:        hp.reader,
		Cache:         hp.cache,
		KeyGenerator:  queryBuilder.NewKeyGenerator(),
		FluxInterval:  hp.fluxInterval,
		FeatureLookup: hp.ff,
	})

	return hp
}

func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
	req.Seasonality = SeasonalityHourly
	return p.getAnomalies(ctx, req)
}
```
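Both constructors accept the same `GenericProviderOption[T]` defined in seasonal.go: one generic option type serves every provider that can expose its `BaseSeasonalProvider`. A self-contained sketch of the pattern, with hypothetical `Base`/`Daily`/`WithName` names invented purely for illustration:

```go
// Minimal reproduction of the generic functional-options pattern used by
// DailyProvider/HourlyProvider: options are generic over any provider type
// that can hand back a pointer to its embedded base.
package main

import "fmt"

type Base struct{ name string }

// BaseProvider mirrors the role of anomaly.BaseProvider.
type BaseProvider interface{ GetBase() *Base }

// Option mirrors GenericProviderOption: one definition, many provider types.
type Option[T BaseProvider] func(T)

func WithName[T BaseProvider](name string) Option[T] {
	return func(p T) { p.GetBase().name = name }
}

type Daily struct{ Base }

func (d *Daily) GetBase() *Base { return &d.Base }

func NewDaily(opts ...Option[*Daily]) *Daily {
	d := &Daily{}
	for _, opt := range opts {
		opt(d) // each option mutates the embedded base through the interface
	}
	return d
}

func main() {
	d := NewDaily(WithName[*Daily]("daily"))
	fmt.Println(d.name) // daily
}
```

The payoff is that `WithCache`, `WithReader`, and friends are written once yet instantiate for `*DailyProvider`, `*HourlyProvider`, or any future seasonal provider.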
ee/query-service/anomaly/params.go (new file, 248 lines)

```go
package anomaly

import (
	"math"
	"time"

	"go.signoz.io/signoz/pkg/query-service/common"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

type Seasonality string

const (
	SeasonalityHourly Seasonality = "hourly"
	SeasonalityDaily  Seasonality = "daily"
	SeasonalityWeekly Seasonality = "weekly"
)

func (s Seasonality) String() string {
	return string(s)
}

var (
	oneWeekOffset = 24 * 7 * time.Hour.Milliseconds()
	oneDayOffset  = 24 * time.Hour.Milliseconds()
	oneHourOffset = time.Hour.Milliseconds()
	fiveMinOffset = 5 * time.Minute.Milliseconds()
)

func (s Seasonality) IsValid() bool {
	switch s {
	case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
		return true
	default:
		return false
	}
}

type GetAnomaliesRequest struct {
	Params      *v3.QueryRangeParamsV3
	Seasonality Seasonality
}

type GetAnomaliesResponse struct {
	Results []*v3.Result
}

// anomalyParams is the params for anomaly detection
// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
//
//	               ^                             ^
//	               |                             |
//	(rounded value for past period)      (seasonal growth)
//
// score = abs(value - prediction) / stddev (current_season_query)
type anomalyQueryParams struct {
	// CurrentPeriodQuery is the query range params for period user is looking at or eval window
	// Example: (now-5m, now), (now-30m, now), (now-1h, now)
	// The results obtained from this query are used to compare with predicted values
	// and to detect anomalies
	CurrentPeriodQuery *v3.QueryRangeParamsV3
	// PastPeriodQuery is the query range params for past seasonal period
	// Example: For weekly seasonality, (now-1w-5m, now-1w)
	//        : For daily seasonality, (now-1d-5m, now-1d)
	//        : For hourly seasonality, (now-1h-5m, now-1h)
	PastPeriodQuery *v3.QueryRangeParamsV3
	// CurrentSeasonQuery is the query range params for current period (seasonal)
	// Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
	//        : For daily seasonality, this is the query range params for the (now-1d-5m, now)
	//        : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
	CurrentSeasonQuery *v3.QueryRangeParamsV3
	// PastSeasonQuery is the query range params for past seasonal period to the current season
	// Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
	//        : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
	//        : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
	PastSeasonQuery *v3.QueryRangeParamsV3

	// Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
	// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
	//        : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
	//        : For hourly seasonality, this is the query range params for the (now-3h-5m, now-2h)
	Past2SeasonQuery *v3.QueryRangeParamsV3
	// Past3SeasonQuery is the query range params for past 3 seasonal period to the current season
	// Example: For weekly seasonality, this is the query range params for the (now-4w-5m, now-3w)
	//        : For daily seasonality, this is the query range params for the (now-4d-5m, now-3d)
	//        : For hourly seasonality, this is the query range params for the (now-4h-5m, now-3h)
	Past3SeasonQuery *v3.QueryRangeParamsV3
}

func updateStepInterval(req *v3.QueryRangeParamsV3) {
	start := req.Start
	end := req.End

	req.Step = int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60))
	for _, q := range req.CompositeQuery.BuilderQueries {
		// If the step interval is less than the minimum allowed step interval, set it to the minimum allowed step interval
		if minStep := common.MinAllowedStepInterval(start, end); q.StepInterval < minStep {
			q.StepInterval = minStep
		}
	}
}

func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonality) *anomalyQueryParams {
	start := req.Start
	end := req.End

	currentPeriodQuery := &v3.QueryRangeParamsV3{
		Start:          start,
		End:            end,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(currentPeriodQuery)

	var pastPeriodStart, pastPeriodEnd int64

	switch seasonality {
	// for one week period, we fetch the data from the past week with 5 min offset
	case SeasonalityWeekly:
		pastPeriodStart = start - oneWeekOffset - fiveMinOffset
		pastPeriodEnd = end - oneWeekOffset
	// for one day period, we fetch the data from the past day with 5 min offset
	case SeasonalityDaily:
		pastPeriodStart = start - oneDayOffset - fiveMinOffset
		pastPeriodEnd = end - oneDayOffset
	// for one hour period, we fetch the data from the past hour with 5 min offset
	case SeasonalityHourly:
		pastPeriodStart = start - oneHourOffset - fiveMinOffset
		pastPeriodEnd = end - oneHourOffset
	}

	pastPeriodQuery := &v3.QueryRangeParamsV3{
		Start:          pastPeriodStart,
		End:            pastPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(pastPeriodQuery)

	// seasonality growth trend
	var currentGrowthPeriodStart, currentGrowthPeriodEnd int64
	switch seasonality {
	case SeasonalityWeekly:
		currentGrowthPeriodStart = start - oneWeekOffset
		currentGrowthPeriodEnd = end
	case SeasonalityDaily:
		currentGrowthPeriodStart = start - oneDayOffset
		currentGrowthPeriodEnd = end
	case SeasonalityHourly:
		currentGrowthPeriodStart = start - oneHourOffset
		currentGrowthPeriodEnd = end
	}

	currentGrowthQuery := &v3.QueryRangeParamsV3{
		Start:          currentGrowthPeriodStart,
		End:            currentGrowthPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(currentGrowthQuery)

	var pastGrowthPeriodStart, pastGrowthPeriodEnd int64
	switch seasonality {
	case SeasonalityWeekly:
		pastGrowthPeriodStart = start - 2*oneWeekOffset
		pastGrowthPeriodEnd = start - 1*oneWeekOffset
	case SeasonalityDaily:
		pastGrowthPeriodStart = start - 2*oneDayOffset
		pastGrowthPeriodEnd = start - 1*oneDayOffset
	case SeasonalityHourly:
		pastGrowthPeriodStart = start - 2*oneHourOffset
		pastGrowthPeriodEnd = start - 1*oneHourOffset
	}

	pastGrowthQuery := &v3.QueryRangeParamsV3{
		Start:          pastGrowthPeriodStart,
		End:            pastGrowthPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(pastGrowthQuery)

	var past2GrowthPeriodStart, past2GrowthPeriodEnd int64
	switch seasonality {
	case SeasonalityWeekly:
		past2GrowthPeriodStart = start - 3*oneWeekOffset
		past2GrowthPeriodEnd = start - 2*oneWeekOffset
	case SeasonalityDaily:
		past2GrowthPeriodStart = start - 3*oneDayOffset
		past2GrowthPeriodEnd = start - 2*oneDayOffset
	case SeasonalityHourly:
		past2GrowthPeriodStart = start - 3*oneHourOffset
		past2GrowthPeriodEnd = start - 2*oneHourOffset
	}

	past2GrowthQuery := &v3.QueryRangeParamsV3{
		Start:          past2GrowthPeriodStart,
		End:            past2GrowthPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(past2GrowthQuery)

	var past3GrowthPeriodStart, past3GrowthPeriodEnd int64
	switch seasonality {
	case SeasonalityWeekly:
		past3GrowthPeriodStart = start - 4*oneWeekOffset
		past3GrowthPeriodEnd = start - 3*oneWeekOffset
	case SeasonalityDaily:
		past3GrowthPeriodStart = start - 4*oneDayOffset
		past3GrowthPeriodEnd = start - 3*oneDayOffset
	case SeasonalityHourly:
		past3GrowthPeriodStart = start - 4*oneHourOffset
		past3GrowthPeriodEnd = start - 3*oneHourOffset
	}

	past3GrowthQuery := &v3.QueryRangeParamsV3{
		Start:          past3GrowthPeriodStart,
		End:            past3GrowthPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(past3GrowthQuery)

	return &anomalyQueryParams{
		CurrentPeriodQuery: currentPeriodQuery,
		PastPeriodQuery:    pastPeriodQuery,
		CurrentSeasonQuery: currentGrowthQuery,
		PastSeasonQuery:    pastGrowthQuery,
		Past2SeasonQuery:   past2GrowthQuery,
		Past3SeasonQuery:   past3GrowthQuery,
	}
}

type anomalyQueryResults struct {
	CurrentPeriodResults []*v3.Result
	PastPeriodResults    []*v3.Result
	CurrentSeasonResults []*v3.Result
	PastSeasonResults    []*v3.Result
	Past2SeasonResults   []*v3.Result
	Past3SeasonResults   []*v3.Result
}
```
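The prediction and score formulas documented at the top of params.go are easy to rehearse with concrete numbers. The values below are invented for illustration only; they are not outputs of the real queries:

```go
// Worked example of the documented anomaly arithmetic:
//   prediction = avg(past period) + avg(current season)
//              - mean(avg(past season), avg(past 2 seasons), avg(past 3 seasons))
//   score      = (value - prediction) / stddev(current season)
package main

import "fmt"

func main() {
	pastPeriodAvg := 100.0                          // eval window, one season ago
	currentSeasonAvg := 120.0                       // whole current season
	pastSeasonAvgs := []float64{110.0, 105.0, 100.0} // three prior seasons

	var sum float64
	for _, v := range pastSeasonAvgs {
		sum += v
	}
	mean := sum / float64(len(pastSeasonAvgs)) // 105: long-run seasonal baseline

	// 100 (recent level) + 120 - 105 = 115: recent level plus seasonal growth.
	prediction := pastPeriodAvg + currentSeasonAvg - mean

	value, stddev := 160.0, 15.0
	score := (value - prediction) / stddev // (160 - 115) / 15 = 3 standard deviations
	fmt.Printf("prediction=%.1f score=%.1f\n", prediction, score)
}
```

A score of 3 here would sit at a typical z-score alert threshold: the observed value is three standard deviations above what the seasonal history predicts.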
ee/query-service/anomaly/provider.go (new file, 9 lines)

```go
package anomaly

import (
	"context"
)

type Provider interface {
	GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
}
```
466
ee/query-service/anomaly/seasonal.go
Normal file
466
ee/query-service/anomaly/seasonal.go
Normal file
@@ -0,0 +1,466 @@
|
||||
package anomaly
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"go.signoz.io/signoz/pkg/query-service/cache"
|
||||
"go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
|
||||
"go.signoz.io/signoz/pkg/query-service/postprocess"
|
||||
"go.signoz.io/signoz/pkg/query-service/utils/labels"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var (
|
||||
// TODO(srikanthccv): make this configurable?
|
||||
movingAvgWindowSize = 7
|
||||
)
|
||||
|
||||
// BaseProvider is an interface that includes common methods for all provider types
|
||||
type BaseProvider interface {
|
||||
GetBaseSeasonalProvider() *BaseSeasonalProvider
|
||||
}
|
||||
|
||||
// GenericProviderOption is a generic type for provider options
|
||||
type GenericProviderOption[T BaseProvider] func(T)
|
||||
|
||||
func WithCache[T BaseProvider](cache cache.Cache) GenericProviderOption[T] {
|
||||
return func(p T) {
|
||||
p.GetBaseSeasonalProvider().cache = cache
|
||||
}
|
||||
}
|
||||
|
||||
func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericProviderOption[T] {
|
||||
return func(p T) {
|
||||
p.GetBaseSeasonalProvider().keyGenerator = keyGenerator
|
||||
}
|
||||
}
|
||||
|
||||
func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
|
||||
return func(p T) {
|
||||
p.GetBaseSeasonalProvider().ff = ff
|
||||
}
|
||||
}
|
||||
|
||||
func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
|
||||
return func(p T) {
|
||||
p.GetBaseSeasonalProvider().reader = reader
|
||||
}
|
||||
}
|
||||
|
||||
type BaseSeasonalProvider struct {
|
||||
querierV2 interfaces.Querier
|
||||
reader interfaces.Reader
|
||||
fluxInterval time.Duration
|
||||
cache cache.Cache
|
||||
keyGenerator cache.KeyGenerator
|
||||
ff interfaces.FeatureLookup
|
||||
}
|
||||
|
||||
func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
|
||||
if !req.Seasonality.IsValid() {
|
||||
req.Seasonality = SeasonalityDaily
|
||||
}
|
||||
return prepareAnomalyQueryParams(req.Params, req.Seasonality)
|
||||
}
|
||||
|
||||
func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
|
||||
zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
|
||||
currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
currentPeriodResults, err = postprocess.PostProcessResult(currentPeriodResults, params.CurrentPeriodQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
|
||||
pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pastPeriodResults, err = postprocess.PostProcessResult(pastPeriodResults, params.PastPeriodQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
|
||||
currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
currentSeasonResults, err = postprocess.PostProcessResult(currentSeasonResults, params.CurrentSeasonQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
|
||||
pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pastSeasonResults, err = postprocess.PostProcessResult(pastSeasonResults, params.PastSeasonQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
|
||||
past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
past2SeasonResults, err = postprocess.PostProcessResult(past2SeasonResults, params.Past2SeasonQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
|
||||
past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
past3SeasonResults, err = postprocess.PostProcessResult(past3SeasonResults, params.Past3SeasonQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &anomalyQueryResults{
|
||||
CurrentPeriodResults: currentPeriodResults,
|
||||
PastPeriodResults: pastPeriodResults,
|
||||
CurrentSeasonResults: currentSeasonResults,
|
||||
PastSeasonResults: pastSeasonResults,
|
||||
Past2SeasonResults: past2SeasonResults,
|
||||
Past3SeasonResults: past3SeasonResults,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getMatchingSeries gets the matching series from the query result
|
||||
// for the given series
|
||||
func (p *BaseSeasonalProvider) getMatchingSeries(queryResult *v3.Result, series *v3.Series) *v3.Series {
|
||||
if queryResult == nil || len(queryResult.Series) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, curr := range queryResult.Series {
|
||||
currLabels := labels.FromMap(curr.Labels)
|
||||
seriesLabels := labels.FromMap(series.Labels)
|
||||
if currLabels.Hash() == seriesLabels.Hash() {
|
||||
return curr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaseSeasonalProvider) getAvg(series *v3.Series) float64 {
|
||||
if series == nil || len(series.Points) == 0 {
|
||||
return 0
|
||||
}
|
||||
var sum float64
|
||||
for _, smpl := range series.Points {
|
||||
sum += smpl.Value
|
||||
}
|
||||
return sum / float64(len(series.Points))
|
||||
}
|
||||
|
||||
func (p *BaseSeasonalProvider) getStdDev(series *v3.Series) float64 {
|
||||
if series == nil || len(series.Points) == 0 {
|
||||
return 0
|
||||
}
|
||||
avg := p.getAvg(series)
|
||||
var sum float64
|
||||
for _, smpl := range series.Points {
|
||||
sum += math.Pow(smpl.Value-avg, 2)
|
||||
}
|
||||
return math.Sqrt(sum / float64(len(series.Points)))
|
||||
}
|
||||
|
||||
// getMovingAvg gets the moving average for the given series
|
||||
// for the given window size and start index
|
||||
func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSize, startIdx int) float64 {
|
||||
if series == nil || len(series.Points) == 0 {
|
||||
return 0
|
||||
}
|
||||
if startIdx >= len(series.Points)-movingAvgWindowSize {
|
||||
startIdx = int(math.Max(0, float64(len(series.Points)-movingAvgWindowSize)))
|
||||
}
|
||||
var sum float64
|
||||
points := series.Points[startIdx:]
|
||||
for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
|
||||
sum += points[i].Value
|
||||
}
|
||||
avg := sum / float64(movingAvgWindowSize)
|
||||
return avg
|
||||
}
|
||||
|
||||
func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
|
||||
if len(floats) == 0 {
|
||||
return 0
|
||||
}
|
||||
var sum float64
|
||||
for _, f := range floats {
|
||||
sum += f
|
||||
}
|
||||
return sum / float64(len(floats))
|
||||
}
|
||||
|
||||
func (p *BaseSeasonalProvider) getPredictedSeries(
|
||||
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
|
||||
) *v3.Series {
|
||||
predictedSeries := &v3.Series{
|
||||
Labels: series.Labels,
|
||||
LabelsArray: series.LabelsArray,
|
||||
Points: []v3.Point{},
|
||||
}
|
||||
|
||||
// for each point in the series, get the predicted value
|
||||
// the predicted value is the moving average (with window size = 7) of the previous period series
|
||||
// plus the average of the current season series
|
||||
// minus the mean of the past season series, past2 season series and past3 season series
|
||||
for idx, curr := range series.Points {
|
||||
predictedValue :=
|
||||
p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) +
|
||||
p.getAvg(currentSeasonSeries) -
|
||||
p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
|
||||
|
||||
if predictedValue < 0 {
|
||||
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
|
||||
}
|
||||
|
||||
zap.L().Info("predictedSeries",
|
||||
zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)),
|
||||
zap.Float64("avg", p.getAvg(currentSeasonSeries)),
|
||||
zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))),
|
||||
zap.Any("labels", series.Labels),
|
||||
zap.Float64("predictedValue", predictedValue),
|
||||
)
|
||||
predictedSeries.Points = append(predictedSeries.Points, v3.Point{
|
||||
Timestamp: curr.Timestamp,
|
||||
Value: predictedValue,
|
||||
})
|
||||
}
|
||||
|
||||
return predictedSeries
|
||||
}
|
||||
|
||||
// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold:
// moving avg of the predicted series + z score threshold * std dev of the series
// moving avg of the predicted series - z score threshold * std dev of the series
func (p *BaseSeasonalProvider) getBounds(
    series, predictedSeries *v3.Series,
    zScoreThreshold float64,
) (*v3.Series, *v3.Series) {
    upperBoundSeries := &v3.Series{
        Labels:      series.Labels,
        LabelsArray: series.LabelsArray,
        Points:      []v3.Point{},
    }

    lowerBoundSeries := &v3.Series{
        Labels:      series.Labels,
        LabelsArray: series.LabelsArray,
        Points:      []v3.Point{},
    }

    for idx, curr := range series.Points {
        upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
        lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
        upperBoundSeries.Points = append(upperBoundSeries.Points, v3.Point{
            Timestamp: curr.Timestamp,
            Value:     upperBound,
        })
        lowerBoundSeries.Points = append(lowerBoundSeries.Points, v3.Point{
            Timestamp: curr.Timestamp,
            Value:     math.Max(lowerBound, 0),
        })
    }

    return upperBoundSeries, lowerBoundSeries
}
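For one point, with the moving average of the predicted series and the series standard deviation as plain numbers (illustrative values only; math.Max is used as in the original):

// Illustrative values only: bounds for a single point.
predictedMovingAvg := 120.0
seriesStdDev := 5.0
zScoreThreshold := 3.0

upper := predictedMovingAvg + zScoreThreshold*seriesStdDev            // 135
lower := math.Max(predictedMovingAvg-zScoreThreshold*seriesStdDev, 0) // 105, floored at 0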
// getExpectedValue gets the expected value for the given series
// for the given index
// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series
func (p *BaseSeasonalProvider) getExpectedValue(
    _, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, idx int,
) float64 {
    prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
    currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
    pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
    past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
    past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
    return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
}
// getScore gets the anomaly score for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getScore(
    series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
) float64 {
    expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
    return (value - expectedValue) / p.getStdDev(weekSeries)
}
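The score is a plain z-score against the seasonal expectation; with the defaults used later in getAnomalies, a point reads as anomalous when the score's magnitude exceeds the z_score_threshold (3 unless overridden). Illustrative numbers:

// Illustrative values only: z-score of one observed point.
observed := 138.0
expected := 120.0
stdDev := 6.0

score := (observed - expected) / stdDev // 3.0 — right at a threshold of 3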
// getAnomalyScores gets the anomaly score series for the given series,
// one score per point: (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getAnomalyScores(
    series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
) *v3.Series {
    anomalyScoreSeries := &v3.Series{
        Labels:      series.Labels,
        LabelsArray: series.LabelsArray,
        Points:      []v3.Point{},
    }

    for idx, curr := range series.Points {
        anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
        anomalyScoreSeries.Points = append(anomalyScoreSeries.Points, v3.Point{
            Timestamp: curr.Timestamp,
            Value:     anomalyScore,
        })
    }

    return anomalyScoreSeries
}
func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    anomalyParams := p.getQueryParams(req)
    anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
    if err != nil {
        return nil, err
    }

    currentPeriodResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.CurrentPeriodResults {
        currentPeriodResultsMap[result.QueryName] = result
    }

    pastPeriodResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.PastPeriodResults {
        pastPeriodResultsMap[result.QueryName] = result
    }

    currentSeasonResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.CurrentSeasonResults {
        currentSeasonResultsMap[result.QueryName] = result
    }

    pastSeasonResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.PastSeasonResults {
        pastSeasonResultsMap[result.QueryName] = result
    }

    past2SeasonResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.Past2SeasonResults {
        past2SeasonResultsMap[result.QueryName] = result
    }

    past3SeasonResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.Past3SeasonResults {
        past3SeasonResultsMap[result.QueryName] = result
    }

    for _, result := range currentPeriodResultsMap {
        funcs := req.Params.CompositeQuery.BuilderQueries[result.QueryName].Functions

        var zScoreThreshold float64
        for _, f := range funcs {
            if f.Name == v3.FunctionNameAnomaly {
                value, ok := f.NamedArgs["z_score_threshold"]
                if ok {
                    zScoreThreshold = value.(float64)
                } else {
                    zScoreThreshold = 3
                }
                break
            }
        }

        pastPeriodResult, ok := pastPeriodResultsMap[result.QueryName]
        if !ok {
            continue
        }
        currentSeasonResult, ok := currentSeasonResultsMap[result.QueryName]
        if !ok {
            continue
        }
        pastSeasonResult, ok := pastSeasonResultsMap[result.QueryName]
        if !ok {
            continue
        }
        past2SeasonResult, ok := past2SeasonResultsMap[result.QueryName]
        if !ok {
            continue
        }
        past3SeasonResult, ok := past3SeasonResultsMap[result.QueryName]
        if !ok {
            continue
        }

        for _, series := range result.Series {
            stdDev := p.getStdDev(series)
            zap.L().Info("stdDev", zap.Float64("stdDev", stdDev), zap.Any("labels", series.Labels))

            pastPeriodSeries := p.getMatchingSeries(pastPeriodResult, series)
            currentSeasonSeries := p.getMatchingSeries(currentSeasonResult, series)
            pastSeasonSeries := p.getMatchingSeries(pastSeasonResult, series)
            past2SeasonSeries := p.getMatchingSeries(past2SeasonResult, series)
            past3SeasonSeries := p.getMatchingSeries(past3SeasonResult, series)

            prevSeriesAvg := p.getAvg(pastPeriodSeries)
            currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
            pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
            past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
            past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
            zap.L().Info("getAvg", zap.Float64("prevSeriesAvg", prevSeriesAvg), zap.Float64("currentSeasonSeriesAvg", currentSeasonSeriesAvg), zap.Float64("pastSeasonSeriesAvg", pastSeasonSeriesAvg), zap.Float64("past2SeasonSeriesAvg", past2SeasonSeriesAvg), zap.Float64("past3SeasonSeriesAvg", past3SeasonSeriesAvg), zap.Any("labels", series.Labels))

            predictedSeries := p.getPredictedSeries(
                series,
                pastPeriodSeries,
                currentSeasonSeries,
                pastSeasonSeries,
                past2SeasonSeries,
                past3SeasonSeries,
            )
            result.PredictedSeries = append(result.PredictedSeries, predictedSeries)

            upperBoundSeries, lowerBoundSeries := p.getBounds(
                series,
                predictedSeries,
                zScoreThreshold,
            )
            result.UpperBoundSeries = append(result.UpperBoundSeries, upperBoundSeries)
            result.LowerBoundSeries = append(result.LowerBoundSeries, lowerBoundSeries)

            anomalyScoreSeries := p.getAnomalyScores(
                series,
                pastPeriodSeries,
                currentSeasonSeries,
                pastSeasonSeries,
                past2SeasonSeries,
                past3SeasonSeries,
            )
            result.AnomalyScores = append(result.AnomalyScores, anomalyScoreSeries)
        }
    }

    results := make([]*v3.Result, 0, len(currentPeriodResultsMap))
    for _, result := range currentPeriodResultsMap {
        results = append(results, result)
    }

    return &GetAnomaliesResponse{
        Results: results,
    }, nil
}
ee/query-service/anomaly/weekly.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package anomaly

import (
    "context"

    querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
    "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type WeeklyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*WeeklyProvider)(nil)

func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &wp.BaseSeasonalProvider
}

func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
    wp := &WeeklyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(wp)
    }

    wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
        Reader:        wp.reader,
        Cache:         wp.cache,
        KeyGenerator:  queryBuilder.NewKeyGenerator(),
        FluxInterval:  wp.fluxInterval,
        FeatureLookup: wp.ff,
    })

    return wp
}

func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    req.Seasonality = SeasonalityWeekly
    return p.getAnomalies(ctx, req)
}
@@ -38,7 +38,8 @@ type APIHandlerOptions struct {
    Cache   cache.Cache
    Gateway *httputil.ReverseProxy
    // Querier Flux Interval
-   FluxInterval time.Duration
+   FluxInterval     time.Duration
+   UseLogsNewSchema bool
}

type APIHandler struct {

@@ -63,6 +64,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
    LogsParsingPipelineController: opts.LogsParsingPipelineController,
    Cache:                         opts.Cache,
    FluxInterval:                  opts.FluxInterval,
+   UseLogsNewSchema:              opts.UseLogsNewSchema,
    })

    if err != nil {

@@ -175,6 +177,8 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
    am.ViewAccess(ah.listLicensesV2)).
    Methods(http.MethodGet)

+   router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
+
    // Gateway
    router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))

@@ -1,7 +1,9 @@
    package api

    import (
+       "errors"
        "net/http"
+       "strings"

        "github.com/gorilla/mux"
        "go.signoz.io/signoz/pkg/query-service/app/dashboards"

@@ -29,6 +31,10 @@ func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request

    // Get the dashboard UUID from the request
    uuid := mux.Vars(r)["uuid"]
+   if strings.HasPrefix(uuid, "integration") {
+       RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: errors.New("dashboards created by integrations cannot be unlocked")}, "You are not authorized to lock/unlock this dashboard")
+       return
+   }
    dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())

@@ -1,17 +1,48 @@
package api

import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "time"

    "go.signoz.io/signoz/ee/query-service/constants"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
    "go.uber.org/zap"
)

func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    featureSet, err := ah.FF().GetFeatureFlags()
    if err != nil {
        ah.HandleError(w, err, http.StatusInternalServerError)
        return
    }

    if constants.FetchFeatures == "true" {
        zap.L().Debug("fetching license")
        license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
        if err != nil {
            zap.L().Error("failed to fetch license", zap.Error(err))
        } else if license == nil {
            zap.L().Debug("no active license found")
        } else {
            licenseKey := license.Key

            zap.L().Debug("fetching zeus features")
            zeusFeatures, err := fetchZeusFeatures(constants.ZeusFeaturesURL, licenseKey)
            if err == nil {
                zap.L().Debug("fetched zeus features", zap.Any("features", zeusFeatures))
                // merge featureSet and zeusFeatures in featureSet with higher priority to zeusFeatures
                featureSet = MergeFeatureSets(zeusFeatures, featureSet)
            } else {
                zap.L().Error("failed to fetch zeus features", zap.Error(err))
            }
        }
    }

    if ah.opts.PreferSpanMetrics {
        for idx := range featureSet {
            feature := &featureSet[idx]

@@ -20,5 +51,96 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
        }
    }
    ah.Respond(w, featureSet)
}

// fetchZeusFeatures makes an HTTP GET request to the /zeusFeatures endpoint
// and returns the FeatureSet.
func fetchZeusFeatures(url, licenseKey string) (basemodel.FeatureSet, error) {
    // Check if the URL is empty
    if url == "" {
        return nil, fmt.Errorf("url is empty")
    }

    // Check if the licenseKey is empty
    if licenseKey == "" {
        return nil, fmt.Errorf("licenseKey is empty")
    }

    // Creating an HTTP client with a timeout for better control
    client := &http.Client{
        Timeout: 10 * time.Second,
    }
    // Creating a new GET request
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, fmt.Errorf("failed to create request: %w", err)
    }

    // Setting the custom header
    req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)

    // Making the GET request
    resp, err := client.Do(req)
    if err != nil {
        return nil, fmt.Errorf("failed to make GET request: %w", err)
    }
    defer func() {
        if resp != nil {
            resp.Body.Close()
        }
    }()

    // Check for non-OK status code
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%w: %d %s", errors.New("received non-OK HTTP status code"), resp.StatusCode, http.StatusText(resp.StatusCode))
    }

    // Reading and decoding the response body
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("failed to read response body: %w", err)
    }

    var zeusResponse ZeusFeaturesResponse
    if err := json.Unmarshal(body, &zeusResponse); err != nil {
        return nil, fmt.Errorf("%w: %v", errors.New("failed to decode response body"), err)
    }

    if zeusResponse.Status != "success" {
        return nil, fmt.Errorf("%w: %s", errors.New("failed to fetch zeus features"), zeusResponse.Status)
    }

    return zeusResponse.Data, nil
}
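A hedged usage sketch mirroring the call in getFeatureFlags above; in practice the URL comes from constants.ZeusFeaturesURL (the ZEUS_FEATURES_URL env var) and the key from the active license — both literals below are placeholders:

// Placeholder URL and license key, for illustration only.
zeusFeatures, err := fetchZeusFeatures("https://zeus.example.com/zeusFeatures", "license-key-123")
if err != nil {
    zap.L().Error("failed to fetch zeus features", zap.Error(err))
} else {
    featureSet = MergeFeatureSets(zeusFeatures, featureSet) // zeus features take precedence
}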
type ZeusFeaturesResponse struct {
    Status string               `json:"status"`
    Data   basemodel.FeatureSet `json:"data"`
}

// MergeFeatureSets merges two FeatureSet arrays with precedence to zeusFeatures.
func MergeFeatureSets(zeusFeatures, internalFeatures basemodel.FeatureSet) basemodel.FeatureSet {
    // Create a map to store the merged features
    featureMap := make(map[string]basemodel.Feature)

    // Add all features from the internalFeatures set to the map
    for _, feature := range internalFeatures {
        featureMap[feature.Name] = feature
    }

    // Add all features from the zeusFeatures set to the map
    // If a feature already exists (i.e., same name), the zeusFeature will overwrite it
    for _, feature := range zeusFeatures {
        featureMap[feature.Name] = feature
    }

    // Convert the map back to a FeatureSet slice
    var mergedFeatures basemodel.FeatureSet
    for _, feature := range featureMap {
        mergedFeatures = append(mergedFeatures, feature)
    }

    return mergedFeatures
}
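One consequence of merging through a map: the order of the returned FeatureSet is nondeterministic, because Go randomizes map iteration. Callers should treat the result as a set; the test file below accordingly compares with assert.ElementsMatch rather than assert.Equal. A small sketch of the precedence rule:

zeus := basemodel.FeatureSet{{Name: "GATEWAY", Active: true}}
internal := basemodel.FeatureSet{{Name: "GATEWAY", Active: false}, {Name: "ONBOARDING", Active: true}}
merged := MergeFeatureSets(zeus, internal)
// merged holds GATEWAY with Active: true (zeus wins) and ONBOARDING unchanged;
// element order is not guaranteed.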
ee/query-service/app/api/featureFlags_test.go (new file, 88 lines)
@@ -0,0 +1,88 @@
package api

import (
    "testing"

    "github.com/stretchr/testify/assert"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func TestMergeFeatureSets(t *testing.T) {
    tests := []struct {
        name             string
        zeusFeatures     basemodel.FeatureSet
        internalFeatures basemodel.FeatureSet
        expected         basemodel.FeatureSet
    }{
        {
            name:             "empty zeusFeatures and internalFeatures",
            zeusFeatures:     basemodel.FeatureSet{},
            internalFeatures: basemodel.FeatureSet{},
            expected:         basemodel.FeatureSet{},
        },
        {
            name: "non-empty zeusFeatures and empty internalFeatures",
            zeusFeatures: basemodel.FeatureSet{
                {Name: "Feature1", Active: true},
                {Name: "Feature2", Active: false},
            },
            internalFeatures: basemodel.FeatureSet{},
            expected: basemodel.FeatureSet{
                {Name: "Feature1", Active: true},
                {Name: "Feature2", Active: false},
            },
        },
        {
            name:         "empty zeusFeatures and non-empty internalFeatures",
            zeusFeatures: basemodel.FeatureSet{},
            internalFeatures: basemodel.FeatureSet{
                {Name: "Feature1", Active: true},
                {Name: "Feature2", Active: false},
            },
            expected: basemodel.FeatureSet{
                {Name: "Feature1", Active: true},
                {Name: "Feature2", Active: false},
            },
        },
        {
            name: "non-empty zeusFeatures and non-empty internalFeatures with no conflicts",
            zeusFeatures: basemodel.FeatureSet{
                {Name: "Feature1", Active: true},
                {Name: "Feature3", Active: false},
            },
            internalFeatures: basemodel.FeatureSet{
                {Name: "Feature2", Active: true},
                {Name: "Feature4", Active: false},
            },
            expected: basemodel.FeatureSet{
                {Name: "Feature1", Active: true},
                {Name: "Feature2", Active: true},
                {Name: "Feature3", Active: false},
                {Name: "Feature4", Active: false},
            },
        },
        {
            name: "non-empty zeusFeatures and non-empty internalFeatures with conflicts",
            zeusFeatures: basemodel.FeatureSet{
                {Name: "Feature1", Active: true},
                {Name: "Feature2", Active: false},
            },
            internalFeatures: basemodel.FeatureSet{
                {Name: "Feature1", Active: false},
                {Name: "Feature3", Active: true},
            },
            expected: basemodel.FeatureSet{
                {Name: "Feature1", Active: true},
                {Name: "Feature2", Active: false},
                {Name: "Feature3", Active: true},
            },
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            actual := MergeFeatureSets(test.zeusFeatures, test.internalFeatures)
            assert.ElementsMatch(t, test.expected, actual)
        })
    }
}
ee/query-service/app/api/queryrange.go (new file, 119 lines)
@@ -0,0 +1,119 @@
package api

import (
    "bytes"
    "fmt"
    "io"
    "net/http"

    "go.signoz.io/signoz/ee/query-service/anomaly"
    baseapp "go.signoz.io/signoz/pkg/query-service/app"
    "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
    "go.signoz.io/signoz/pkg/query-service/model"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    "go.uber.org/zap"
)

func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {

    bodyBytes, _ := io.ReadAll(r.Body)
    r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))

    queryRangeParams, apiErrorObj := baseapp.ParseQueryRangeParams(r)

    if apiErrorObj != nil {
        zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
        RespondError(w, apiErrorObj, nil)
        return
    }
    queryRangeParams.Version = "v4"

    // add temporality for each metric
    temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
    if temporalityErr != nil {
        zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
        return
    }

    anomalyQueryExists := false
    anomalyQuery := &v3.BuilderQuery{}
    if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
        for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
            for _, fn := range query.Functions {
                if fn.Name == v3.FunctionNameAnomaly {
                    anomalyQueryExists = true
                    anomalyQuery = query
                    break
                }
            }
        }
    }

    if anomalyQueryExists {
        // ensure all queries have metric data source, and there should be only one anomaly query
        for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
            if query.DataSource != v3.DataSourceMetrics {
                RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("all queries must have metric data source")}, nil)
                return
            }
        }

        // get the threshold, and seasonality from the anomaly query
        var seasonality anomaly.Seasonality
        for _, fn := range anomalyQuery.Functions {
            if fn.Name == v3.FunctionNameAnomaly {
                seasonalityStr, ok := fn.NamedArgs["seasonality"].(string)
                if !ok {
                    seasonalityStr = "daily"
                }
                if seasonalityStr == "weekly" {
                    seasonality = anomaly.SeasonalityWeekly
                } else if seasonalityStr == "daily" {
                    seasonality = anomaly.SeasonalityDaily
                } else {
                    seasonality = anomaly.SeasonalityHourly
                }
                break
            }
        }
        var provider anomaly.Provider
        switch seasonality {
        case anomaly.SeasonalityWeekly:
            provider = anomaly.NewWeeklyProvider(
                anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
                anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
                anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
                anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
            )
        case anomaly.SeasonalityDaily:
            provider = anomaly.NewDailyProvider(
                anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
                anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
                anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
                anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
            )
        case anomaly.SeasonalityHourly:
            provider = anomaly.NewHourlyProvider(
                anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
                anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
                anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
                anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
            )
        }
        anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
        if err != nil {
            RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
            return
        }
        uniqueResults := make(map[string]*v3.Result)
        for _, anomaly := range anomalies.Results {
            uniqueResults[anomaly.QueryName] = anomaly
            uniqueResults[anomaly.QueryName].IsAnomaly = true
        }
        aH.Respond(w, uniqueResults)
    } else {
        r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
        aH.QueryRangeV4(w, r)
    }
}
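A request takes the anomaly path above only when one of its builder queries carries the anomaly function. A hypothetical function spec matching the NamedArgs parsed here — the field names follow the v3.Function usage in this handler; "daily" is the fallback when seasonality is absent, any other unknown value maps to hourly, and z_score_threshold defaults to 3 when omitted (see getAnomalies):

// Hypothetical spec; NamedArgs keys mirror those read in queryRangeV4.
fn := v3.Function{
    Name: v3.FunctionNameAnomaly,
    NamedArgs: map[string]interface{}{
        "seasonality":       "weekly",
        "z_score_threshold": 2.5,
    },
}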
@@ -1,401 +0,0 @@
package db

import (
    "context"
    "crypto/md5"
    "encoding/json"
    "fmt"
    "reflect"
    "regexp"
    "sort"
    "strings"
    "time"

    "go.signoz.io/signoz/ee/query-service/model"
    baseconst "go.signoz.io/signoz/pkg/query-service/constants"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
    "go.signoz.io/signoz/pkg/query-service/utils"
    "go.uber.org/zap"
)

// GetMetricResultEE runs the query and returns list of time series
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {

    defer utils.Elapsed("GetMetricResult", nil)()
    zap.L().Info("Executing metric result query: ", zap.String("query", query))

    var hash string
    // If getSubTreeSpans function is used in the clickhouse query
    if strings.Contains(query, "getSubTreeSpans(") {
        var err error
        query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
        if err == fmt.Errorf("no spans found for the given query") {
            return nil, "", nil
        }
        if err != nil {
            return nil, "", err
        }
    }

    rows, err := r.conn.Query(ctx, query)
    if err != nil {
        zap.L().Error("Error in processing query", zap.Error(err))
        return nil, "", fmt.Errorf("error in processing query")
    }

    var (
        columnTypes = rows.ColumnTypes()
        columnNames = rows.Columns()
        vars        = make([]interface{}, len(columnTypes))
    )
    for i := range columnTypes {
        vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
    }
    // when group by is applied, each combination of cartesian product
    // of attributes is separate series. each item in metricPointsMap
    // represent a unique series.
    metricPointsMap := make(map[string][]basemodel.MetricPoint)
    // attribute key-value pairs for each group selection
    attributesMap := make(map[string]map[string]string)

    defer rows.Close()
    for rows.Next() {
        if err := rows.Scan(vars...); err != nil {
            return nil, "", err
        }
        var groupBy []string
        var metricPoint basemodel.MetricPoint
        groupAttributes := make(map[string]string)
        // Assuming that the end result row contains a timestamp, value and option labels
        // Label key and value are both strings.
        for idx, v := range vars {
            colName := columnNames[idx]
            switch v := v.(type) {
            case *string:
                // special case for returning all labels
                if colName == "fullLabels" {
                    var metric map[string]string
                    err := json.Unmarshal([]byte(*v), &metric)
                    if err != nil {
                        return nil, "", err
                    }
                    for key, val := range metric {
                        groupBy = append(groupBy, val)
                        groupAttributes[key] = val
                    }
                } else {
                    groupBy = append(groupBy, *v)
                    groupAttributes[colName] = *v
                }
            case *time.Time:
                metricPoint.Timestamp = v.UnixMilli()
            case *float64:
                metricPoint.Value = *v
            case **float64:
                // ch seems to return this type when column is derived from
                // SELECT count(*)/ SELECT count(*)
                floatVal := *v
                if floatVal != nil {
                    metricPoint.Value = *floatVal
                }
            case *float32:
                float32Val := float32(*v)
                metricPoint.Value = float64(float32Val)
            case *uint8, *uint64, *uint16, *uint32:
                if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
                    metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
                } else {
                    groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
                    groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
                }
            case *int8, *int16, *int32, *int64:
                if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
                    metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
                } else {
                    groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
                    groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
                }
            default:
                zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName))
            }
        }
        sort.Strings(groupBy)
        key := strings.Join(groupBy, "")
        attributesMap[key] = groupAttributes
        metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
    }

    var seriesList []*basemodel.Series
    for key := range metricPointsMap {
        points := metricPointsMap[key]
        // first point in each series could be invalid since the
        // aggregations are applied with point from prev series
        if len(points) != 0 && len(points) > 1 {
            points = points[1:]
        }
        attributes := attributesMap[key]
        series := basemodel.Series{Labels: attributes, Points: points}
        seriesList = append(seriesList, &series)
    }
    // err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
    // if err != nil {
    //     zap.L().Error("Error in dropping temporary table: ", err)
    //     return nil, err
    // }
    if hash == "" {
        return seriesList, hash, nil
    } else {
        return seriesList, "getSubTreeSpans" + hash, nil
    }
}

func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {

    zap.L().Debug("Executing getSubTreeSpans function")

    // str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`

    // process the query to fetch subTree query
    var subtreeInput string
    query, subtreeInput, hash = processQuery(query, hash)

    err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
    if err != nil {
        zap.L().Error("Error in dropping temporary table", zap.Error(err))
        return query, hash, err
    }

    // Create temporary table to store the getSubTreeSpans() results
    zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash))
    err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
    if err != nil {
        zap.L().Error("Error in creating temporary table", zap.Error(err))
        return query, hash, err
    }

    var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
    getSpansSubQuery := subtreeInput
    // Execute the subTree query
    zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery))
    err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)

    // zap.L().Info(getSpansSubQuery)

    if err != nil {
        zap.L().Error("Error in processing sql query", zap.Error(err))
        return query, hash, fmt.Errorf("error in processing sql query")
    }

    var searchScanResponses []basemodel.SearchSpanDBResponseItem

    // TODO : @ankit: I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this as an improvement later.
    // Fetch all the spans from of same TraceID so that we can build subtree
    modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)

    if len(getSpansSubQueryDBResponses) == 0 {
        return query, hash, fmt.Errorf("no spans found for the given query")
    }
    zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery))
    err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)

    if err != nil {
        zap.L().Error("Error in processing sql query", zap.Error(err))
        return query, hash, fmt.Errorf("error in processing sql query")
    }

    // Process model to fetch the spans
    zap.L().Debug("Processing model to fetch the spans")
    searchSpanResponses := []basemodel.SearchSpanResponseItem{}
    for _, item := range searchScanResponses {
        var jsonItem basemodel.SearchSpanResponseItem
        json.Unmarshal([]byte(item.Model), &jsonItem)
        jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano())
        if jsonItem.Events == nil {
            jsonItem.Events = []string{}
        }
        searchSpanResponses = append(searchSpanResponses, jsonItem)
    }
    // Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
    // Use map to store pointer to the spans to avoid duplicates and save memory
    zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))

    treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
    if err != nil {
        zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err))
        return query, hash, err
    }
    zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
    statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
    if err != nil {
        zap.L().Error("Error in preparing batch statement", zap.Error(err))
        return query, hash, err
    }
    for _, span := range treeSearchResponse {
        var parentID string
        if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" {
            parentID = span.References[0].SpanId
        }
        err = statement.Append(
            time.Unix(0, int64(span.TimeUnixNano)),
            span.TraceID,
            span.SpanID,
            parentID,
            span.RootSpanID,
            span.ServiceName,
            span.Name,
            span.RootName,
            uint64(span.DurationNano),
            int8(span.Kind),
            span.TagMap,
            span.Events,
        )
        if err != nil {
            zap.L().Error("Error in processing sql query", zap.Error(err))
            return query, hash, err
        }
    }
    zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
    err = statement.Send()
    if err != nil {
        zap.L().Error("Error in sending statement", zap.Error(err))
        return query, hash, err
    }
    return query, hash, nil
}

//lint:ignore SA4009 return hash is fed to the query
func processQuery(query string, hash string) (string, string, string) {
    re3 := regexp.MustCompile(`getSubTreeSpans`)

    submatchall3 := re3.FindAllStringIndex(query, -1)
    getSubtreeSpansMatchIndex := submatchall3[0][1]

    query2countParenthesis := query[getSubtreeSpansMatchIndex:]

    sqlCompleteIndex := 0
    countParenthesisImbalance := 0
    for i, char := range query2countParenthesis {

        if string(char) == "(" {
            countParenthesisImbalance += 1
        }
        if string(char) == ")" {
            countParenthesisImbalance -= 1
        }
        if countParenthesisImbalance == 0 {
            sqlCompleteIndex = i
            break
        }
    }
    subtreeInput := query2countParenthesis[1:sqlCompleteIndex]

    // hash the subtreeInput
    hmd5 := md5.Sum([]byte(subtreeInput))
    hash = fmt.Sprintf("%x", hmd5)

    // Reformat the query to use the getSubTreeSpans function
    query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:]
    return query, subtreeInput, hash
}
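The loop in processQuery scans from the first character after "getSubTreeSpans" and stops once the parentheses balance, which marks the end of the embedded sub-query. The same balancing as a standalone sketch (it assumes the string starts at the opening parenthesis, as it does above):

// matchingParen returns the index of the ')' that closes the '(' at s[0],
// or -1 if the parentheses never balance. Assumes s starts with '('.
func matchingParen(s string) int {
    depth := 0
    for i, ch := range s {
        if ch == '(' {
            depth++
        }
        if ch == ')' {
            depth--
        }
        if depth == 0 {
            return i
        }
    }
    return -1
}

// matchingParen("(a(b)c)") == 6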
// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans
func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) {

    var spans []*model.SpanForTraceDetails
    for _, spanItem := range payload {
        var parentID string
        if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
            parentID = spanItem.References[0].SpanId
        }
        span := &model.SpanForTraceDetails{
            TimeUnixNano: spanItem.TimeUnixNano,
            SpanID:       spanItem.SpanID,
            TraceID:      spanItem.TraceID,
            ServiceName:  spanItem.ServiceName,
            Name:         spanItem.Name,
            Kind:         spanItem.Kind,
            DurationNano: spanItem.DurationNano,
            TagMap:       spanItem.TagMap,
            ParentID:     parentID,
            Events:       spanItem.Events,
            HasError:     spanItem.HasError,
        }
        spans = append(spans, span)
    }

    zap.L().Debug("Building Tree")
    roots, err := buildSpanTrees(&spans)
    if err != nil {
        return nil, err
    }
    searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem)
    // Every span which was fetched from getSubTree Input SQL query is considered root
    // For each root, get the subtree spans
    for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
        targetSpan := &model.SpanForTraceDetails{}
        // zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
        // Search target span object in the tree
        for _, root := range roots {
            targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
            if targetSpan != nil {
                break
            }
            if err != nil {
                zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
                return nil, err
            }
        }
        if targetSpan == nil {
            return nil, nil
        }
        // Build subtree for the target span
        // Mark the target span as root by setting parent ID as empty string
        targetSpan.ParentID = ""
        preParents := []*model.SpanForTraceDetails{targetSpan}
        children := []*model.SpanForTraceDetails{}

        // Get the subtree child spans
        for i := 0; len(preParents) != 0; i++ {
            parents := []*model.SpanForTraceDetails{}
            for _, parent := range preParents {
                children = append(children, parent.Children...)
                parents = append(parents, parent.Children...)
            }
            preParents = parents
        }

        resultSpans := children
        // Add the target span to the result spans
        resultSpans = append(resultSpans, targetSpan)

        for _, item := range resultSpans {
            references := []basemodel.OtelSpanRef{
                {
                    TraceId: item.TraceID,
                    SpanId:  item.ParentID,
                    RefType: "CHILD_OF",
                },
            }

            if item.Events == nil {
                item.Events = []string{}
            }
            searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{
                TimeUnixNano: item.TimeUnixNano,
                SpanID:       item.SpanID,
                TraceID:      item.TraceID,
                ServiceName:  item.ServiceName,
                Name:         item.Name,
                Kind:         item.Kind,
                References:   references,
                DurationNano: item.DurationNano,
                TagMap:       item.TagMap,
                Events:       item.Events,
                HasError:     item.HasError,
                RootSpanID:   getSpansSubQueryDBResponse.SpanID,
                RootName:     targetSpan.Name,
            }
        }
    }
    return searchSpansResult, nil
}
@@ -25,8 +25,9 @@ func NewDataConnector(
    maxOpenConns int,
    dialTimeout time.Duration,
    cluster string,
+   useLogsNewSchema bool,
) *ClickhouseReader {
-   ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
+   ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
    return &ClickhouseReader{
        conn:  ch.GetConn(),
        appdb: localDB,

@@ -1,14 +1,15 @@
package app

import (
    "bufio"
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net"
    "net/http"
    "net/http/httputil"
    _ "net/http/pprof" // http profiler
    "os"
    "regexp"

@@ -27,7 +28,10 @@ import (
    "go.signoz.io/signoz/ee/query-service/dao"
    "go.signoz.io/signoz/ee/query-service/integrations/gateway"
    "go.signoz.io/signoz/ee/query-service/interfaces"
+   "go.signoz.io/signoz/ee/query-service/rules"
    baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+   "go.signoz.io/signoz/pkg/query-service/migrate"
+   "go.signoz.io/signoz/pkg/query-service/model"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

    licensepkg "go.signoz.io/signoz/ee/query-service/license"

@@ -41,6 +45,7 @@ import (
    "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
    "go.signoz.io/signoz/pkg/query-service/app/opamp"
    opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
+   "go.signoz.io/signoz/pkg/query-service/app/preferences"
    "go.signoz.io/signoz/pkg/query-service/cache"
    baseconst "go.signoz.io/signoz/pkg/query-service/constants"
    "go.signoz.io/signoz/pkg/query-service/healthcheck"

@@ -48,7 +53,7 @@ import (
    baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
    pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
-   rules "go.signoz.io/signoz/pkg/query-service/rules"
+   baserules "go.signoz.io/signoz/pkg/query-service/rules"
    "go.signoz.io/signoz/pkg/query-service/telemetry"
    "go.signoz.io/signoz/pkg/query-service/utils"
    "go.uber.org/zap"

@@ -72,12 +77,13 @@ type ServerOptions struct {
    FluxInterval string
    Cluster      string
    GatewayUrl   string
+   UseLogsNewSchema bool
}

// Server runs HTTP api service
type Server struct {
    serverOptions *ServerOptions
-   ruleManager   *rules.Manager
+   ruleManager   *baserules.Manager

    // public http router
    httpConn net.Listener
@@ -110,6 +116,10 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

    baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)

+   if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
+       return nil, err
+   }
+
    localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)

    if err != nil {

@@ -118,33 +128,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

    localDB.SetMaxOpenConns(10)

-   gatewayFeature := basemodel.Feature{
-       Name:       "GATEWAY",
-       Active:     false,
-       Usage:      0,
-       UsageLimit: -1,
-       Route:      "",
-   }
-
-   //Activate this feature if the url is not empty
-   var gatewayProxy *httputil.ReverseProxy
-   if serverOptions.GatewayUrl == "" {
-       gatewayFeature.Active = false
-       gatewayProxy, err = gateway.NewNoopProxy()
-       if err != nil {
-           return nil, err
-       }
-   } else {
-       zap.L().Info("Enabling gateway feature flag ...")
-       gatewayFeature.Active = true
-       gatewayProxy, err = gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
-       if err != nil {
-           return nil, err
-       }
-   }
+   gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
+   if err != nil {
+       return nil, err
+   }

    // initiate license manager
-   lm, err := licensepkg.StartManager("sqlite", localDB, gatewayFeature)
+   lm, err := licensepkg.StartManager("sqlite", localDB)
    if err != nil {
        return nil, err
    }
@@ -165,6 +155,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
        serverOptions.MaxOpenConns,
        serverOptions.DialTimeout,
        serverOptions.Cluster,
+       serverOptions.UseLogsNewSchema,
    )
    go qb.Start(readerReady)
    reader = qb

@@ -179,6 +170,14 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
            return nil, err
        }
    }
+   var c cache.Cache
+   if serverOptions.CacheConfigPath != "" {
+       cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
+       if err != nil {
+           return nil, err
+       }
+       c = cache.NewCache(cacheOpts)
+   }

    <-readerReady
    rm, err := makeRulesManager(serverOptions.PromConfigPath,

@@ -186,13 +185,23 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
        serverOptions.RuleRepoURL,
        localDB,
        reader,
+       c,
        serverOptions.DisableRules,
-       lm)
+       lm,
+       serverOptions.UseLogsNewSchema,
+   )

    if err != nil {
        return nil, err
    }

+   go func() {
+       err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
+       if err != nil {
+           zap.L().Error("error while running clickhouse migrations", zap.Error(err))
+       }
+   }()
+
    // initiate opamp
    _, err = opAmpModel.InitDB(localDB)
    if err != nil {

@@ -237,15 +246,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
    telemetry.GetInstance().SetReader(reader)
    telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)

-   var c cache.Cache
-   if serverOptions.CacheConfigPath != "" {
-       cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
-       if err != nil {
-           return nil, err
-       }
-       c = cache.NewCache(cacheOpts)
-   }
-
    fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)

    if err != nil {

@@ -269,6 +269,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
        Cache:        c,
        FluxInterval: fluxInterval,
        Gateway:      gatewayProxy,
+       UseLogsNewSchema: serverOptions.UseLogsNewSchema,
    }

    apiHandler, err := api.NewAPIHandler(apiOpts)

@@ -323,7 +324,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
        // ip here for alert manager
        AllowedOrigins: []string{"*"},
        AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
-       AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY"},
+       AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"},
    })

    handler := c.Handler(r)

@@ -340,7 +341,17 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e

    // add auth middleware
    getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
-       return auth.GetUserFromRequest(r, apiHandler)
+       user, err := auth.GetUserFromRequest(r, apiHandler)
+
+       if err != nil {
+           return nil, err
+       }
+
+       if user.User.OrgId == "" {
+           return nil, model.UnauthorizedError(errors.New("orgId is missing in the claims"))
+       }
+
+       return user, nil
    }
    am := baseapp.NewAuthMiddleware(getUserFromRequest)

@@ -354,11 +365,13 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
    apiHandler.RegisterIntegrationRoutes(r, am)
    apiHandler.RegisterQueryRangeV3Routes(r, am)
    apiHandler.RegisterQueryRangeV4Routes(r, am)
+   apiHandler.RegisterWebSocketPaths(r, am)
+   apiHandler.RegisterMessagingQueuesRoutes(r, am)

    c := cors.New(cors.Options{
        AllowedOrigins: []string{"*"},
        AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
-       AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control"},
+       AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"},
    })

    handler := c.Handler(r)

@@ -370,6 +383,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
    }, nil
}

+// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddleware is used for logging public api calls
func loggingMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@@ -381,6 +395,7 @@ func loggingMiddleware(next http.Handler) http.Handler {
    })
}

+// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddlewarePrivate is used for logging private api calls
// from internal services like alert manager
func loggingMiddlewarePrivate(next http.Handler) http.Handler {

@@ -393,27 +408,41 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
    })
}

+// TODO(remove): Implemented at pkg/http/middleware/logging.go
type loggingResponseWriter struct {
    http.ResponseWriter
    statusCode int
}

+// TODO(remove): Implemented at pkg/http/middleware/logging.go
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
    // WriteHeader(int) is not called if our response implicitly returns 200 OK, so
    // we default to that status code.
    return &loggingResponseWriter{w, http.StatusOK}
}

+// TODO(remove): Implemented at pkg/http/middleware/logging.go
func (lrw *loggingResponseWriter) WriteHeader(code int) {
    lrw.statusCode = code
    lrw.ResponseWriter.WriteHeader(code)
}

+// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Flush implements the http.Flush interface.
func (lrw *loggingResponseWriter) Flush() {
    lrw.ResponseWriter.(http.Flusher).Flush()
}

+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+// Support websockets
+func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+   h, ok := lrw.ResponseWriter.(http.Hijacker)
+   if !ok {
+       return nil, nil, errors.New("hijack not supported")
+   }
+   return h.Hijack()
+}

func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
    pathToExtractBodyFromV3 := "/api/v3/query_range"
    pathToExtractBodyFromV4 := "/api/v4/query_range"

@@ -550,6 +579,7 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
    })
}

+// TODO(remove): Implemented at pkg/http/middleware/timeout.go
func setTimeoutMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        ctx := r.Context()

@@ -702,8 +732,10 @@ func makeRulesManager(
    ruleRepoURL string,
    db *sqlx.DB,
    ch baseint.Reader,
+   cache cache.Cache,
    disableRules bool,
-   fm baseint.FeatureLookup) (*rules.Manager, error) {
+   fm baseint.FeatureLookup,
+   useLogsNewSchema bool) (*baserules.Manager, error) {

    // create engine
    pqle, err := pqle.FromConfigPath(promConfigPath)

@@ -719,12 +751,9 @@ func makeRulesManager(
    }

    // create manager opts
-   managerOpts := &rules.ManagerOptions{
+   managerOpts := &baserules.ManagerOptions{
        NotifierOpts: notifierOpts,
-       Queriers: &rules.Queriers{
-           PqlEngine: pqle,
-           Ch:        ch.GetConn(),
-       },
+       PqlEngine: pqle,
        RepoURL: ruleRepoURL,
        DBConn:  db,
        Context: context.Background(),

@@ -732,10 +761,15 @@ func makeRulesManager(
        DisableRules: disableRules,
        FeatureFlags: fm,
        Reader:       ch,
+       Cache:        cache,
        EvalDelay:    baseconst.GetEvalDelay(),
+
+       PrepareTaskFunc:  rules.PrepareTaskFunc,
+       UseLogsNewSchema: useLogsNewSchema,
    }

    // create Manager
-   manager, err := rules.NewManager(managerOpts)
+   manager, err := baserules.NewManager(managerOpts)
    if err != nil {
        return nil, fmt.Errorf("rule manager error: %v", err)
    }
@@ -11,8 +11,8 @@ const (
var LicenseSignozIo = "https://license.signoz.io/api/v1"
var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
-var SpanRenderLimitStr = GetOrDefaultEnv("SPAN_RENDER_LIMIT", "2500")
-var MaxSpansInTraceStr = GetOrDefaultEnv("MAX_SPANS_IN_TRACE", "250000")
+var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
+var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")

func GetOrDefaultEnv(key string, fallback string) string {
    v := os.Getenv(key)
@@ -20,11 +20,14 @@ import (
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
    // get auth domain from email domain
    domain, apierr := m.GetDomainByEmail(ctx, email)

    if apierr != nil {
        zap.L().Error("failed to get domain from email", zap.Error(apierr))
        return nil, model.InternalErrorStr("failed to get domain from email")
    }
    if domain == nil {
        zap.L().Error("email domain does not match any authenticated domain", zap.String("email", email))
        return nil, model.InternalErrorStr("email domain does not match any authenticated domain")
    }

    hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
    if err != nil {
@@ -5,5 +5,5 @@ import (
)

func NewNoopProxy() (*httputil.ReverseProxy, error) {
-   return nil, nil
+   return &httputil.ReverseProxy{}, nil
}
@@ -147,7 +147,7 @@ func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, a
    for _, l := range licenses {
        l.ParsePlan()

-       if l.Key == lm.activeLicense.Key {
+       if lm.activeLicense != nil && l.Key == lm.activeLicense.Key {
            l.IsCurrent = true
        }

@@ -87,6 +87,7 @@ func main() {
    var ruleRepoURL string
    var cluster string

+   var useLogsNewSchema bool
    var cacheConfigPath, fluxInterval string
    var enableQueryServiceLogOTLPExport bool
    var preferSpanMetrics bool

@@ -96,6 +97,7 @@ func main() {
    var dialTimeout time.Duration
    var gatewayUrl string

+   flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
    flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
    flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
    flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")

@@ -134,6 +136,7 @@ func main() {
        FluxInterval: fluxInterval,
        Cluster:      cluster,
        GatewayUrl:   gatewayUrl,
+       UseLogsNewSchema: useLogsNewSchema,
    }

    // Read the jwt secret key
@@ -11,6 +11,8 @@ const Enterprise = "ENTERPRISE_PLAN"
const DisableUpsell = "DISABLE_UPSELL"
const Onboarding = "ONBOARDING"
const ChatSupport = "CHAT_SUPPORT"
+const Gateway = "GATEWAY"
+const PremiumSupport = "PREMIUM_SUPPORT"

var BasicPlan = basemodel.FeatureSet{
	basemodel.Feature{
@@ -111,6 +113,27 @@ var BasicPlan = basemodel.FeatureSet{
		UsageLimit: -1,
		Route:      "",
	},
+	basemodel.Feature{
+		Name:       Gateway,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       PremiumSupport,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       basemodel.AnomalyDetection,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
}

var ProPlan = basemodel.FeatureSet{
@@ -205,6 +228,27 @@ var ProPlan = basemodel.FeatureSet{
		UsageLimit: -1,
		Route:      "",
	},
+	basemodel.Feature{
+		Name:       Gateway,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       PremiumSupport,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       basemodel.AnomalyDetection,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
}

var EnterprisePlan = basemodel.FeatureSet{
@@ -313,4 +357,25 @@ var EnterprisePlan = basemodel.FeatureSet{
		UsageLimit: -1,
		Route:      "",
	},
+	basemodel.Feature{
+		Name:       Gateway,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       PremiumSupport,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       basemodel.AnomalyDetection,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
}
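For reference, a self-contained sketch of how a plan's feature set can be queried. The `feature` struct below is a local stand-in mirroring the `basemodel.Feature` fields used above; the real query service goes through `basemodel.FeatureSet` and its feature-lookup machinery instead.

```go
package main

import "fmt"

// Local stand-in mirroring the basemodel.Feature fields in the plans above.
type feature struct {
	Name       string
	Active     bool
	Usage      int
	UsageLimit int
	Route      string
}

// isActive reports whether the named feature is enabled in a plan's feature set.
func isActive(set []feature, name string) bool {
	for _, f := range set {
		if f.Name == name {
			return f.Active
		}
	}
	return false
}

func main() {
	basicPlan := []feature{{Name: "GATEWAY", Active: false}, {Name: "PREMIUM_SUPPORT", Active: false}}
	proPlan := []feature{{Name: "GATEWAY", Active: true}, {Name: "PREMIUM_SUPPORT", Active: true}}
	fmt.Println(isActive(basicPlan, "GATEWAY"), isActive(proPlan, "GATEWAY")) // false true
}
```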
ee/query-service/rules/anomaly.go (new file, 393 lines)
@@ -0,0 +1,393 @@
package rules

import (
	"context"
	"encoding/json"
	"fmt"
	"math"
	"strings"
	"sync"
	"time"

	"go.uber.org/zap"

	"go.signoz.io/signoz/ee/query-service/anomaly"
	"go.signoz.io/signoz/pkg/query-service/cache"
	"go.signoz.io/signoz/pkg/query-service/common"
	"go.signoz.io/signoz/pkg/query-service/model"

	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/query-service/utils/labels"
	"go.signoz.io/signoz/pkg/query-service/utils/times"
	"go.signoz.io/signoz/pkg/query-service/utils/timestamp"

	"go.signoz.io/signoz/pkg/query-service/formatter"

	baserules "go.signoz.io/signoz/pkg/query-service/rules"

	yaml "gopkg.in/yaml.v2"
)

const (
	RuleTypeAnomaly = "anomaly_rule"
)

type AnomalyRule struct {
	*baserules.BaseRule

	mtx sync.Mutex

	reader interfaces.Reader

	// querierV2 is used for alerts created after the introduction of new metrics query builder
	querierV2 interfaces.Querier

	provider anomaly.Provider

	seasonality anomaly.Seasonality
}

func NewAnomalyRule(
	id string,
	p *baserules.PostableRule,
	featureFlags interfaces.FeatureLookup,
	reader interfaces.Reader,
	cache cache.Cache,
	opts ...baserules.RuleOption,
) (*AnomalyRule, error) {

	zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))

	baseRule, err := baserules.NewBaseRule(id, p, reader, opts...)
	if err != nil {
		return nil, err
	}

	t := AnomalyRule{
		BaseRule: baseRule,
	}

	switch strings.ToLower(p.RuleCondition.Seasonality) {
	case "hourly":
		t.seasonality = anomaly.SeasonalityHourly
	case "daily":
		t.seasonality = anomaly.SeasonalityDaily
	case "weekly":
		t.seasonality = anomaly.SeasonalityWeekly
	default:
		t.seasonality = anomaly.SeasonalityDaily
	}

	zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String()))

	querierOptsV2 := querierV2.QuerierOptions{
		Reader:        reader,
		Cache:         cache,
		KeyGenerator:  queryBuilder.NewKeyGenerator(),
		FeatureLookup: featureFlags,
	}

	t.querierV2 = querierV2.NewQuerier(querierOptsV2)
	t.reader = reader
	if t.seasonality == anomaly.SeasonalityHourly {
		t.provider = anomaly.NewHourlyProvider(
			anomaly.WithCache[*anomaly.HourlyProvider](cache),
			anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.HourlyProvider](reader),
			anomaly.WithFeatureLookup[*anomaly.HourlyProvider](featureFlags),
		)
	} else if t.seasonality == anomaly.SeasonalityDaily {
		t.provider = anomaly.NewDailyProvider(
			anomaly.WithCache[*anomaly.DailyProvider](cache),
			anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.DailyProvider](reader),
			anomaly.WithFeatureLookup[*anomaly.DailyProvider](featureFlags),
		)
	} else if t.seasonality == anomaly.SeasonalityWeekly {
		t.provider = anomaly.NewWeeklyProvider(
			anomaly.WithCache[*anomaly.WeeklyProvider](cache),
			anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.WeeklyProvider](reader),
			anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](featureFlags),
		)
	}
	return &t, nil
}

func (r *AnomalyRule) Type() baserules.RuleType {
	return RuleTypeAnomaly
}

func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) {

	zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.EvalWindow().Milliseconds()), zap.Int64("evalDelay", r.EvalDelay().Milliseconds()))

	start := ts.Add(-time.Duration(r.EvalWindow())).UnixMilli()
	end := ts.UnixMilli()

	if r.EvalDelay() > 0 {
		start = start - int64(r.EvalDelay().Milliseconds())
		end = end - int64(r.EvalDelay().Milliseconds())
	}
	// round to minute otherwise we could potentially miss data
	start = start - (start % (60 * 1000))
	end = end - (end % (60 * 1000))

	compositeQuery := r.Condition().CompositeQuery

	if compositeQuery.PanelType != v3.PanelTypeGraph {
		compositeQuery.PanelType = v3.PanelTypeGraph
	}

	// default mode
	return &v3.QueryRangeParamsV3{
		Start:          start,
		End:            end,
		Step:           int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60)),
		CompositeQuery: compositeQuery,
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}, nil
}

func (r *AnomalyRule) GetSelectedQuery() string {
	return r.Condition().GetSelectedQueryName()
}

func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, ts time.Time) (baserules.Vector, error) {

	params, err := r.prepareQueryRange(ts)
	if err != nil {
		return nil, err
	}
	err = r.PopulateTemporality(ctx, params)
	if err != nil {
		return nil, fmt.Errorf("internal error while setting temporality")
	}

	anomalies, err := r.provider.GetAnomalies(ctx, &anomaly.GetAnomaliesRequest{
		Params:      params,
		Seasonality: r.seasonality,
	})
	if err != nil {
		return nil, err
	}

	var queryResult *v3.Result
	for _, result := range anomalies.Results {
		if result.QueryName == r.GetSelectedQuery() {
			queryResult = result
			break
		}
	}

	var resultVector baserules.Vector

	scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
	zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON)))

	for _, series := range queryResult.AnomalyScores {
		smpl, shouldAlert := r.ShouldAlert(*series)
		if shouldAlert {
			resultVector = append(resultVector, smpl)
		}
	}
	return resultVector, nil
}

func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {

	prevState := r.State()

	valueFormatter := formatter.FromUnit(r.Unit())
	res, err := r.buildAndRunQuery(ctx, ts)

	if err != nil {
		return nil, err
	}

	r.mtx.Lock()
	defer r.mtx.Unlock()

	resultFPs := map[uint64]struct{}{}
	var alerts = make(map[uint64]*baserules.Alert, len(res))

	for _, smpl := range res {
		l := make(map[string]string, len(smpl.Metric))
		for _, lbl := range smpl.Metric {
			l[lbl.Name] = lbl.Value
		}

		value := valueFormatter.Format(smpl.V, r.Unit())
		threshold := valueFormatter.Format(r.TargetVal(), r.Unit())
		zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))

		tmplData := baserules.AlertTemplateData(l, value, threshold)
		// Inject some convenience variables that are easier to remember for users
		// who are not used to Go's templating system.
		defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}"

		// utility function to apply go template on labels and annotations
		expand := func(text string) string {

			tmpl := baserules.NewTemplateExpander(
				ctx,
				defs+text,
				"__alert_"+r.Name(),
				tmplData,
				times.Time(timestamp.FromTime(ts)),
				nil,
			)
			result, err := tmpl.Expand()
			if err != nil {
				result = fmt.Sprintf("<error expanding template: %s>", err)
				zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
			}
			return result
		}

		lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel)
		resultLabels := labels.NewBuilder(smpl.MetricOrig).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel).Labels()

		for name, value := range r.Labels().Map() {
			lb.Set(name, expand(value))
		}

		lb.Set(labels.AlertNameLabel, r.Name())
		lb.Set(labels.AlertRuleIdLabel, r.ID())
		lb.Set(labels.RuleSourceLabel, r.GeneratorURL())

		annotations := make(labels.Labels, 0, len(r.Annotations().Map()))
		for name, value := range r.Annotations().Map() {
			annotations = append(annotations, labels.Label{Name: common.NormalizeLabelName(name), Value: expand(value)})
		}
		if smpl.IsMissing {
			lb.Set(labels.AlertNameLabel, "[No data] "+r.Name())
		}

		lbs := lb.Labels()
		h := lbs.Hash()
		resultFPs[h] = struct{}{}

		if _, ok := alerts[h]; ok {
			zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
			err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
			return nil, err
		}

		alerts[h] = &baserules.Alert{
			Labels:            lbs,
			QueryResultLables: resultLabels,
			Annotations:       annotations,
			ActiveAt:          ts,
			State:             model.StatePending,
			Value:             smpl.V,
			GeneratorURL:      r.GeneratorURL(),
			Receivers:         r.PreferredChannels(),
			Missing:           smpl.IsMissing,
		}
	}

	zap.L().Info("number of alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))

	// alerts[h] is ready, add or update active list now
	for h, a := range alerts {
		// Check whether we already have alerting state for the identifying label set.
		// Update the last value and annotations if so, create a new alert entry otherwise.
		if alert, ok := r.Active[h]; ok && alert.State != model.StateInactive {

			alert.Value = a.Value
			alert.Annotations = a.Annotations
			alert.Receivers = r.PreferredChannels()
			continue
		}

		r.Active[h] = a
	}

	itemsToAdd := []model.RuleStateHistory{}

	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, a := range r.Active {
		labelsJSON, err := json.Marshal(a.QueryResultLables)
		if err != nil {
			zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
		}
		if _, ok := resultFPs[fp]; !ok {
			// If the alert was previously firing, keep it around for a given
			// retention time so it is reported as resolved to the AlertManager.
			if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > baserules.ResolvedRetention) {
				delete(r.Active, fp)
			}
			if a.State != model.StateInactive {
				a.State = model.StateInactive
				a.ResolvedAt = ts
				itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
					RuleID:       r.ID(),
					RuleName:     r.Name(),
					State:        model.StateInactive,
					StateChanged: true,
					UnixMilli:    ts.UnixMilli(),
					Labels:       model.LabelsString(labelsJSON),
					Fingerprint:  a.QueryResultLables.Hash(),
					Value:        a.Value,
				})
			}
			continue
		}

		if a.State == model.StatePending && ts.Sub(a.ActiveAt) >= r.HoldDuration() {
			a.State = model.StateFiring
			a.FiredAt = ts
			state := model.StateFiring
			if a.Missing {
				state = model.StateNoData
			}
			itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
				RuleID:       r.ID(),
				RuleName:     r.Name(),
				State:        state,
				StateChanged: true,
				UnixMilli:    ts.UnixMilli(),
				Labels:       model.LabelsString(labelsJSON),
				Fingerprint:  a.QueryResultLables.Hash(),
				Value:        a.Value,
			})
		}
	}

	currentState := r.State()

	overallStateChanged := currentState != prevState
	for idx, item := range itemsToAdd {
		item.OverallStateChanged = overallStateChanged
		item.OverallState = currentState
		itemsToAdd[idx] = item
	}

	r.RecordRuleStateHistory(ctx, prevState, currentState, itemsToAdd)

	return len(r.Active), nil
}

func (r *AnomalyRule) String() string {

	ar := baserules.PostableRule{
		AlertName:         r.Name(),
		RuleCondition:     r.Condition(),
		EvalWindow:        baserules.Duration(r.EvalWindow()),
		Labels:            r.Labels().Map(),
		Annotations:       r.Annotations().Map(),
		PreferredChannels: r.PreferredChannels(),
	}

	byt, err := yaml.Marshal(ar)
	if err != nil {
		return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
	}

	return string(byt)
}
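The window arithmetic in `prepareQueryRange` above is easy to check by hand. A minimal, runnable sketch of the same computation (the timestamps and durations are illustrative only):

```go
package main

import (
	"fmt"
	"time"
)

// window mirrors AnomalyRule.prepareQueryRange above: take [ts-evalWindow, ts],
// shift it back by evalDelay to allow for ingestion lag, then round both ends
// down to the minute so partially written minutes are not queried.
func window(ts time.Time, evalWindow, evalDelay time.Duration) (startMs, endMs int64) {
	start := ts.Add(-evalWindow).UnixMilli()
	end := ts.UnixMilli()
	if evalDelay > 0 {
		start -= evalDelay.Milliseconds()
		end -= evalDelay.Milliseconds()
	}
	start -= start % (60 * 1000) // round down to the minute
	end -= end % (60 * 1000)
	return start, end
}

func main() {
	ts := time.Date(2024, 8, 20, 10, 5, 42, 0, time.UTC)
	start, end := window(ts, 5*time.Minute, 2*time.Minute)
	// 10:05:42 - 5m - 2m = 09:58:42 -> 09:58:00; 10:05:42 - 2m = 10:03:42 -> 10:03:00
	fmt.Println(time.UnixMilli(start).UTC(), time.UnixMilli(end).UTC())
}
```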
ee/query-service/rules/manager.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package rules

import (
	"fmt"
	"time"

	baserules "go.signoz.io/signoz/pkg/query-service/rules"
)

func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {

	rules := make([]baserules.Rule, 0)
	var task baserules.Task

	ruleId := baserules.RuleIdFromTaskName(opts.TaskName)
	if opts.Rule.RuleType == baserules.RuleTypeThreshold {
		// create a threshold rule
		tr, err := baserules.NewThresholdRule(
			ruleId,
			opts.Rule,
			opts.FF,
			opts.Reader,
			opts.UseLogsNewSchema,
			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
		)

		if err != nil {
			return task, err
		}

		rules = append(rules, tr)

		// create ch rule task for evaluation
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)

	} else if opts.Rule.RuleType == baserules.RuleTypeProm {

		// create promql rule
		pr, err := baserules.NewPromRule(
			ruleId,
			opts.Rule,
			opts.Logger,
			opts.Reader,
			opts.ManagerOpts.PqlEngine,
		)

		if err != nil {
			return task, err
		}

		rules = append(rules, pr)

		// create promql rule task for evaluation
		task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)

	} else if opts.Rule.RuleType == baserules.RuleTypeAnomaly {
		// create anomaly rule
		ar, err := NewAnomalyRule(
			ruleId,
			opts.Rule,
			opts.FF,
			opts.Reader,
			opts.Cache,
			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
		)
		if err != nil {
			return task, err
		}

		rules = append(rules, ar)

		// create anomaly rule task for evaluation
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)

	} else {
		return nil, fmt.Errorf("unsupported rule type. Supported types: %s, %s", baserules.RuleTypeProm, baserules.RuleTypeThreshold)
	}

	return task, nil
}

// newTask returns an appropriate group for
// rule type
func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, ruleDB baserules.RuleDB) baserules.Task {
	if taskType == baserules.TaskTypeCh {
		return baserules.NewRuleTask(name, "", frequency, rules, opts, notify, ruleDB)
	}
	return baserules.NewPromRuleTask(name, "", frequency, rules, opts, notify, ruleDB)
}
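A compact sketch of the dispatch that `PrepareTaskFunc` implements, with local stand-in types so it runs on its own. The `threshold_rule` and `promql_rule` strings are assumed values for `RuleTypeThreshold` and `RuleTypeProm`; only `RuleTypeAnomaly = "anomaly_rule"` is confirmed by this diff.

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for baserules.PostableRule and baserules.Task.
type rule struct{ ruleType string }
type task struct{ runner string }

// prepareTask mirrors the branching in PrepareTaskFunc above: threshold and
// anomaly rules evaluate on the ClickHouse task runner, promql rules on the
// Prometheus runner, and anything else is rejected.
func prepareTask(r rule) (task, error) {
	switch r.ruleType {
	case "threshold_rule", "anomaly_rule": // string values assumed, see lead-in
		return task{runner: "ch"}, nil
	case "promql_rule":
		return task{runner: "prom"}, nil
	default:
		return task{}, errors.New("unsupported rule type")
	}
}

func main() {
	t, err := prepareTask(rule{ruleType: "anomaly_rule"})
	fmt.Println(t.runner, err) // ch <nil>
}
```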
@@ -9,6 +9,7 @@ const config: Config.InitialOptions = {
	modulePathIgnorePatterns: ['dist'],
	moduleNameMapper: {
		'\\.(css|less|scss)$': '<rootDir>/__mocks__/cssMock.ts',
+		'\\.md$': '<rootDir>/__mocks__/cssMock.ts',
	},
	globals: {
		extensionsToTreatAsEsm: ['.ts'],
@@ -51,7 +51,7 @@
		"ansi-to-html": "0.7.2",
		"antd": "5.11.0",
		"antd-table-saveas-excel": "2.2.1",
-		"axios": "1.6.4",
+		"axios": "1.7.4",
		"babel-eslint": "^10.1.0",
		"babel-jest": "^29.6.4",
		"babel-loader": "9.1.3",
@@ -88,7 +88,7 @@
		"lucide-react": "0.379.0",
		"mini-css-extract-plugin": "2.4.5",
		"papaparse": "5.4.1",
-		"posthog-js": "1.142.1",
+		"posthog-js": "1.160.3",
		"rc-tween-one": "3.0.6",
		"react": "18.2.0",
		"react-addons-update": "15.6.3",
@@ -110,6 +110,8 @@
		"react-syntax-highlighter": "15.5.0",
		"react-use": "^17.3.2",
		"react-virtuoso": "4.0.3",
+		"overlayscrollbars-react": "^0.5.6",
+		"overlayscrollbars": "^2.8.1",
		"redux": "^4.0.5",
		"redux-thunk": "^2.3.0",
		"rehype-raw": "7.0.0",
@@ -205,7 +207,6 @@
		"eslint-plugin-sonarjs": "^0.12.0",
		"husky": "^7.0.4",
		"is-ci": "^3.0.1",
		"jest-playwright-preset": "^1.7.2",
		"jest-styled-components": "^7.0.8",
		"lint-staged": "^12.5.0",
		"msw": "1.3.2",
frontend/public/Icons/groupBy.svg (new file, 1 line)
@@ -0,0 +1 @@
<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_4344_1236)" stroke="#C0C1C3" stroke-width="1.167" stroke-linecap="round" stroke-linejoin="round"><path d="M4.667 1.167H2.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V2.333c0-.644-.522-1.166-1.166-1.166zM8.167 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M11.667 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M5.833 10.5H2.917c-.992 0-1.75-.758-1.75-1.75v-.583"/><path d="M4.083 12.25l1.75-1.75-1.75-1.75M11.667 8.167H9.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V9.333c0-.644-.522-1.166-1.166-1.166z"/></g><defs><clipPath id="prefix__clip0_4344_1236"><path fill="#fff" d="M0 0h14v14H0z"/></clipPath></defs></svg>
(After: 878 B)

frontend/public/Icons/solid-x-circle.svg (new file, 1 line)
@@ -0,0 +1 @@
<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_4062_7291)" stroke-width="1.167" stroke-linecap="round" stroke-linejoin="round"><path d="M7 12.833A5.833 5.833 0 107 1.167a5.833 5.833 0 000 11.666z" fill="#E5484D" stroke="#E5484D"/><path d="M8.75 5.25l-3.5 3.5M5.25 5.25l3.5 3.5" stroke="#121317"/></g><defs><clipPath id="prefix__clip0_4062_7291"><path fill="#fff" d="M0 0h14v14H0z"/></clipPath></defs></svg>
(After: 467 B)

frontend/public/fonts/GeistMonoVF.woff2 (new binary file) — Binary file not shown.
@@ -53,6 +53,7 @@
	"option_atleastonce": "at least once",
	"option_onaverage": "on average",
	"option_intotal": "in total",
+	"option_last": "last",
	"option_above": "above",
	"option_below": "below",
	"option_equal": "is equal to",
@@ -0,0 +1,30 @@
{
	"breadcrumb": "Messaging Queues",
	"header": "Kafka / Overview",
	"overview": {
		"title": "Start sending data in as little as 20 minutes",
		"subtitle": "Connect and Monitor Your Data Streams"
	},
	"configureConsumer": {
		"title": "Configure Consumer",
		"description": "Add consumer data sources to gain insights and enhance monitoring.",
		"button": "Get Started"
	},
	"configureProducer": {
		"title": "Configure Producer",
		"description": "Add producer data sources to gain insights and enhance monitoring.",
		"button": "Get Started"
	},
	"monitorKafka": {
		"title": "Monitor kafka",
		"description": "Add your Kafka source to gain insights and enhance activity tracking.",
		"button": "Get Started"
	},
	"summarySection": {
		"viewDetailsButton": "View Details"
	},
	"confirmModal": {
		"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
		"okText": "Proceed"
	}
}
frontend/public/locales/en-GB/onboarding.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
	"invite_user": "Invite your teammates",
	"invite": "Invite",
	"skip": "Skip",
	"invite_user_helper_text": "Not the right person to get started? No worries! Invite someone who can.",
	"select_use_case": "Select a use-case to get started",
	"get_started": "Get Started"
}
@@ -40,6 +40,7 @@
	"option_atleastonce": "at least once",
	"option_onaverage": "on average",
	"option_intotal": "in total",
+	"option_last": "last",
	"option_above": "above",
	"option_below": "below",
	"option_equal": "is equal to",
@@ -1,3 +1,3 @@
{
-	"rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support."
+	"rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support or "
}
@@ -38,5 +38,7 @@
	"LIST_LICENSES": "SigNoz | List of Licenses",
	"WORKSPACE_LOCKED": "SigNoz | Workspace Locked",
	"SUPPORT": "SigNoz | Support",
-	"DEFAULT": "Open source Observability Platform | SigNoz"
+	"DEFAULT": "Open source Observability Platform | SigNoz",
+	"ALERT_HISTORY": "SigNoz | Alert Rule History",
+	"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview"
}
frontend/public/locales/en-GB/workspaceLocked.json (new file, 22 lines)
@@ -0,0 +1,22 @@
{
	"trialPlanExpired": "Trial Plan Expired",
	"gotQuestions": "Got Questions?",
	"contactUs": "Contact Us",
	"upgradeToContinue": "Upgrade to Continue",
	"upgradeNow": "Upgrade now to keep enjoying all the great features you’ve been using.",
	"yourDataIsSafe": "Your data is safe with us until",
	"actNow": "Act now to avoid any disruptions and continue where you left off.",
	"contactAdmin": "Contact your admin to proceed with the upgrade.",
	"continueMyJourney": "Continue My Journey",
	"needMoreTime": "Need More Time?",
	"extendTrial": "Extend Trial",
	"extendTrialMsgPart1": "If you have a specific reason why you were not able to finish your PoC in the trial period, please write to us on",
	"extendTrialMsgPart2": "with the reason. Sometimes we can extend trial by a few days on a case by case basis",
	"whyChooseSignoz": "Why choose Signoz",
	"enterpriseGradeObservability": "Enterprise-grade Observability",
	"observabilityDescription": "Get access to observability at any scale with advanced security and compliance.",
	"continueToUpgrade": "Continue to Upgrade",
	"youAreInGoodCompany": "You are in good company",
	"faqs": "FAQs",
	"somethingWentWrong": "Something went wrong"
}
@@ -53,6 +53,7 @@
	"option_atleastonce": "at least once",
	"option_onaverage": "on average",
	"option_intotal": "in total",
+	"option_last": "last",
	"option_above": "above",
	"option_below": "below",
	"option_equal": "is equal to",
@@ -6,5 +6,6 @@
	"share": "Share",
	"save": "Save",
	"edit": "Edit",
-	"logged_in": "Logged In"
+	"logged_in": "Logged In",
+	"pending_data_placeholder": "Just a bit of patience, just a little bit’s enough ⎯ we’re getting your {{dataSource}}!"
}
@@ -1,6 +1,7 @@
{
	"create_dashboard": "Create Dashboard",
	"import_json": "Import Dashboard JSON",
	"view_template": "View templates",
	"import_grafana_json": "Import Grafana JSON",
	"copy_to_clipboard": "Copy To ClipBoard",
	"download_json": "Download JSON",
frontend/public/locales/en/messagingQueuesKafkaOverview.json (new file, 30 lines)
@@ -0,0 +1,30 @@
{
	"breadcrumb": "Messaging Queues",
	"header": "Kafka / Overview",
	"overview": {
		"title": "Start sending data in as little as 20 minutes",
		"subtitle": "Connect and Monitor Your Data Streams"
	},
	"configureConsumer": {
		"title": "Configure Consumer",
		"description": "Add consumer data sources to gain insights and enhance monitoring.",
		"button": "Get Started"
	},
	"configureProducer": {
		"title": "Configure Producer",
		"description": "Add producer data sources to gain insights and enhance monitoring.",
		"button": "Get Started"
	},
	"monitorKafka": {
		"title": "Monitor kafka",
		"description": "Add your Kafka source to gain insights and enhance activity tracking.",
		"button": "Get Started"
	},
	"summarySection": {
		"viewDetailsButton": "View Details"
	},
	"confirmModal": {
		"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
		"okText": "Proceed"
	}
}
frontend/public/locales/en/onboarding.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
	"invite_user": "Invite your teammates",
	"invite": "Invite",
	"skip": "Skip",
	"invite_user_helper_text": "Not the right person to get started? No worries! Invite someone who can.",
	"select_use_case": "Select a use-case to get started",
	"get_started": "Get Started"
}
@@ -40,6 +40,7 @@
	"option_atleastonce": "at least once",
	"option_onaverage": "on average",
	"option_intotal": "in total",
+	"option_last": "last",
	"option_above": "above",
	"option_below": "below",
	"option_equal": "is equal to",
@@ -1,3 +1,3 @@
{
-	"rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support."
+	"rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support or "
}
@@ -49,5 +49,8 @@
	"TRACES_SAVE_VIEWS": "SigNoz | Traces Saved Views",
	"DEFAULT": "Open source Observability Platform | SigNoz",
	"SHORTCUTS": "SigNoz | Shortcuts",
-	"INTEGRATIONS": "SigNoz | Integrations"
+	"INTEGRATIONS": "SigNoz | Integrations",
+	"ALERT_HISTORY": "SigNoz | Alert Rule History",
+	"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview",
+	"MESSAGING_QUEUES": "SigNoz | Messaging Queues"
}
frontend/public/locales/en/workspaceLocked.json (new file, 22 lines)
@@ -0,0 +1,22 @@
{
	"trialPlanExpired": "Trial Plan Expired",
	"gotQuestions": "Got Questions?",
	"contactUs": "Contact Us",
	"upgradeToContinue": "Upgrade to Continue",
	"upgradeNow": "Upgrade now to keep enjoying all the great features you’ve been using.",
	"yourDataIsSafe": "Your data is safe with us until",
	"actNow": "Act now to avoid any disruptions and continue where you left off.",
	"contactAdmin": "Contact your admin to proceed with the upgrade.",
	"continueMyJourney": "Continue My Journey",
	"needMoreTime": "Need More Time?",
	"extendTrial": "Extend Trial",
	"extendTrialMsgPart1": "If you have a specific reason why you were not able to finish your PoC in the trial period, please write to us on",
	"extendTrialMsgPart2": "with the reason. Sometimes we can extend trial by a few days on a case by case basis",
	"whyChooseSignoz": "Why choose Signoz",
	"enterpriseGradeObservability": "Enterprise-grade Observability",
	"observabilityDescription": "Get access to observability at any scale with advanced security and compliance.",
	"continueToUpgrade": "Continue to Upgrade",
	"youAreInGoodCompany": "You are in good company",
	"faqs": "FAQs",
	"somethingWentWrong": "Something went wrong"
}
@@ -76,9 +76,8 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
			isUserFetching: false,
		},
	});

	if (!isLoggedIn) {
-		history.push(ROUTES.LOGIN);
+		history.push(ROUTES.LOGIN, { from: pathname });
	}
};
@@ -1,6 +1,7 @@
import { ConfigProvider } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import setLocalStorageApi from 'api/browser/localstorage/set';
import logEvent from 'api/common/logEvent';
import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import { FeatureKeys } from 'constants/features';
@@ -18,6 +19,7 @@ import { ResourceProvider } from 'hooks/useResourceAttribute';
import history from 'lib/history';
import { identity, pick, pickBy } from 'lodash-es';
import posthog from 'posthog-js';
+import AlertRuleProvider from 'providers/Alert';
import { DashboardProvider } from 'providers/Dashboard/Dashboard';
import { QueryBuilderProvider } from 'providers/QueryBuilder';
import { Suspense, useEffect, useState } from 'react';
@@ -48,7 +50,7 @@ function App(): JSX.Element {

	const dispatch = useDispatch<Dispatch<AppActions>>();

-	const { trackPageView, trackEvent } = useAnalytics();
+	const { trackPageView } = useAnalytics();

	const { hostname, pathname } = window.location;

@@ -65,6 +67,14 @@ function App(): JSX.Element {

		allFlags.find((flag) => flag.name === FeatureKeys.CHAT_SUPPORT)?.active ||
		false;

+	const isPremiumSupportEnabled =
+		allFlags.find((flag) => flag.name === FeatureKeys.PREMIUM_SUPPORT)?.active ||
+		false;
+
+	const showAddCreditCardModal =
+		!isPremiumSupportEnabled &&
+		!licenseData?.payload?.trialConvertedToSubscription;

	dispatch({
		type: UPDATE_FEATURE_FLAG_RESPONSE,
		payload: {
@@ -81,7 +91,7 @@ function App(): JSX.Element {
		setRoutes(newRoutes);
	}

-	if (isLoggedInState && isChatSupportEnabled) {
+	if (isLoggedInState && isChatSupportEnabled && !showAddCreditCardModal) {
		// eslint-disable-next-line @typescript-eslint/ban-ts-comment
		// @ts-ignore
		window.Intercom('boot', {
@@ -127,7 +137,6 @@ function App(): JSX.Element {

	window.analytics.identify(email, sanitizedIdentifyPayload);
	window.analytics.group(domain, groupTraits);
-	window.clarity('identify', email, name);

	posthog?.identify(email, {
		email,
@@ -199,7 +208,7 @@ function App(): JSX.Element {
			LOCALSTORAGE.THEME_ANALYTICS_V1,
		);
		if (!isThemeAnalyticsSent) {
-			trackEvent('Theme Analytics', {
+			logEvent('Theme Analytics', {
				theme: isDarkMode ? THEME_MODE.DARK : THEME_MODE.LIGHT,
				user: pick(user, ['email', 'userId', 'name']),
				org,
@@ -227,22 +236,24 @@ function App(): JSX.Element {
		<QueryBuilderProvider>
			<DashboardProvider>
				<KeyboardHotkeysProvider>
-					<AppLayout>
-						<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
-							<Switch>
-								{routes.map(({ path, component, exact }) => (
-									<Route
-										key={`${path}`}
-										exact={exact}
-										path={path}
-										component={component}
-									/>
-								))}
+					<AlertRuleProvider>
+						<AppLayout>
+							<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
+								<Switch>
+									{routes.map(({ path, component, exact }) => (
+										<Route
+											key={`${path}`}
+											exact={exact}
+											path={path}
+											component={component}
+										/>
+									))}

-								<Route path="*" component={NotFound} />
-							</Switch>
-						</Suspense>
-					</AppLayout>
+									<Route path="*" component={NotFound} />
+								</Switch>
+							</Suspense>
+						</AppLayout>
+					</AlertRuleProvider>
				</KeyboardHotkeysProvider>
			</DashboardProvider>
		</QueryBuilderProvider>
@@ -92,6 +92,14 @@ export const CreateNewAlerts = Loadable(
	() => import(/* webpackChunkName: "Create Alerts" */ 'pages/CreateAlert'),
);

+export const AlertHistory = Loadable(
+	() => import(/* webpackChunkName: "Alert History" */ 'pages/AlertList'),
+);
+
+export const AlertOverview = Loadable(
+	() => import(/* webpackChunkName: "Alert Overview" */ 'pages/AlertList'),
+);

export const CreateAlertChannelAlerts = Loadable(
	() =>
		import(/* webpackChunkName: "Create Channels" */ 'pages/AlertChannelCreate'),
@@ -204,3 +212,15 @@ export const InstalledIntegrations = Loadable(
		/* webpackChunkName: "InstalledIntegrations" */ 'pages/IntegrationsModulePage'
	),
);

+export const MessagingQueues = Loadable(
+	() =>
+		import(/* webpackChunkName: "MessagingQueues" */ 'pages/MessagingQueues'),
+);
+
+export const MQDetailPage = Loadable(
+	() =>
+		import(
+			/* webpackChunkName: "MQDetailPage" */ 'pages/MessagingQueues/MQDetailPage'
+		),
+);
@@ -2,6 +2,8 @@ import ROUTES from 'constants/routes';
import { RouteProps } from 'react-router-dom';

import {
+	AlertHistory,
+	AlertOverview,
	AllAlertChannels,
	AllErrors,
	APIKeys,
@@ -23,6 +25,8 @@ import {
	LogsExplorer,
	LogsIndexToFields,
	LogsSaveViews,
+	MessagingQueues,
+	MQDetailPage,
	MySettings,
	NewDashboardPage,
	OldLogsExplorer,
@@ -169,6 +173,20 @@ const routes: AppRoutes[] = [
		isPrivate: true,
		key: 'ALERTS_NEW',
	},
+	{
+		path: ROUTES.ALERT_HISTORY,
+		exact: true,
+		component: AlertHistory,
+		isPrivate: true,
+		key: 'ALERT_HISTORY',
+	},
+	{
+		path: ROUTES.ALERT_OVERVIEW,
+		exact: true,
+		component: AlertOverview,
+		isPrivate: true,
+		key: 'ALERT_OVERVIEW',
+	},
	{
		path: ROUTES.TRACE,
		exact: true,
@@ -351,6 +369,20 @@ const routes: AppRoutes[] = [
		isPrivate: true,
		key: 'INTEGRATIONS',
	},
+	{
+		path: ROUTES.MESSAGING_QUEUES,
+		exact: true,
+		component: MessagingQueues,
+		key: 'MESSAGING_QUEUES',
+		isPrivate: true,
+	},
+	{
+		path: ROUTES.MESSAGING_QUEUES_DETAIL,
+		exact: true,
+		component: MQDetailPage,
+		key: 'MESSAGING_QUEUES_DETAIL',
+		isPrivate: true,
+	},
];

export const SUPPORT_ROUTE: AppRoutes = {
@@ -9,9 +9,9 @@ export function ErrorResponseHandler(error: AxiosError): ErrorResponse {
	// making the error status code as standard Error Status Code
	const statusCode = response.status as ErrorStatusCode;

-	if (statusCode >= 400 && statusCode < 500) {
-		const { data } = response as AxiosResponse;
+	const { data } = response as AxiosResponse;

+	if (statusCode >= 400 && statusCode < 500) {
		if (statusCode === 404) {
			return {
				statusCode,
@@ -34,12 +34,11 @@ export function ErrorResponseHandler(error: AxiosError): ErrorResponse {
			body: JSON.stringify((response.data as any).data),
		};
	}

	return {
		statusCode,
		payload: null,
		error: 'Something went wrong',
-		message: null,
+		message: data?.error,
	};
}
if (request) {
@@ -1,26 +1,20 @@
import axios from 'api';
-import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
-import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/create';

const create = async (
	props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
-	try {
-		const response = await axios.post('/rules', {
-			...props.data,
-		});
+	const response = await axios.post('/rules', {
+		...props.data,
+	});

-		return {
-			statusCode: 200,
-			error: null,
-			message: response.data.status,
-			payload: response.data.data,
-		};
-	} catch (error) {
-		return ErrorResponseHandler(error as AxiosError);
-	}
+	return {
+		statusCode: 200,
+		error: null,
+		message: response.data.status,
+		payload: response.data.data,
+	};
};

export default create;
@@ -1,24 +1,18 @@
import axios from 'api';
-import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
-import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/delete';

const deleteAlerts = async (
	props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
-	try {
-		const response = await axios.delete(`/rules/${props.id}`);
+	const response = await axios.delete(`/rules/${props.id}`);

-		return {
-			statusCode: 200,
-			error: null,
-			message: response.data.status,
-			payload: response.data.data.rules,
-		};
-	} catch (error) {
-		return ErrorResponseHandler(error as AxiosError);
-	}
+	return {
+		statusCode: 200,
+		error: null,
+		message: response.data.status,
+		payload: response.data.data.rules,
+	};
};

export default deleteAlerts;
@@ -1,24 +1,16 @@
import axios from 'api';
-import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
-import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/get';

const get = async (
	props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
-	try {
-		const response = await axios.get(`/rules/${props.id}`);
-
-		return {
-			statusCode: 200,
-			error: null,
-			message: response.data.status,
-			payload: response.data,
-		};
-	} catch (error) {
-		return ErrorResponseHandler(error as AxiosError);
-	}
+	const response = await axios.get(`/rules/${props.id}`);
+	return {
+		statusCode: 200,
+		error: null,
+		message: response.data.status,
+		payload: response.data,
+	};
};

export default get;
@@ -1,26 +1,20 @@
import axios from 'api';
-import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
-import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/patch';

const patch = async (
	props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
-	try {
-		const response = await axios.patch(`/rules/${props.id}`, {
-			...props.data,
-		});
+	const response = await axios.patch(`/rules/${props.id}`, {
+		...props.data,
+	});

-		return {
-			statusCode: 200,
-			error: null,
-			message: response.data.status,
-			payload: response.data.data,
-		};
-	} catch (error) {
-		return ErrorResponseHandler(error as AxiosError);
-	}
+	return {
+		statusCode: 200,
+		error: null,
+		message: response.data.status,
+		payload: response.data.data,
+	};
};

export default patch;
@@ -1,26 +1,20 @@
import axios from 'api';
-import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
-import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/save';

const put = async (
	props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
-	try {
-		const response = await axios.put(`/rules/${props.id}`, {
-			...props.data,
-		});
+	const response = await axios.put(`/rules/${props.id}`, {
+		...props.data,
+	});

-		return {
-			statusCode: 200,
-			error: null,
-			message: response.data.status,
-			payload: response.data.data,
-		};
-	} catch (error) {
-		return ErrorResponseHandler(error as AxiosError);
-	}
+	return {
+		statusCode: 200,
+		error: null,
+		message: response.data.status,
+		payload: response.data.data,
+	};
};

export default put;
frontend/src/api/alerts/ruleStats.ts (new file, 28 lines)
@@ -0,0 +1,28 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { AlertRuleStatsPayload } from 'types/api/alerts/def';
import { RuleStatsProps } from 'types/api/alerts/ruleStats';

const ruleStats = async (
	props: RuleStatsProps,
): Promise<SuccessResponse<AlertRuleStatsPayload> | ErrorResponse> => {
	try {
		const response = await axios.post(`/rules/${props.id}/history/stats`, {
			start: props.start,
			end: props.end,
		});

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default ruleStats;
frontend/src/api/alerts/timelineGraph.ts (new file, 33 lines)
@@ -0,0 +1,33 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { AlertRuleTimelineGraphResponsePayload } from 'types/api/alerts/def';
import { GetTimelineGraphRequestProps } from 'types/api/alerts/timelineGraph';

const timelineGraph = async (
	props: GetTimelineGraphRequestProps,
): Promise<
	SuccessResponse<AlertRuleTimelineGraphResponsePayload> | ErrorResponse
> => {
	try {
		const response = await axios.post(
			`/rules/${props.id}/history/overall_status`,
			{
				start: props.start,
				end: props.end,
			},
		);

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default timelineGraph;
frontend/src/api/alerts/timelineTable.ts (new file, 36 lines)
@@ -0,0 +1,36 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { AlertRuleTimelineTableResponsePayload } from 'types/api/alerts/def';
import { GetTimelineTableRequestProps } from 'types/api/alerts/timelineTable';

const timelineTable = async (
	props: GetTimelineTableRequestProps,
): Promise<
	SuccessResponse<AlertRuleTimelineTableResponsePayload> | ErrorResponse
> => {
	try {
		const response = await axios.post(`/rules/${props.id}/history/timeline`, {
			start: props.start,
			end: props.end,
			offset: props.offset,
			limit: props.limit,
			order: props.order,
			state: props.state,
			// TODO(shaheer): implement filters
			filters: props.filters,
		});

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default timelineTable;
frontend/src/api/alerts/topContributors.ts (new file, 33 lines)
@@ -0,0 +1,33 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { AlertRuleTopContributorsPayload } from 'types/api/alerts/def';
import { TopContributorsProps } from 'types/api/alerts/topContributors';

const topContributors = async (
	props: TopContributorsProps,
): Promise<
	SuccessResponse<AlertRuleTopContributorsPayload> | ErrorResponse
> => {
	try {
		const response = await axios.post(
			`/rules/${props.id}/history/top_contributors`,
			{
				start: props.start,
				end: props.end,
			},
		);

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default topContributors;
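The four wrappers above (ruleStats, timelineGraph, timelineTable, topContributors) all POST a `{start, end}` window to sibling endpoints under `/rules/{id}/history/`. A sketch of the same calls with plain net/http; the base URL, rule id, and absence of auth are placeholders, and timelineTable additionally takes offset/limit/order/state/filters:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Millisecond timestamps for the query window; values are illustrative.
	body, _ := json.Marshal(map[string]int64{"start": 1724052000000, "end": 1724055600000})

	// Endpoint suffixes taken from the four API files above.
	for _, ep := range []string{"stats", "top_contributors", "overall_status", "timeline"} {
		url := fmt.Sprintf("http://localhost:8080/api/v1/rules/1/history/%s", ep)
		resp, err := http.Post(url, "application/json", bytes.NewReader(body))
		if err != nil {
			fmt.Println(ep, err)
			continue
		}
		fmt.Println(ep, resp.Status)
		resp.Body.Close()
	}
}
```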
@@ -3,7 +3,7 @@ const apiV1 = '/api/v1/';
export const apiV2 = '/api/v2/';
export const apiV3 = '/api/v3/';
export const apiV4 = '/api/v4/';
-export const gatewayApiV1 = '/api/gateway/v1';
-export const apiAlertManager = '/api/alertmanager';
+export const gatewayApiV1 = '/api/gateway/v1/';
+export const apiAlertManager = '/api/alertmanager/';

export default apiV1;
frontend/src/api/common/getQueryStats.ts (new file, 62 lines)
@@ -0,0 +1,62 @@
import getLocalStorageApi from 'api/browser/localstorage/get';
import { ENVIRONMENT } from 'constants/env';
import { LOCALSTORAGE } from 'constants/localStorage';
import { isEmpty } from 'lodash-es';

export interface WsDataEvent {
	read_rows: number;
	read_bytes: number;
	elapsed_ms: number;
}
interface GetQueryStatsProps {
	queryId: string;
	setData: React.Dispatch<React.SetStateAction<WsDataEvent | undefined>>;
}

function getURL(baseURL: string, queryId: string): URL | string {
	if (baseURL && !isEmpty(baseURL)) {
		return `${baseURL}/ws/query_progress?q=${queryId}`;
	}
	const url = new URL(`/ws/query_progress?q=${queryId}`, window.location.href);

	if (window.location.protocol === 'http:') {
		url.protocol = 'ws';
	} else {
		url.protocol = 'wss';
	}

	return url;
}

export function getQueryStats(props: GetQueryStatsProps): void {
	const { queryId, setData } = props;

	const token = getLocalStorageApi(LOCALSTORAGE.AUTH_TOKEN) || '';

	// https://github.com/whatwg/websockets/issues/20 reason for not using the relative URLs
	const url = getURL(ENVIRONMENT.wsURL, queryId);

	const socket = new WebSocket(url, token);

	socket.addEventListener('message', (event) => {
		try {
			const parsedData = JSON.parse(event?.data);
			setData(parsedData);
		} catch {
			setData(event?.data);
		}
	});

	socket.addEventListener('error', (event) => {
		console.error(event);
	});

	socket.addEventListener('close', (event) => {
		// 1000 is a normal closure status code
		if (event.code !== 1000) {
			console.error('WebSocket closed with error:', event);
		} else {
			console.error('WebSocket closed normally.');
		}
	});
}
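getQueryStats above passes the auth token as the WebSocket subprotocol (the second argument to the WebSocket constructor). A sketch of an equivalent client in Go using the third-party github.com/gorilla/websocket package; the URL, token, and server behavior here are assumptions drawn only from the TS code:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket" // third-party dependency, assumed available
)

func main() {
	// Mirror the TS code: the auth token travels as the negotiated subprotocol.
	dialer := websocket.Dialer{Subprotocols: []string{"AUTH_TOKEN"}}
	conn, _, err := dialer.Dial("ws://localhost:8080/ws/query_progress?q=QUERY_ID", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Each message is a WsDataEvent JSON object: read_rows, read_bytes, elapsed_ms.
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			return // socket closed
		}
		fmt.Printf("progress: %s\n", msg)
	}
}
```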
@@ -1,4 +1,4 @@
-import axios from 'api';
+import { ApiBaseInstance as axios } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
@@ -21,6 +21,7 @@ const logEvent = async (
		payload: response.data.data,
	};
} catch (error) {
+	console.error(error);
	return ErrorResponseHandler(error as AxiosError);
}
};
@@ -1,6 +1,8 @@
import { ApiV2Instance as axios } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
+import getStartEndRangeTime from 'lib/getStartEndRangeTime';
+import store from 'store';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
	Props,
@@ -11,7 +13,26 @@ const dashboardVariablesQuery = async (
	props: Props,
): Promise<SuccessResponse<VariableResponseProps> | ErrorResponse> => {
	try {
-		const response = await axios.post(`/variables/query`, props);
+		const { globalTime } = store.getState();
+		const { start, end } = getStartEndRangeTime({
+			type: 'GLOBAL_TIME',
+			interval: globalTime.selectedTime,
+		});

+		const timeVariables: Record<string, number> = {
+			start_timestamp_ms: parseInt(start, 10) * 1e3,
+			end_timestamp_ms: parseInt(end, 10) * 1e3,
+			start_timestamp_nano: parseInt(start, 10) * 1e9,
+			end_timestamp_nano: parseInt(end, 10) * 1e9,
+			start_timestamp: parseInt(start, 10),
+			end_timestamp: parseInt(end, 10),
+		};

+		const payload = { ...props };

+		payload.variables = { ...payload.variables, ...timeVariables };

+		const response = await axios.post(`/variables/query`, payload);

		return {
			statusCode: 200,
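The derived variables above are plain unit conversions from the global range's epoch seconds. The same arithmetic in a runnable sketch (the timestamps are illustrative):

```go
package main

import "fmt"

func main() {
	var start, end int64 = 1724052000, 1724055600 // epoch seconds
	vars := map[string]int64{
		"start_timestamp_ms":   start * 1000,       // seconds -> milliseconds
		"end_timestamp_ms":     end * 1000,
		"start_timestamp_nano": start * 1000000000, // seconds -> nanoseconds
		"end_timestamp_nano":   end * 1000000000,
		"start_timestamp":      start,
		"end_timestamp":        end,
	}
	fmt.Println(vars)
}
```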
@@ -96,6 +96,10 @@ const interceptorRejected = async (
	}
};

+const interceptorRejectedBase = async (
+	value: AxiosResponse<any>,
+): Promise<AxiosResponse<any>> => Promise.reject(value);

const instance = axios.create({
	baseURL: `${ENVIRONMENT.baseURL}${apiV1}`,
});
@@ -140,6 +144,18 @@ ApiV4Instance.interceptors.response.use(
ApiV4Instance.interceptors.request.use(interceptorsRequestResponse);
//

+// axios Base
+export const ApiBaseInstance = axios.create({
+	baseURL: `${ENVIRONMENT.baseURL}${apiV1}`,
+});
+
+ApiBaseInstance.interceptors.response.use(
+	interceptorsResponse,
+	interceptorRejectedBase,
+);
+ApiBaseInstance.interceptors.request.use(interceptorsRequestResponse);
+//

// gateway Api V1
export const GatewayApiV1Instance = axios.create({
	baseURL: `${ENVIRONMENT.baseURL}${gatewayApiV1}`,
@@ -12,10 +12,13 @@ export const getMetricsQueryRange = async (
	props: QueryRangePayload,
	version: string,
	signal: AbortSignal,
+	headers?: Record<string, string>,
): Promise<SuccessResponse<MetricRangePayloadV3> | ErrorResponse> => {
	try {
		if (version && version === ENTITY_VERSION_V4) {
-			const response = await ApiV4Instance.post('/query_range', props, { signal });
+			const response = await ApiV4Instance.post('/query_range', props, {
+				signal,
+			});

			return {
				statusCode: 200,
@@ -26,7 +29,10 @@ export const getMetricsQueryRange = async (
			};
		}

-		const response = await ApiV3Instance.post('/query_range', props, { signal });
+		const response = await ApiV3Instance.post('/query_range', props, {
+			signal,
+			headers,
+		});

		return {
			statusCode: 200,
@@ -1,7 +1,20 @@
 import axios from 'api';
+import { isNil } from 'lodash-es';

-const getTopLevelOperations = async (): Promise<ServiceDataProps> => {
-	const response = await axios.post(`/service/top_level_operations`);
+interface GetTopLevelOperationsProps {
+	service?: string;
+	start?: number;
+	end?: number;
+}
+
+const getTopLevelOperations = async (
+	props: GetTopLevelOperationsProps,
+): Promise<ServiceDataProps> => {
+	const response = await axios.post(`/service/top_level_operations`, {
+		start: !isNil(props.start) ? `${props.start}` : undefined,
+		end: !isNil(props.end) ? `${props.end}` : undefined,
+		service: props.service,
+	});
 	return response.data;
 };

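The isNil guards matter because the fields are optional: template-stringifying an undefined value would send the literal text "undefined", whereas leaving it undefined drops the key from the serialized JSON body entirely. A small self-contained sketch (values hypothetical):

import { isNil } from 'lodash-es';

const start: number | undefined = 1700000000000; // hypothetical epoch value
const end: number | undefined = undefined;

const body = {
  start: !isNil(start) ? `${start}` : undefined, // '1700000000000'
  end: !isNil(end) ? `${end}` : undefined, // stays undefined
};

JSON.stringify(body); // '{"start":"1700000000000"}', keys with undefined values are omitted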
@@ -1,6 +1,7 @@
 import axios from 'api';
 import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
 import { AxiosError } from 'axios';
+import { Dayjs } from 'dayjs';
 import { ErrorResponse, SuccessResponse } from 'types/api';

 import { Recurrence } from './getAllDowntimeSchedules';
@@ -11,8 +12,8 @@ export interface DowntimeSchedulePayload {
 	alertIds: string[];
 	schedule: {
 		timezone?: string;
-		startTime?: string;
-		endTime?: string;
+		startTime?: string | Dayjs;
+		endTime?: string | Dayjs;
 		recurrence?: Recurrence;
 	};
 }

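Widening startTime/endTime to string | Dayjs lets callers hand over picker values directly instead of pre-formatting them. A hedged usage sketch against the interface above (the concrete dates are examples):

import dayjs from 'dayjs';

// A Dayjs object from a time picker and a plain ISO string are now both valid.
const schedule: DowntimeSchedulePayload['schedule'] = {
  timezone: 'UTC',
  startTime: dayjs('2024-06-01T00:00:00Z'), // Dayjs value, no .format() needed
  endTime: '2024-06-02T00:00:00Z', // strings still accepted
};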
@@ -1,6 +1,6 @@
 import axios from 'api';
 import { AxiosError, AxiosResponse } from 'axios';
-import { Option } from 'container/PlannedDowntime/DropdownWithSubMenu/DropdownWithSubMenu';
+import { Option } from 'container/PlannedDowntime/PlannedDowntimeutils';
 import { useQuery, UseQueryResult } from 'react-query';

 export type Recurrence = {
@@ -28,6 +28,7 @@ export interface DowntimeSchedules {
 	createdBy: string | null;
 	updatedAt: string | null;
 	updatedBy: string | null;
+	kind: string | null;
 }
 export type PayloadProps = { data: DowntimeSchedules[] };

frontend/src/api/queryBuilder/getAttributeSuggestions.ts (new file, 63 lines)
@@ -0,0 +1,63 @@
import { ApiV3Instance } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError, AxiosResponse } from 'axios';
import { baseAutoCompleteIdKeysOrder } from 'constants/queryBuilder';
import { encode } from 'js-base64';
import { createIdFromObjectFields } from 'lib/createIdFromObjectFields';
import createQueryParams from 'lib/createQueryParams';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
	IGetAttributeSuggestionsPayload,
	IGetAttributeSuggestionsSuccessResponse,
} from 'types/api/queryBuilder/getAttributeSuggestions';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';

export const getAttributeSuggestions = async ({
	searchText,
	dataSource,
	filters,
}: IGetAttributeSuggestionsPayload): Promise<
	SuccessResponse<IGetAttributeSuggestionsSuccessResponse> | ErrorResponse
> => {
	try {
		let base64EncodedFiltersString;
		try {
			// Strip the trailing '=' padding that base64 appends to make the output a
			// multiple of 4 characters; qs does not handle the padding well in query strings.
			base64EncodedFiltersString = encode(JSON.stringify(filters)).replace(
				/=+$/,
				'',
			);
		} catch {
			// default base64 encoded string for an empty filters object
			base64EncodedFiltersString = 'eyJpdGVtcyI6W10sIm9wIjoiQU5EIn0';
		}
		const response: AxiosResponse<{
			data: IGetAttributeSuggestionsSuccessResponse;
		}> = await ApiV3Instance.get(
			`/filter_suggestions?${createQueryParams({
				searchText,
				dataSource,
				existingFilter: base64EncodedFiltersString,
			})}`,
		);

		const payload: BaseAutocompleteData[] =
			response.data.data.attributes?.map(({ id: _, ...item }) => ({
				...item,
				id: createIdFromObjectFields(item, baseAutoCompleteIdKeysOrder),
			})) || [];

		return {
			statusCode: 200,
			error: null,
			message: response.statusText,
			payload: {
				attributes: payload,
				example_queries: response.data.data.example_queries,
			},
		};
	} catch (e) {
		return ErrorResponseHandler(e as AxiosError);
	}
};
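The hard-coded fallback above is simply the padding-stripped base64 encoding of an empty AND filter, which can be verified directly:

import { encode } from 'js-base64';

const emptyFilter = { items: [], op: 'AND' };
const encoded = encode(JSON.stringify(emptyFilter)); // 'eyJpdGVtcyI6W10sIm9wIjoiQU5EIn0='
const stripped = encoded.replace(/=+$/, ''); // 'eyJpdGVtcyI6W10sIm9wIjoiQU5EIn0'
// 'stripped' equals the fallback constant used when JSON.stringify(filters) throws.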
frontend/src/assets/AlertHistory/ConfigureIcon.tsx (new file, 41 lines)
@@ -0,0 +1,41 @@
interface ConfigureIconProps {
	width?: number;
	height?: number;
	fill?: string;
}

function ConfigureIcon({
	width,
	height,
	fill,
}: ConfigureIconProps): JSX.Element {
	return (
		<svg
			xmlns="http://www.w3.org/2000/svg"
			width={width}
			height={height}
			fill={fill}
		>
			<path
				stroke="#C0C1C3"
				strokeLinecap="round"
				strokeLinejoin="round"
				strokeWidth="1.333"
				d="M9.71 4.745a.576.576 0 000 .806l.922.922a.576.576 0 00.806 0l2.171-2.171a3.455 3.455 0 01-4.572 4.572l-3.98 3.98a1.222 1.222 0 11-1.727-1.728l3.98-3.98a3.455 3.455 0 014.572-4.572L9.717 4.739l-.006.006z"
			/>
			<path
				stroke="#C0C1C3"
				strokeLinecap="round"
				strokeWidth="1.333"
				d="M4 7L2.527 5.566a1.333 1.333 0 01-.013-1.898l.81-.81a1.333 1.333 0 011.991.119L5.333 3m5.417 7.988l1.179 1.178m0 0l-.138.138a.833.833 0 00.387 1.397v0a.833.833 0 00.792-.219l.446-.446a.833.833 0 00.176-.917v0a.833.833 0 00-1.355-.261l-.308.308z"
			/>
		</svg>
	);
}

ConfigureIcon.defaultProps = {
	width: 16,
	height: 16,
	fill: 'none',
};
export default ConfigureIcon;
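The AlertHistory icon components added below all follow this same shape: optional props with defaultProps fallbacks. A hypothetical call site (import path assumed from the file location):

import ConfigureIcon from 'assets/AlertHistory/ConfigureIcon';

// Defaults (16x16, fill 'none') apply unless overridden per usage.
function ToolbarExample(): JSX.Element {
  return (
    <span>
      <ConfigureIcon />
      <ConfigureIcon width={20} height={20} />
    </span>
  );
}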
frontend/src/assets/AlertHistory/LogsIcon.tsx (new file, 65 lines)
@@ -0,0 +1,65 @@
interface LogsIconProps {
	width?: number;
	height?: number;
	fill?: string;
	strokeColor?: string;
	strokeWidth?: number;
}

function LogsIcon({
	width,
	height,
	fill,
	strokeColor,
	strokeWidth,
}: LogsIconProps): JSX.Element {
	return (
		<svg
			xmlns="http://www.w3.org/2000/svg"
			width={width}
			height={height}
			fill={fill}
		>
			<path
				stroke={strokeColor}
				strokeWidth={strokeWidth}
				d="M2.917 3.208v7.875"
			/>
			<ellipse
				cx="6.417"
				cy="3.208"
				stroke={strokeColor}
				strokeWidth={strokeWidth}
				rx="3.5"
				ry="1.458"
			/>
			<ellipse cx="6.417" cy="3.165" fill={strokeColor} rx="0.875" ry="0.365" />
			<path
				stroke={strokeColor}
				strokeWidth={strokeWidth}
				d="M9.917 11.083c0 .645-1.567 1.167-3.5 1.167s-3.5-.522-3.5-1.167"
			/>
			<path
				stroke={strokeColor}
				strokeLinecap="round"
				strokeWidth={strokeWidth}
				d="M5.25 6.417v1.117c0 .028.02.053.049.057l1.652.276A.058.058 0 017 7.924v1.993"
			/>
			<path
				stroke={strokeColor}
				strokeWidth={strokeWidth}
				d="M9.917 3.208v3.103c0 .046.05.074.089.05L12.182 5a.058.058 0 01.088.035l.264 1.059a.058.058 0 01-.013.053l-2.59 2.877a.058.058 0 00-.014.04v2.018"
			/>
		</svg>
	);
}

LogsIcon.defaultProps = {
	width: 14,
	height: 14,
	fill: 'none',
	strokeColor: '#C0C1C3',
	strokeWidth: 1.167,
};

export default LogsIcon;
frontend/src/assets/AlertHistory/SeverityCriticalIcon.tsx (new file, 39 lines)
@@ -0,0 +1,39 @@
interface SeverityCriticalIconProps {
	width?: number;
	height?: number;
	fill?: string;
	stroke?: string;
}

function SeverityCriticalIcon({
	width,
	height,
	fill,
	stroke,
}: SeverityCriticalIconProps): JSX.Element {
	return (
		<svg
			width={width}
			height={height}
			fill={fill}
			xmlns="http://www.w3.org/2000/svg"
		>
			<path
				d="M.99707.666056.99707 2.99939M.99707 5.33337H.991237M3.00293.666056 3.00293 2.99939M3.00293 5.33337H2.9971M5.00879.666056V2.99939M5.00879 5.33337H5.00296"
				stroke={stroke}
				strokeWidth="1.16667"
				strokeLinecap="round"
				strokeLinejoin="round"
			/>
		</svg>
	);
}

SeverityCriticalIcon.defaultProps = {
	width: 6,
	height: 6,
	fill: 'none',
	stroke: '#F56C87',
};

export default SeverityCriticalIcon;
frontend/src/assets/AlertHistory/SeverityErrorIcon.tsx (new file, 42 lines)
@@ -0,0 +1,42 @@
interface SeverityErrorIconProps {
	width?: number;
	height?: number;
	fill?: string;
	stroke?: string;
	strokeWidth?: string;
}

function SeverityErrorIcon({
	width,
	height,
	fill,
	stroke,
	strokeWidth,
}: SeverityErrorIconProps): JSX.Element {
	return (
		<svg
			width={width}
			height={height}
			fill={fill}
			xmlns="http://www.w3.org/2000/svg"
		>
			<path
				d="M1.00781.957845 1.00781 2.99951M1.00781 5.04175H1.00228"
				stroke={stroke}
				strokeWidth={strokeWidth}
				strokeLinecap="round"
				strokeLinejoin="round"
			/>
		</svg>
	);
}

SeverityErrorIcon.defaultProps = {
	width: 2,
	height: 6,
	fill: 'none',
	stroke: '#F56C87',
	strokeWidth: '1.02083',
};

export default SeverityErrorIcon;
frontend/src/assets/AlertHistory/SeverityInfoIcon.tsx (new file, 46 lines)
@@ -0,0 +1,46 @@
interface SeverityInfoIconProps {
	width?: number;
	height?: number;
	fill?: string;
	stroke?: string;
}

function SeverityInfoIcon({
	width,
	height,
	fill,
	stroke,
}: SeverityInfoIconProps): JSX.Element {
	return (
		<svg
			width={width}
			height={height}
			fill={fill}
			xmlns="http://www.w3.org/2000/svg"
		>
			<rect
				width={width}
				height={height}
				rx="3.5"
				fill={stroke}
				fillOpacity=".2"
			/>
			<path
				d="M7 9.33346V7.00012M7 4.66675H7.00583"
				stroke={stroke}
				strokeWidth="1.16667"
				strokeLinecap="round"
				strokeLinejoin="round"
			/>
		</svg>
	);
}

SeverityInfoIcon.defaultProps = {
	width: 14,
	height: 14,
	fill: 'none',
	stroke: '#7190F9',
};

export default SeverityInfoIcon;
frontend/src/assets/AlertHistory/SeverityWarningIcon.tsx (new file, 42 lines)
@@ -0,0 +1,42 @@
interface SeverityWarningIconProps {
	width?: number;
	height?: number;
	fill?: string;
	stroke?: string;
	strokeWidth?: string;
}

function SeverityWarningIcon({
	width,
	height,
	fill,
	stroke,
	strokeWidth,
}: SeverityWarningIconProps): JSX.Element {
	return (
		<svg
			width={width}
			height={height}
			fill={fill}
			xmlns="http://www.w3.org/2000/svg"
		>
			<path
				d="M1.00732.957845 1.00732 2.99951M1.00732 5.04175H1.00179"
				stroke={stroke}
				strokeWidth={strokeWidth}
				strokeLinecap="round"
				strokeLinejoin="round"
			/>
		</svg>
	);
}

SeverityWarningIcon.defaultProps = {
	width: 2,
	height: 6,
	fill: 'none',
	stroke: '#FFD778',
	strokeWidth: '0.978299',
};

export default SeverityWarningIcon;
frontend/src/assets/CustomIcons/GroupByIcon.tsx (new file, 27 lines)
@@ -0,0 +1,27 @@
import { Color } from '@signozhq/design-tokens';
import { useIsDarkMode } from 'hooks/useDarkMode';

function GroupByIcon(): JSX.Element {
	const isDarkMode = useIsDarkMode();
	return (
		<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg">
			<g
				clipPath="url(#prefix__clip0_4344_1236)"
				stroke={isDarkMode ? Color.BG_VANILLA_100 : Color.BG_INK_500}
				strokeWidth="1.167"
				strokeLinecap="round"
				strokeLinejoin="round"
			>
				<path d="M4.667 1.167H2.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V2.333c0-.644-.522-1.166-1.166-1.166zM8.167 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M11.667 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M5.833 10.5H2.917c-.992 0-1.75-.758-1.75-1.75v-.583" />
				<path d="M4.083 12.25l1.75-1.75-1.75-1.75M11.667 8.167H9.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V9.333c0-.644-.522-1.166-1.166-1.166z" />
			</g>
			<defs>
				<clipPath id="prefix__clip0_4344_1236">
					<path fill="#fff" d="M0 0h14v14H0z" />
				</clipPath>
			</defs>
		</svg>
	);
}

export default GroupByIcon;
@@ -0,0 +1,14 @@
.reset-button {
	display: flex;
	justify-content: space-between;
	align-items: center;
	background: var(--bg-ink-300);
	border: 1px solid var(--bg-slate-400);
}

.lightMode {
	.reset-button {
		background: var(--bg-vanilla-100);
		border-color: var(--bg-vanilla-300);
	}
}
frontend/src/components/AlertDetailsFilters/Filters.tsx (new file, 11 lines)
@@ -0,0 +1,11 @@
import './Filters.styles.scss';

import DateTimeSelector from 'container/TopNav/DateTimeSelectionV2';

export function Filters(): JSX.Element {
	return (
		<div className="filters">
			<DateTimeSelector showAutoRefresh={false} hideShareModal showResetButton />
		</div>
	);
}
@@ -0,0 +1,137 @@
import { Button, Modal, Typography } from 'antd';
import updateCreditCardApi from 'api/billing/checkout';
import logEvent from 'api/common/logEvent';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import useLicense from 'hooks/useLicense';
import { useNotifications } from 'hooks/useNotifications';
import { CreditCard, X } from 'lucide-react';
import { useEffect, useState } from 'react';
import { useMutation } from 'react-query';
import { useLocation } from 'react-router-dom';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { CheckoutSuccessPayloadProps } from 'types/api/billing/checkout';
import { License } from 'types/api/licenses/def';

export default function ChatSupportGateway(): JSX.Element {
	const { notifications } = useNotifications();
	const [activeLicense, setActiveLicense] = useState<License | null>(null);

	const [isAddCreditCardModalOpen, setIsAddCreditCardModalOpen] = useState(
		false,
	);

	const { data: licenseData, isFetching } = useLicense();

	useEffect(() => {
		const activeValidLicense =
			licenseData?.payload?.licenses?.find(
				(license) => license.isCurrent === true,
			) || null;

		setActiveLicense(activeValidLicense);
	}, [licenseData, isFetching]);

	const handleBillingOnSuccess = (
		data: ErrorResponse | SuccessResponse<CheckoutSuccessPayloadProps, unknown>,
	): void => {
		if (data?.payload?.redirectURL) {
			const newTab = document.createElement('a');
			newTab.href = data.payload.redirectURL;
			newTab.target = '_blank';
			newTab.rel = 'noopener noreferrer';
			newTab.click();
		}
	};

	const handleBillingOnError = (): void => {
		notifications.error({
			message: SOMETHING_WENT_WRONG,
		});
	};

	const { mutate: updateCreditCard, isLoading: isLoadingBilling } = useMutation(
		updateCreditCardApi,
		{
			onSuccess: (data) => {
				handleBillingOnSuccess(data);
			},
			onError: handleBillingOnError,
		},
	);
	const { pathname } = useLocation();
	const handleAddCreditCard = (): void => {
		logEvent('Add Credit card modal: Clicked', {
			source: `intercom icon`,
			page: pathname,
		});

		updateCreditCard({
			licenseKey: activeLicense?.key || '',
			successURL: window.location.href,
			cancelURL: window.location.href,
		});
	};

	return (
		<>
			<div className="chat-support-gateway">
				<Button
					className="chat-support-gateway-btn"
					onClick={(): void => {
						logEvent('Disabled Chat Support: Clicked', {
							source: `intercom icon`,
							page: pathname,
						});

						setIsAddCreditCardModalOpen(true);
					}}
				>
					<svg
						xmlns="http://www.w3.org/2000/svg"
						viewBox="0 0 28 32"
						className="chat-support-gateway-btn-icon"
					>
						<path d="M28 32s-4.714-1.855-8.527-3.34H3.437C1.54 28.66 0 27.026 0 25.013V3.644C0 1.633 1.54 0 3.437 0h21.125c1.898 0 3.437 1.632 3.437 3.645v18.404H28V32zm-4.139-11.982a.88.88 0 00-1.292-.105c-.03.026-3.015 2.681-8.57 2.681-5.486 0-8.517-2.636-8.571-2.684a.88.88 0 00-1.29.107 1.01 1.01 0 00-.219.708.992.992 0 00.318.664c.142.128 3.537 3.15 9.762 3.15 6.226 0 9.621-3.022 9.763-3.15a.992.992 0 00.317-.664 1.01 1.01 0 00-.218-.707z" />
					</svg>
				</Button>
			</div>

			{/* Add Credit Card Modal */}
			<Modal
				className="add-credit-card-modal"
				title={<span className="title">Add Credit Card for Chat Support</span>}
				open={isAddCreditCardModalOpen}
				closable
				onCancel={(): void => setIsAddCreditCardModalOpen(false)}
				destroyOnClose
				footer={[
					<Button
						key="cancel"
						onClick={(): void => setIsAddCreditCardModalOpen(false)}
						className="cancel-btn"
						icon={<X size={16} />}
					>
						Cancel
					</Button>,
					<Button
						key="submit"
						type="primary"
						icon={<CreditCard size={16} />}
						size="middle"
						loading={isLoadingBilling}
						disabled={isLoadingBilling}
						onClick={handleAddCreditCard}
						className="add-credit-card-btn"
					>
						Add Credit Card
					</Button>,
				]}
			>
				<Typography.Text className="add-credit-card-text">
					You're currently on <span className="highlight-text">Trial plan</span>.
					Add a credit card to access SigNoz chat support for your workspace.
				</Typography.Text>
			</Modal>
		</>
	);
}
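handleBillingOnSuccess opens the checkout URL by synthesizing an anchor click rather than calling window.open, which keeps the rel="noopener noreferrer" protection explicit on the navigation. The same pattern in isolation (helper name is ours):

// Open a URL in a new tab without handing the new page a window.opener reference.
function openInNewTab(url: string): void {
  const anchor = document.createElement('a');
  anchor.href = url;
  anchor.target = '_blank';
  anchor.rel = 'noopener noreferrer'; // blocks opener access and referrer leakage
  anchor.click();
}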
@@ -12,6 +12,20 @@ beforeAll(() => {
 	matchMedia();
 });

+jest.mock('uplot', () => {
+	const paths = {
+		spline: jest.fn(),
+		bars: jest.fn(),
+	};
+	const uplotMock = jest.fn(() => ({
+		paths,
+	}));
+	return {
+		paths,
+		default: uplotMock,
+	};
+});
+
 jest.mock('react-dnd', () => ({
 	useDrop: jest.fn().mockImplementation(() => [jest.fn(), jest.fn(), jest.fn()]),
 	useDrag: jest.fn().mockImplementation(() => [jest.fn(), jest.fn(), jest.fn()]),
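The uplot mock factory returns both the named paths object and a default key so either access style can resolve in tests; exactly how a default import binds depends on the project's module interop config, but the registered mock itself can be inspected directly. A hedged sketch of a test relying on it:

// Hypothetical assertion: inspect the mocked module shape registered above.
it('registers the uplot mock', () => {
  const uplotModule = jest.requireMock('uplot');

  expect(uplotModule.paths.spline).toBeDefined(); // path builders are jest.fn() stubs
  expect(uplotModule.default).toBeDefined(); // the mocked uPlot constructor
});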
@@ -139,6 +139,7 @@ export const getGraphOptions = (
 		},
 		scales: {
 			x: {
+				stacked: isStacked,
 				grid: {
 					display: true,
 					color: getGridColor(),
@@ -165,6 +166,7 @@ export const getGraphOptions = (
 				ticks: { color: getAxisLabelColor(currentTheme) },
 			},
 			y: {
+				stacked: isStacked,
 				display: true,
 				grid: {
 					display: true,
@@ -178,9 +180,6 @@ export const getGraphOptions = (
 				},
 			},
 		},
-		stacked: {
-			display: isStacked === undefined ? false : 'auto',
-		},
 	},
 	elements: {
 		line: {
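These last hunks move stacking onto the x and y scale definitions and delete the freestanding stacked entry, which (assuming this options object feeds Chart.js, as the scales.x / scales.y shape suggests) would otherwise register a bogus scale named "stacked". A minimal sketch of per-axis stacking under that assumption:

import Chart from 'chart.js/auto';

// Hypothetical canvas and data; the point is that `stacked` lives on each axis.
const canvas = document.createElement('canvas');
new Chart(canvas, {
  type: 'bar',
  data: {
    labels: ['a', 'b'],
    datasets: [
      { label: 's1', data: [1, 2] },
      { label: 's2', data: [3, 4] },
    ],
  },
  options: {
    scales: {
      x: { stacked: true }, // group bars along x
      y: { stacked: true }, // sum values on y
    },
  },
});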
Some files were not shown because too many files have changed in this diff.