Compare commits
2 Commits: v0.56.0-cl ... error-resp

Commits in this comparison: a58d70079b, 2ae341cfa9
.github/CODEOWNERS (6 changes)

@@ -5,6 +5,6 @@
 /frontend/ @YounixM
 /frontend/src/container/MetricsApplication @srikanthccv
 /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
-/deploy/ @SigNoz/devops
-/sample-apps/ @SigNoz/devops
-.github @SigNoz/devops
+/deploy/ @prashant-shahi
+/sample-apps/ @prashant-shahi
+.github @prashant-shahi
.github/ISSUE_TEMPLATE/request_dashboard.md (49 lines)

@@ -1,49 +0,0 @@ (entire file removed)
---
name: Request Dashboard
about: Request a new dashboard for the SigNoz Dashboards repository
title: '[Dashboard Request] '
labels: 'dashboard-template'
assignees: ''

---

<!-- Use this template to request a new dashboard for the SigNoz Dashboards repository. Providing detailed information will help us understand your needs better and speed up the dashboard creation process. -->

## Dashboard Name

<!-- Provide the name for the requested dashboard. Be specific (e.g., "MySQL Monitoring Dashboard"). -->

## Expected Dashboard Sections and Panels

(Can be tweaked (add or remove panels/sections) according to available metrics)

### Section Name

<!-- Brief description of what this section should display (e.g., "Resource usage metrics for MySQL database"). -->

### Panel Name

<!-- Description of the panel (e.g., "Displays current CPU usage, memory usage, etc."). -->

<!-- - **Example:**
  - **Section**: Resource Metrics
    - **Panel**: CPU Usage - Displays the current CPU usage across all database instances.
    - **Panel**: Memory Usage - Displays the total memory used by the MySQL process. -->

<!-- Repeat this format for any additional sections or panels. -->

## Expected Dashboard Variables

<!-- List any dashboard variables that should be included in the dashboard. Examples could be `deployment.environment`, `hostname`, `region`, etc. -->

## Additional Comments or Requirements

<!-- Include any other details, special requirements, or specific visualizations you'd like to request for this dashboard. -->

## References or Screenshots

<!-- Add any references or screenshots of requested dashboard if available. -->

## 📋 Notes

Please review the [CONTRIBUTING.md](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md) for guidelines on dashboard structure, naming conventions, and how to submit a pull request.
.github/workflows/build.yaml (8 changes)

@@ -8,13 +8,6 @@ on:
     - release/v*

 jobs:
-  check-no-ee-references:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - name: Run check
-        run: make check-no-ee-references
-
   build-frontend:
     runs-on: ubuntu-latest
     steps:
@@ -43,6 +36,7 @@ jobs:
         run: |
           echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
           echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
+          echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
       - name: Install dependencies
         run: cd frontend && yarn install
       - name: Run ESLint
.github/workflows/push.yaml (3 changes)

@@ -9,6 +9,7 @@ on:
     - v*

 jobs:

   image-build-and-push-query-service:
     runs-on: ubuntu-latest
     steps:
@@ -150,13 +151,13 @@ jobs:
         run: |
           echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
           echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
           echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
           echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
           echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
           echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
           echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
           echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
           echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
           echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
       - name: Install dependencies
         working-directory: frontend
         run: yarn install
.github/workflows/staging-deployment.yaml (3 changes)

@@ -30,7 +30,6 @@ jobs:
       GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
       GCP_ZONE: ${{ secrets.GCP_ZONE }}
       GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
       CLOUDSDK_CORE_DISABLE_PROMPTS: 1
     run: |
       read -r -d '' COMMAND <<EOF || true
       echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
@@ -52,4 +51,4 @@ jobs:
       make build-frontend-amd64
       make run-testing
       EOF
-      gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
+      gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
.github/workflows/testing-deployment.yaml (3 changes)

@@ -30,7 +30,6 @@ jobs:
       GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
       GCP_ZONE: ${{ secrets.GCP_ZONE }}
       GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
       CLOUDSDK_CORE_DISABLE_PROMPTS: 1
     run: |
       read -r -d '' COMMAND <<EOF || true
       echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
@@ -53,4 +52,4 @@ jobs:
       make build-frontend-amd64
       make run-testing
       EOF
-      gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
+      gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
.gitignore (3 changes)

@@ -67,6 +67,3 @@ e2e/.auth
 # go
 vendor/
 **/main/**
-
-# git-town
-.git-branches.toml
.scripts/commentLinesForSetup.sh (new file, 7 lines)

@@ -0,0 +1,7 @@
#!/bin/sh

# Comments out the query-service & frontend sections of deploy/docker/clickhouse-setup/docker-compose.yaml
# Update the line numbers when deploy/docker/clickhouse-setup/docker-compose.yaml changes.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz

sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
@@ -30,7 +30,6 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s
   - [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
 - [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
   - [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
-- [Contribute to Dashboards](#6-contribute-to-dashboards-)
 - [Other Ways to Contribute](#other-ways-to-contribute)

 # 1. General Instructions 📝
@@ -38,7 +37,7 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s
 ## 1.1 For Creating Issue(s)
 Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.

-**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
+**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)

 #### Details like these are incredibly useful:

@@ -57,7 +56,7 @@ Before making any significant changes and before filing a new issue, please chec
 Discussing your proposed changes ahead of time will make the contribution
 process smooth for everyone 🙌.

-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**

 <hr>

@@ -98,14 +97,13 @@ GitHub provides additional document on [forking a repository](https://help.githu
 stability and quality of the component.

-You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack community](https://signoz.io/slack).
+You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [SLACK](https://signoz.io/slack).

 ### Pointers:
 - If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
 - If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
 - If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
 - If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
-- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).

 <hr>

@@ -119,7 +117,7 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be

 - Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**

 <hr>

@@ -129,13 +127,14 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be

 - [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
 - [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
-- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz)

 Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.

-**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it.
+**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻

-**[`^top^`](#contributing-guidelines)**
+⚠️ If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
+
+**[`^top^`](#)**

 <hr>

@@ -189,7 +188,7 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
 ### Important Notes:
 The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)

-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**

 ## 3.2 Contribute to Frontend without installing SigNoz backend

@@ -210,7 +209,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi

 **Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)

-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**

 <hr>

@@ -310,7 +309,7 @@ Click the button below. A workspace with all required environments will be creat

 > To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->

-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**

 <hr>

@@ -348,7 +347,7 @@ curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-
 ```bash
 kubectl -n sample-application run strzal --image=djbingham/curl \
   --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
-  'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
+  'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
 ```

 **5.1.3 To stop the load generation:**

@@ -366,21 +365,10 @@ curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-
   | HOTROD_NAMESPACE=sample-application bash
 ```

-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**

 ---

-# 6. Contribute to Dashboards 📈
-
-**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)**
-
-To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief:
-
-1. Create a dashboard JSON file.
-2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed.
-3. Include screenshots of the dashboard in the `assets/` directory.
-4. Submit a pull request for review.
-
 ## Other Ways to Contribute

 There are many other ways to get involved with the community and to participate in this project:
@@ -391,6 +379,7 @@ There are many other ways to get involved with the community and to participate
 - Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
 - Tell others about the project on Twitter, your blog, etc.


 Again, Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

 Thank You!
Makefile (10 changes)

@@ -178,15 +178,6 @@ clear-swarm-ch:
	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
	sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"

-check-no-ee-references:
-	@echo "Checking for 'ee' package references in 'pkg' directory..."
-	@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
-		echo "Error: Found references to 'ee' packages in 'pkg' directory"; \
-		exit 1; \
-	else \
-		echo "No references to 'ee' packages found in 'pkg' directory"; \
-	fi
-
 test:
	go test ./pkg/query-service/app/metrics/...
	go test ./pkg/query-service/cache/...
@@ -197,4 +188,3 @@ test:
	go test ./pkg/query-service/tests/integration/...
	go test ./pkg/query-service/rules/...
	go test ./pkg/query-service/collectorsimulator/...
-	go test ./pkg/query-service/postprocess/...
@@ -198,14 +198,14 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu

 #### Frontend

 - [Palash Gupta](https://github.com/palashgdev)
 - [Yunus M](https://github.com/YounixM)
 - [Vikrant Gupta](https://github.com/vikrantgupta25)
 - [Sagar Rajput](https://github.com/SagarRajput-7)
 - [Rajat Dabade](https://github.com/Rajat-Dabade)

 #### DevOps

 - [Prashant Shahi](https://github.com/prashant-shahi)
 - [Vibhu Pandey](https://github.com/grandwizard28)
 - [Dhawal Sanghvi](https://github.com/dhawal1248)

 <br /><br />
@@ -23,9 +23,6 @@
     [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
     -->
     <level>information</level>
-    <formatting>
-        <type>json</type>
-    </formatting>
     <log>/var/log/clickhouse-server/clickhouse-server.log</log>
     <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
     <!-- Rotation policy
@@ -652,12 +649,12 @@

     See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
     -->

     <!--
     <macros>
         <shard>01</shard>
         <replica>example01-01-1</replica>
     </macros>
     -->

     <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
@@ -146,11 +146,11 @@ services:
         condition: on-failure

   query-service:
-    image: signoz/query-service:0.55.0
+    image: signoz/query-service:0.46.0
     command:
       [
         "-config=/root/config/prometheus.yml",
         "--use-logs-new-schema=true"
         # "--prefer-delta=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
@@ -186,7 +186,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:0.55.0
+    image: signoz/frontend:0.46.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -199,7 +199,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/signoz-otel-collector:0.102.10
+    image: signoz/signoz-otel-collector:0.88.24
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -211,7 +211,6 @@ services:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
-      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
       - DOCKER_MULTI_NODE_CLUSTER=false
@@ -238,7 +237,7 @@ services:
       - query-service

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.102.10
+    image: signoz/signoz-schema-migrator:0.88.24
     deploy:
       restart_policy:
         condition: on-failure
@@ -36,7 +36,6 @@ receivers:
       # endpoint: 0.0.0.0:6832
   hostmetrics:
     collection_interval: 30s
-    root_path: /hostfs
     scrapers:
       cpu: {}
       load: {}
@@ -144,7 +143,6 @@ exporters:
     dsn: tcp://clickhouse:9000/signoz_logs
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
     timeout: 10s
-    use_new_schema: true
 extensions:
   health_check:
     endpoint: 0.0.0.0:13133
@@ -155,8 +153,6 @@ extensions:

 service:
   telemetry:
-    logs:
-      encoding: json
     metrics:
       address: 0.0.0.0:8888
   extensions: [health_check, zpages, pprof]
@@ -23,9 +23,6 @@
     [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
     -->
     <level>information</level>
-    <formatting>
-        <type>json</type>
-    </formatting>
     <log>/var/log/clickhouse-server/clickhouse-server.log</log>
     <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
     <!-- Rotation policy
@@ -652,12 +649,12 @@

     See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
     -->

     <!--
     <macros>
         <shard>01</shard>
         <replica>example01-01-1</replica>
     </macros>
     -->

     <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
@@ -66,7 +66,7 @@ services:
       - --storage.path=/data

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -81,7 +81,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.102.10
+    image: signoz/signoz-otel-collector:0.88.24
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -93,8 +93,6 @@ services:
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
-      - /var/lib/docker/containers:/var/lib/docker/containers:ro
-      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
     ports:
@@ -25,7 +25,7 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
         "--use-logs-new-schema=true"
         # "--prefer-delta=true"
       ]
     ports:
       - "6060:6060"
@@ -164,13 +164,13 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.55.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.46.0}
     container_name: signoz-query-service
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "-gateway-url=https://api.staging.signoz.cloud",
-        "--use-logs-new-schema=true"
+        "-gateway-url=https://api.staging.signoz.cloud"
         # "--prefer-delta=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
@@ -204,7 +204,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.55.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.46.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -216,7 +216,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -230,7 +230,7 @@ services:

   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.10}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.24}
     container_name: signoz-otel-collector
     command:
       [
@@ -244,7 +244,6 @@ services:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
-      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
       - DOCKER_MULTI_NODE_CLUSTER=false
@@ -164,12 +164,12 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.55.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.46.0}
     container_name: signoz-query-service
     command:
       [
-        "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        "-config=/root/config/prometheus.yml"
         # "--prefer-delta=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
@@ -203,7 +203,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.55.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.46.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -215,7 +215,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -229,7 +229,7 @@ services:

   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.10}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.24}
     container_name: signoz-otel-collector
     command:
       [
@@ -243,7 +243,6 @@ services:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
-      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
       - DOCKER_MULTI_NODE_CLUSTER=false
@@ -36,7 +36,6 @@ receivers:
       # endpoint: 0.0.0.0:6832
   hostmetrics:
     collection_interval: 30s
-    root_path: /hostfs
     scrapers:
       cpu: {}
       load: {}
@@ -154,13 +153,10 @@ exporters:
     dsn: tcp://clickhouse:9000/signoz_logs
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
     timeout: 10s
-    use_new_schema: true
     # logging: {}

 service:
   telemetry:
-    logs:
-      encoding: json
     metrics:
       address: 0.0.0.0:8888
   extensions:
@@ -1,8 +1,3 @@
-map $http_upgrade $connection_upgrade {
-    default upgrade;
-    '' close;
-}
-
 server {
     listen 3301;
     server_name _;
@@ -47,14 +42,6 @@ server {
         proxy_read_timeout 600s;
     }

-    location /ws {
-        proxy_pass http://query-service:8080/ws;
-        proxy_http_version 1.1;
-        proxy_set_header Upgrade "websocket";
-        proxy_set_header Connection "upgrade";
-        proxy_read_timeout 86400;
-    }
-
     # redirect server error pages to the static page /50x.html
     #
     error_page 500 502 503 504 /50x.html;
@@ -389,7 +389,7 @@ trap bye EXIT

 URL="https://api.segment.io/v1/track"
 HEADER_1="Content-Type: application/json"
-HEADER_2="Authorization: Basic OWtScko3b1BDR1BFSkxGNlFqTVBMdDVibGpGaFJRQnI="
+HEADER_2="Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6"

 send_event() {
   error=""
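The hunk above swaps the Segment write key baked into the install script: `send_event` posts JSON to `https://api.segment.io/v1/track`, authenticating with the write key as the Basic-auth username (the new base64 value in `HEADER_2` decodes to `writekey:`). As a rough, hedged illustration of the same call in Go - the write key, anonymous ID, and event name below are placeholders, not values taken from the script:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// trackEvent posts a single event to Segment's HTTP tracking API.
// The write key is sent as the Basic-auth username with an empty
// password, which is what the base64 blob in HEADER_2 encodes.
func trackEvent(writeKey, anonymousID, event string) error {
	payload := map[string]any{
		"anonymousId": anonymousID,
		"event":       event,
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPost, "https://api.segment.io/v1/track", bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(writeKey, "")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("segment returned %s", resp.Status)
	}
	return nil
}

func main() {
	// Placeholder values; the real write key lives in the install script.
	_ = trackEvent("WRITE_KEY", "install-script", "Installation Started")
}
```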
@@ -1,44 +0,0 @@ (entire file removed)
package anomaly

import (
	"context"

	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type DailyProvider struct {
	BaseSeasonalProvider
}

var _ BaseProvider = (*DailyProvider)(nil)

func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
	return &dp.BaseSeasonalProvider
}

// NewDailyProvider uses the same generic option type
func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
	dp := &DailyProvider{
		BaseSeasonalProvider: BaseSeasonalProvider{},
	}

	for _, opt := range opts {
		opt(dp)
	}

	dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
		Reader:        dp.reader,
		Cache:         dp.cache,
		KeyGenerator:  queryBuilder.NewKeyGenerator(),
		FluxInterval:  dp.fluxInterval,
		FeatureLookup: dp.ff,
	})

	return dp
}

func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
	req.Seasonality = SeasonalityDaily
	return p.getAnomalies(ctx, req)
}
@@ -1,44 +0,0 @@ (entire file removed)
package anomaly

import (
	"context"

	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type HourlyProvider struct {
	BaseSeasonalProvider
}

var _ BaseProvider = (*HourlyProvider)(nil)

func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
	return &hp.BaseSeasonalProvider
}

// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
	hp := &HourlyProvider{
		BaseSeasonalProvider: BaseSeasonalProvider{},
	}

	for _, opt := range opts {
		opt(hp)
	}

	hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
		Reader:        hp.reader,
		Cache:         hp.cache,
		KeyGenerator:  queryBuilder.NewKeyGenerator(),
		FluxInterval:  hp.fluxInterval,
		FeatureLookup: hp.ff,
	})

	return hp
}

func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
	req.Seasonality = SeasonalityHourly
	return p.getAnomalies(ctx, req)
}
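Both removed providers rely on the generic functional-option pattern defined in the (also removed) base-provider file further below: an option is a `GenericProviderOption[T BaseProvider] func(T)`, so a single `WithCache`/`WithReader` helper can configure any provider that exposes its embedded `BaseSeasonalProvider`. Here is a self-contained sketch of that pattern with simplified stand-in types (the names below are illustrative, not the SigNoz ones):

```go
package main

import "fmt"

// Base holds configuration shared by all providers.
type Base struct{ cacheDSN string }

// BaseProvider is satisfied by any provider exposing its Base.
type BaseProvider interface{ GetBase() *Base }

// Option is a functional option generic over the concrete provider type.
type Option[T BaseProvider] func(T)

// WithCacheDSN works for every provider type, like WithCache in the diff.
func WithCacheDSN[T BaseProvider](dsn string) Option[T] {
	return func(p T) { p.GetBase().cacheDSN = dsn }
}

type DailyProvider struct{ Base }

func (d *DailyProvider) GetBase() *Base { return &d.Base }

// NewDailyProvider mirrors the removed constructors: apply the options
// first, then finish wiring anything that depends on them.
func NewDailyProvider(opts ...Option[*DailyProvider]) *DailyProvider {
	d := &DailyProvider{}
	for _, opt := range opts {
		opt(d)
	}
	return d
}

func main() {
	p := NewDailyProvider(WithCacheDSN[*DailyProvider]("redis://localhost:6379"))
	fmt.Println(p.cacheDSN) // redis://localhost:6379
}
```

The type parameter is what lets one `WithCacheDSN` serve every provider while each constructor still receives options of exactly its own concrete type.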
@@ -1,248 +0,0 @@ (entire file removed)
package anomaly

import (
	"math"
	"time"

	"go.signoz.io/signoz/pkg/query-service/common"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

type Seasonality string

const (
	SeasonalityHourly Seasonality = "hourly"
	SeasonalityDaily  Seasonality = "daily"
	SeasonalityWeekly Seasonality = "weekly"
)

func (s Seasonality) String() string {
	return string(s)
}

var (
	oneWeekOffset = 24 * 7 * time.Hour.Milliseconds()
	oneDayOffset  = 24 * time.Hour.Milliseconds()
	oneHourOffset = time.Hour.Milliseconds()
	fiveMinOffset = 5 * time.Minute.Milliseconds()
)

func (s Seasonality) IsValid() bool {
	switch s {
	case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
		return true
	default:
		return false
	}
}

type GetAnomaliesRequest struct {
	Params      *v3.QueryRangeParamsV3
	Seasonality Seasonality
}

type GetAnomaliesResponse struct {
	Results []*v3.Result
}

// anomalyParams is the params for anomaly detection
// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
//
//	            ^                       ^
//	            |                       |
//	(rounded value for past period)     + (seasonal growth)
//
// score = abs(value - prediction) / stddev (current_season_query)
type anomalyQueryParams struct {
	// CurrentPeriodQuery is the query range params for period user is looking at or eval window
	// Example: (now-5m, now), (now-30m, now), (now-1h, now)
	// The results obtained from this query are used to compare with predicted values
	// and to detect anomalies
	CurrentPeriodQuery *v3.QueryRangeParamsV3
	// PastPeriodQuery is the query range params for past seasonal period
	// Example: For weekly seasonality, (now-1w-5m, now-1w)
	//        : For daily seasonality, (now-1d-5m, now-1d)
	//        : For hourly seasonality, (now-1h-5m, now-1h)
	PastPeriodQuery *v3.QueryRangeParamsV3
	// CurrentSeasonQuery is the query range params for current period (seasonal)
	// Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
	//        : For daily seasonality, this is the query range params for the (now-1d-5m, now)
	//        : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
	CurrentSeasonQuery *v3.QueryRangeParamsV3
	// PastSeasonQuery is the query range params for past seasonal period to the current season
	// Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
	//        : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
	//        : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
	PastSeasonQuery *v3.QueryRangeParamsV3

	// Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
	// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
	//        : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
	//        : For hourly seasonality, this is the query range params for the (now-3h-5m, now-2h)
	Past2SeasonQuery *v3.QueryRangeParamsV3
	// Past3SeasonQuery is the query range params for past 3 seasonal period to the current season
	// Example: For weekly seasonality, this is the query range params for the (now-4w-5m, now-3w)
	//        : For daily seasonality, this is the query range params for the (now-4d-5m, now-3d)
	//        : For hourly seasonality, this is the query range params for the (now-4h-5m, now-3h)
	Past3SeasonQuery *v3.QueryRangeParamsV3
}

func updateStepInterval(req *v3.QueryRangeParamsV3) {
	start := req.Start
	end := req.End

	req.Step = int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60))
	for _, q := range req.CompositeQuery.BuilderQueries {
		// If the step interval is less than the minimum allowed step interval, set it to the minimum allowed step interval
		if minStep := common.MinAllowedStepInterval(start, end); q.StepInterval < minStep {
			q.StepInterval = minStep
		}
	}
}

func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonality) *anomalyQueryParams {
	start := req.Start
	end := req.End

	currentPeriodQuery := &v3.QueryRangeParamsV3{
		Start:          start,
		End:            end,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(currentPeriodQuery)

	var pastPeriodStart, pastPeriodEnd int64

	switch seasonality {
	// for one week period, we fetch the data from the past week with 5 min offset
	case SeasonalityWeekly:
		pastPeriodStart = start - oneWeekOffset - fiveMinOffset
		pastPeriodEnd = end - oneWeekOffset
	// for one day period, we fetch the data from the past day with 5 min offset
	case SeasonalityDaily:
		pastPeriodStart = start - oneDayOffset - fiveMinOffset
		pastPeriodEnd = end - oneDayOffset
	// for one hour period, we fetch the data from the past hour with 5 min offset
	case SeasonalityHourly:
		pastPeriodStart = start - oneHourOffset - fiveMinOffset
		pastPeriodEnd = end - oneHourOffset
	}

	pastPeriodQuery := &v3.QueryRangeParamsV3{
		Start:          pastPeriodStart,
		End:            pastPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(pastPeriodQuery)

	// seasonality growth trend
	var currentGrowthPeriodStart, currentGrowthPeriodEnd int64
	switch seasonality {
	case SeasonalityWeekly:
		currentGrowthPeriodStart = start - oneWeekOffset
		currentGrowthPeriodEnd = end
	case SeasonalityDaily:
		currentGrowthPeriodStart = start - oneDayOffset
		currentGrowthPeriodEnd = end
	case SeasonalityHourly:
		currentGrowthPeriodStart = start - oneHourOffset
		currentGrowthPeriodEnd = end
	}

	currentGrowthQuery := &v3.QueryRangeParamsV3{
		Start:          currentGrowthPeriodStart,
		End:            currentGrowthPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(currentGrowthQuery)

	var pastGrowthPeriodStart, pastGrowthPeriodEnd int64
	switch seasonality {
	case SeasonalityWeekly:
		pastGrowthPeriodStart = start - 2*oneWeekOffset
		pastGrowthPeriodEnd = start - 1*oneWeekOffset
	case SeasonalityDaily:
		pastGrowthPeriodStart = start - 2*oneDayOffset
		pastGrowthPeriodEnd = start - 1*oneDayOffset
	case SeasonalityHourly:
		pastGrowthPeriodStart = start - 2*oneHourOffset
		pastGrowthPeriodEnd = start - 1*oneHourOffset
	}

	pastGrowthQuery := &v3.QueryRangeParamsV3{
		Start:          pastGrowthPeriodStart,
		End:            pastGrowthPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(pastGrowthQuery)

	var past2GrowthPeriodStart, past2GrowthPeriodEnd int64
	switch seasonality {
	case SeasonalityWeekly:
		past2GrowthPeriodStart = start - 3*oneWeekOffset
		past2GrowthPeriodEnd = start - 2*oneWeekOffset
	case SeasonalityDaily:
		past2GrowthPeriodStart = start - 3*oneDayOffset
		past2GrowthPeriodEnd = start - 2*oneDayOffset
	case SeasonalityHourly:
		past2GrowthPeriodStart = start - 3*oneHourOffset
		past2GrowthPeriodEnd = start - 2*oneHourOffset
	}

	past2GrowthQuery := &v3.QueryRangeParamsV3{
		Start:          past2GrowthPeriodStart,
		End:            past2GrowthPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(past2GrowthQuery)

	var past3GrowthPeriodStart, past3GrowthPeriodEnd int64
	switch seasonality {
	case SeasonalityWeekly:
		past3GrowthPeriodStart = start - 4*oneWeekOffset
		past3GrowthPeriodEnd = start - 3*oneWeekOffset
	case SeasonalityDaily:
		past3GrowthPeriodStart = start - 4*oneDayOffset
		past3GrowthPeriodEnd = start - 3*oneDayOffset
	case SeasonalityHourly:
		past3GrowthPeriodStart = start - 4*oneHourOffset
		past3GrowthPeriodEnd = start - 3*oneHourOffset
	}

	past3GrowthQuery := &v3.QueryRangeParamsV3{
		Start:          past3GrowthPeriodStart,
		End:            past3GrowthPeriodEnd,
		CompositeQuery: req.CompositeQuery.Clone(),
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}
	updateStepInterval(past3GrowthQuery)

	return &anomalyQueryParams{
		CurrentPeriodQuery: currentPeriodQuery,
		PastPeriodQuery:    pastPeriodQuery,
		CurrentSeasonQuery: currentGrowthQuery,
		PastSeasonQuery:    pastGrowthQuery,
		Past2SeasonQuery:   past2GrowthQuery,
		Past3SeasonQuery:   past3GrowthQuery,
	}
}

type anomalyQueryResults struct {
	CurrentPeriodResults []*v3.Result
	PastPeriodResults    []*v3.Result
	CurrentSeasonResults []*v3.Result
	PastSeasonResults    []*v3.Result
	Past2SeasonResults   []*v3.Result
	Past3SeasonResults   []*v3.Result
}
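The window bookkeeping in `prepareAnomalyQueryParams` is easier to see with concrete values. A minimal sketch for the weekly case, assuming the same millisecond offsets as the removed file (names simplified, output windows match the switch arms above):

```go
package main

import (
	"fmt"
	"time"
)

// Offsets in milliseconds, matching the removed params file.
var (
	oneWeek = 24 * 7 * time.Hour.Milliseconds()
	fiveMin = 5 * time.Minute.Milliseconds()
)

type window struct{ start, end int64 }

// weeklyWindows derives the six query windows the detector compares,
// from the requested [start, end) range (Unix milliseconds).
func weeklyWindows(start, end int64) map[string]window {
	return map[string]window{
		"currentPeriod": {start, end},
		"pastPeriod":    {start - oneWeek - fiveMin, end - oneWeek},
		"currentSeason": {start - oneWeek, end},
		"pastSeason":    {start - 2*oneWeek, start - oneWeek},
		"past2Season":   {start - 3*oneWeek, start - 2*oneWeek},
		"past3Season":   {start - 4*oneWeek, start - 3*oneWeek},
	}
}

func main() {
	end := time.Now().UnixMilli()
	start := end - 30*time.Minute.Milliseconds()
	for name, w := range weeklyWindows(start, end) {
		fmt.Printf("%-14s %s .. %s\n", name,
			time.UnixMilli(w.start).Format(time.RFC3339),
			time.UnixMilli(w.end).Format(time.RFC3339))
	}
}
```

For a 30-minute eval window this yields the same 30-minute slice shifted back one to four weeks, plus the week-long "season" slices whose averages feed the prediction formula.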
@@ -1,9 +0,0 @@ (entire file removed)
package anomaly

import (
	"context"
)

type Provider interface {
	GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
}
@@ -1,466 +0,0 @@ (entire file removed)
package anomaly

import (
	"context"
	"math"
	"time"

	"go.signoz.io/signoz/pkg/query-service/cache"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/query-service/postprocess"
	"go.signoz.io/signoz/pkg/query-service/utils/labels"
	"go.uber.org/zap"
)

var (
	// TODO(srikanthccv): make this configurable?
	movingAvgWindowSize = 7
)

// BaseProvider is an interface that includes common methods for all provider types
type BaseProvider interface {
	GetBaseSeasonalProvider() *BaseSeasonalProvider
}

// GenericProviderOption is a generic type for provider options
type GenericProviderOption[T BaseProvider] func(T)

func WithCache[T BaseProvider](cache cache.Cache) GenericProviderOption[T] {
	return func(p T) {
		p.GetBaseSeasonalProvider().cache = cache
	}
}

func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericProviderOption[T] {
	return func(p T) {
		p.GetBaseSeasonalProvider().keyGenerator = keyGenerator
	}
}

func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
	return func(p T) {
		p.GetBaseSeasonalProvider().ff = ff
	}
}

func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
	return func(p T) {
		p.GetBaseSeasonalProvider().reader = reader
	}
}

type BaseSeasonalProvider struct {
	querierV2    interfaces.Querier
	reader       interfaces.Reader
	fluxInterval time.Duration
	cache        cache.Cache
	keyGenerator cache.KeyGenerator
	ff           interfaces.FeatureLookup
}

func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
	if !req.Seasonality.IsValid() {
		req.Seasonality = SeasonalityDaily
	}
	return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}

func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
	zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
	currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
	if err != nil {
		return nil, err
	}

	currentPeriodResults, err = postprocess.PostProcessResult(currentPeriodResults, params.CurrentPeriodQuery)
	if err != nil {
		return nil, err
	}

	zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
	pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
	if err != nil {
		return nil, err
	}

	pastPeriodResults, err = postprocess.PostProcessResult(pastPeriodResults, params.PastPeriodQuery)
	if err != nil {
		return nil, err
	}

	zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
	currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
	if err != nil {
		return nil, err
	}

	currentSeasonResults, err = postprocess.PostProcessResult(currentSeasonResults, params.CurrentSeasonQuery)
	if err != nil {
		return nil, err
	}

	zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
	pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
	if err != nil {
		return nil, err
	}

	pastSeasonResults, err = postprocess.PostProcessResult(pastSeasonResults, params.PastSeasonQuery)
	if err != nil {
		return nil, err
	}

	zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
	past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
	if err != nil {
		return nil, err
	}

	past2SeasonResults, err = postprocess.PostProcessResult(past2SeasonResults, params.Past2SeasonQuery)
	if err != nil {
		return nil, err
	}

	zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
	past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
	if err != nil {
		return nil, err
	}

	past3SeasonResults, err = postprocess.PostProcessResult(past3SeasonResults, params.Past3SeasonQuery)
	if err != nil {
		return nil, err
	}

	return &anomalyQueryResults{
		CurrentPeriodResults: currentPeriodResults,
		PastPeriodResults:    pastPeriodResults,
		CurrentSeasonResults: currentSeasonResults,
		PastSeasonResults:    pastSeasonResults,
		Past2SeasonResults:   past2SeasonResults,
		Past3SeasonResults:   past3SeasonResults,
	}, nil
}

// getMatchingSeries gets the matching series from the query result
// for the given series
func (p *BaseSeasonalProvider) getMatchingSeries(queryResult *v3.Result, series *v3.Series) *v3.Series {
	if queryResult == nil || len(queryResult.Series) == 0 {
		return nil
	}

	for _, curr := range queryResult.Series {
		currLabels := labels.FromMap(curr.Labels)
		seriesLabels := labels.FromMap(series.Labels)
		if currLabels.Hash() == seriesLabels.Hash() {
			return curr
		}
	}
	return nil
}

func (p *BaseSeasonalProvider) getAvg(series *v3.Series) float64 {
	if series == nil || len(series.Points) == 0 {
		return 0
	}
	var sum float64
	for _, smpl := range series.Points {
		sum += smpl.Value
	}
	return sum / float64(len(series.Points))
}

func (p *BaseSeasonalProvider) getStdDev(series *v3.Series) float64 {
	if series == nil || len(series.Points) == 0 {
		return 0
	}
	avg := p.getAvg(series)
	var sum float64
	for _, smpl := range series.Points {
		sum += math.Pow(smpl.Value-avg, 2)
	}
	return math.Sqrt(sum / float64(len(series.Points)))
}

// getMovingAvg gets the moving average for the given series
// for the given window size and start index
func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSize, startIdx int) float64 {
	if series == nil || len(series.Points) == 0 {
		return 0
	}
	if startIdx >= len(series.Points)-movingAvgWindowSize {
		startIdx = int(math.Max(0, float64(len(series.Points)-movingAvgWindowSize)))
	}
	var sum float64
	points := series.Points[startIdx:]
	for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
		sum += points[i].Value
	}
	avg := sum / float64(movingAvgWindowSize)
	return avg
}

func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
	if len(floats) == 0 {
		return 0
	}
	var sum float64
	for _, f := range floats {
		sum += f
	}
	return sum / float64(len(floats))
}

func (p *BaseSeasonalProvider) getPredictedSeries(
	series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
) *v3.Series {
	predictedSeries := &v3.Series{
		Labels:      series.Labels,
		LabelsArray: series.LabelsArray,
		Points:      []v3.Point{},
	}

	// for each point in the series, get the predicted value
	// the predicted value is the moving average (with window size = 7) of the previous period series
	// plus the average of the current season series
	// minus the mean of the past season series, past2 season series and past3 season series
	for idx, curr := range series.Points {
		predictedValue :=
			p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) +
				p.getAvg(currentSeasonSeries) -
				p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))

		if predictedValue < 0 {
			predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
		}

		zap.L().Info("predictedSeries",
			zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)),
			zap.Float64("avg", p.getAvg(currentSeasonSeries)),
			zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))),
			zap.Any("labels", series.Labels),
			zap.Float64("predictedValue", predictedValue),
		)
		predictedSeries.Points = append(predictedSeries.Points, v3.Point{
			Timestamp: curr.Timestamp,
			Value:     predictedValue,
		})
	}

	return predictedSeries
}

// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold
// moving avg of the previous period series + z score threshold * std dev of the series
// moving avg of the previous period series - z score threshold * std dev of the series
func (p *BaseSeasonalProvider) getBounds(
	series, predictedSeries *v3.Series,
	zScoreThreshold float64,
) (*v3.Series, *v3.Series) {
	upperBoundSeries := &v3.Series{
		Labels:      series.Labels,
		LabelsArray: series.LabelsArray,
		Points:      []v3.Point{},
	}

	lowerBoundSeries := &v3.Series{
		Labels:      series.Labels,
		LabelsArray: series.LabelsArray,
		Points:      []v3.Point{},
	}

	for idx, curr := range series.Points {
		upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
		lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
		upperBoundSeries.Points = append(upperBoundSeries.Points, v3.Point{
			Timestamp: curr.Timestamp,
			Value:     upperBound,
		})
		lowerBoundSeries.Points = append(lowerBoundSeries.Points, v3.Point{
			Timestamp: curr.Timestamp,
			Value:     math.Max(lowerBound, 0),
		})
	}

	return upperBoundSeries, lowerBoundSeries
}

// getExpectedValue gets the expected value for the given series
// for the given index
// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series
func (p *BaseSeasonalProvider) getExpectedValue(
	_, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, idx int,
) float64 {
	prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
	currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
	pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
	past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
	past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
	return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
}

// getScore gets the anomaly score for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getScore(
	series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
) float64 {
	expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
	return (value - expectedValue) / p.getStdDev(weekSeries)
}

// getAnomalyScores gets the anomaly scores for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getAnomalyScores(
	series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
) *v3.Series {
	anomalyScoreSeries := &v3.Series{
		Labels:      series.Labels,
		LabelsArray: series.LabelsArray,
		Points:      []v3.Point{},
	}

	for idx, curr := range series.Points {
		anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
		anomalyScoreSeries.Points = append(anomalyScoreSeries.Points, v3.Point{
			Timestamp: curr.Timestamp,
			Value:     anomalyScore,
		})
	}

	return anomalyScoreSeries
}

func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
	anomalyParams := p.getQueryParams(req)
	anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
	if err != nil {
		return nil, err
	}

	currentPeriodResultsMap := make(map[string]*v3.Result)
	for _, result := range anomalyQueryResults.CurrentPeriodResults {
		currentPeriodResultsMap[result.QueryName] = result
	}

	pastPeriodResultsMap := make(map[string]*v3.Result)
	for _, result := range anomalyQueryResults.PastPeriodResults {
		pastPeriodResultsMap[result.QueryName] = result
	}

	currentSeasonResultsMap := make(map[string]*v3.Result)
	for _, result := range anomalyQueryResults.CurrentSeasonResults {
		currentSeasonResultsMap[result.QueryName] = result
	}

	pastSeasonResultsMap := make(map[string]*v3.Result)
	for _, result := range anomalyQueryResults.PastSeasonResults {
		pastSeasonResultsMap[result.QueryName] = result
	}

	past2SeasonResultsMap := make(map[string]*v3.Result)
	for _, result := range anomalyQueryResults.Past2SeasonResults {
		past2SeasonResultsMap[result.QueryName] = result
	}

	past3SeasonResultsMap := make(map[string]*v3.Result)
	for _, result := range anomalyQueryResults.Past3SeasonResults {
		past3SeasonResultsMap[result.QueryName] = result
	}

	for _, result := range currentPeriodResultsMap {
		funcs := req.Params.CompositeQuery.BuilderQueries[result.QueryName].Functions

		var zScoreThreshold float64
		for _, f := range funcs {
			if f.Name == v3.FunctionNameAnomaly {
				value, ok := f.NamedArgs["z_score_threshold"]
				if ok {
					zScoreThreshold = value.(float64)
				} else {
					zScoreThreshold = 3
				}
				break
			}
		}

		pastPeriodResult, ok := pastPeriodResultsMap[result.QueryName]
		if !ok {
			continue
		}
		currentSeasonResult, ok := currentSeasonResultsMap[result.QueryName]
		if !ok {
			continue
		}
		pastSeasonResult, ok := pastSeasonResultsMap[result.QueryName]
		if !ok {
			continue
		}
		past2SeasonResult, ok := past2SeasonResultsMap[result.QueryName]
		if !ok {
			continue
		}
		past3SeasonResult, ok := past3SeasonResultsMap[result.QueryName]
		if !ok {
			continue
		}

		for _, series := range result.Series {
			stdDev := p.getStdDev(series)
			zap.L().Info("stdDev", zap.Float64("stdDev", stdDev), zap.Any("labels", series.Labels))

			pastPeriodSeries := p.getMatchingSeries(pastPeriodResult, series)
			currentSeasonSeries := p.getMatchingSeries(currentSeasonResult, series)
			pastSeasonSeries := p.getMatchingSeries(pastSeasonResult, series)
			past2SeasonSeries := p.getMatchingSeries(past2SeasonResult, series)
			past3SeasonSeries := p.getMatchingSeries(past3SeasonResult, series)

			prevSeriesAvg := p.getAvg(pastPeriodSeries)
			currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
			pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
			past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
			past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
			zap.L().Info("getAvg", zap.Float64("prevSeriesAvg", prevSeriesAvg), zap.Float64("currentSeasonSeriesAvg", currentSeasonSeriesAvg), zap.Float64("pastSeasonSeriesAvg", pastSeasonSeriesAvg), zap.Float64("past2SeasonSeriesAvg", past2SeasonSeriesAvg), zap.Float64("past3SeasonSeriesAvg", past3SeasonSeriesAvg), zap.Any("labels", series.Labels))

			predictedSeries := p.getPredictedSeries(
				series,
				pastPeriodSeries,
				currentSeasonSeries,
				pastSeasonSeries,
				past2SeasonSeries,
				past3SeasonSeries,
			)
			result.PredictedSeries = append(result.PredictedSeries, predictedSeries)

			upperBoundSeries, lowerBoundSeries := p.getBounds(
				series,
				predictedSeries,
				zScoreThreshold,
			)
			result.UpperBoundSeries = append(result.UpperBoundSeries, upperBoundSeries)
			result.LowerBoundSeries = append(result.LowerBoundSeries, lowerBoundSeries)

			anomalyScoreSeries := p.getAnomalyScores(
				series,
				pastPeriodSeries,
				currentSeasonSeries,
				pastSeasonSeries,
				past2SeasonSeries,
				past3SeasonSeries,
			)
			result.AnomalyScores = append(result.AnomalyScores, anomalyScoreSeries)
		}
	}

	results := make([]*v3.Result, 0, len(currentPeriodResultsMap))
	for _, result := range currentPeriodResultsMap {
		results = append(results, result)
	}

	return &GetAnomaliesResponse{
		Results: results,
	}, nil
}
@@ -1,43 +0,0 @@
package anomaly

import (
	"context"

	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type WeeklyProvider struct {
	BaseSeasonalProvider
}

var _ BaseProvider = (*WeeklyProvider)(nil)

func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
	return &wp.BaseSeasonalProvider
}

func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
	wp := &WeeklyProvider{
		BaseSeasonalProvider: BaseSeasonalProvider{},
	}

	for _, opt := range opts {
		opt(wp)
	}

	wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
		Reader:        wp.reader,
		Cache:         wp.cache,
		KeyGenerator:  queryBuilder.NewKeyGenerator(),
		FluxInterval:  wp.fluxInterval,
		FeatureLookup: wp.ff,
	})

	return wp
}

func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
	req.Seasonality = SeasonalityWeekly
	return p.getAnomalies(ctx, req)
}

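The functional-options pattern above is exercised by queryRangeV4 later in this diff; a hedged wiring sketch, where cache, reader, and featureFlags stand in for the caller's dependencies (editor's sketch, not part of the diff):

// Editor's sketch mirroring the call site in queryRangeV4 below.
provider := anomaly.NewWeeklyProvider(
	anomaly.WithCache[*anomaly.WeeklyProvider](cache),
	anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
	anomaly.WithReader[*anomaly.WeeklyProvider](reader),
	anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](featureFlags),
)
resp, err := provider.GetAnomalies(ctx, &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
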
@@ -24,6 +24,7 @@ import (
type APIHandlerOptions struct {
	DataConnector     interfaces.DataConnector
	SkipConfig        *basemodel.SkipConfig
	PreferDelta       bool
	PreferSpanMetrics bool
	MaxIdleConns      int
	MaxOpenConns      int
@@ -38,8 +39,7 @@ type APIHandlerOptions struct {
	Cache   cache.Cache
	Gateway *httputil.ReverseProxy
	// Querier Influx Interval
	FluxInterval time.Duration
	UseLogsNewSchema bool
	FluxInterval time.Duration
}

type APIHandler struct {
@@ -53,6 +53,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
	baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
		Reader:            opts.DataConnector,
		SkipConfig:        opts.SkipConfig,
		PerferDelta:       opts.PreferDelta,
		PreferSpanMetrics: opts.PreferSpanMetrics,
		MaxIdleConns:      opts.MaxIdleConns,
		MaxOpenConns:      opts.MaxOpenConns,
@@ -64,7 +65,6 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
		LogsParsingPipelineController: opts.LogsParsingPipelineController,
		Cache:        opts.Cache,
		FluxInterval: opts.FluxInterval,
		UseLogsNewSchema: opts.UseLogsNewSchema,
	})

	if err != nil {
@@ -177,8 +177,6 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
		am.ViewAccess(ah.listLicensesV2)).
		Methods(http.MethodGet)

	router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)

	// Gateway
	router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))

@@ -35,14 +35,14 @@ func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
	req := basemodel.LoginRequest{}
	err := parseRequest(r, &req)
	if err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

	ctx := context.Background()

	if req.Email != "" && ah.CheckFeature(model.SSO) {
		var apierr basemodel.BaseApiError
		var apierr *basemodel.ApiError
		_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
		if apierr != nil && !apierr.IsNil() {
			RespondError(w, apierr, nil)
@@ -74,7 +74,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
	requestBody, err := io.ReadAll(r.Body)
	if err != nil {
		zap.L().Error("received no input in api", zap.Error(err))
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

@@ -82,7 +82,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {

	if err != nil {
		zap.L().Error("received invalid user registration request", zap.Error(err))
		RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
		RespondError(w, basemodel.BadRequest(fmt.Errorf("failed to register user")), nil)
		return
	}

@@ -90,13 +90,13 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
	invite, err := baseauth.ValidateInvite(ctx, req)
	if err != nil {
		zap.L().Error("failed to validate invite token", zap.Error(err))
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

	if invite == nil {
		zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
		RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
		RespondError(w, basemodel.BadRequest(basemodel.ErrSignupFailed{}), nil)
		return
	}

@@ -104,7 +104,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
	domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
	if apierr != nil {
		zap.L().Error("failed to get domain from email", zap.Error(apierr))
		RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
		RespondError(w, basemodel.InternalError(basemodel.ErrSignupFailed{}), nil)
	}

	precheckResp := &basemodel.PrecheckResponse{
@@ -120,7 +120,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	var precheckError basemodel.BaseApiError
	var precheckError *basemodel.ApiError

	precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
	if precheckError != nil {
@@ -130,7 +130,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
	} else {
		// no-sso, validate password
		if err := baseauth.ValidatePassword(req.Password); err != nil {
			RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
			RespondError(w, basemodel.InternalError(fmt.Errorf("password is not in a valid format")), nil)
			return
		}

@@ -155,7 +155,7 @@ func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {

	inviteObject, err := baseauth.GetInvite(context.Background(), token)
	if err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

@@ -1,9 +1,7 @@
package api

import (
	"errors"
	"net/http"
	"strings"

	"github.com/gorilla/mux"
	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
@@ -31,10 +29,6 @@ func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request

	// Get the dashboard UUID from the request
	uuid := mux.Vars(r)["uuid"]
	if strings.HasPrefix(uuid, "integration") {
		RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: errors.New("dashboards created by integrations cannot be unlocked")}, "You are not authorized to lock/unlock this dashboard")
		return
	}
	dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())

@@ -9,6 +9,7 @@ import (
	"github.com/google/uuid"
	"github.com/gorilla/mux"
	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
@@ -27,12 +28,12 @@ func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
	req := model.OrgDomain{}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

	if err := req.ValidNew(); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

@@ -50,18 +51,18 @@ func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
	domainIdStr := mux.Vars(r)["id"]
	domainId, err := uuid.Parse(domainIdStr)
	if err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

	req := model.OrgDomain{Id: domainId}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}
	req.Id = domainId
	if err := req.Valid(nil); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
	}

	if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
@@ -77,7 +78,7 @@ func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {

	domainId, err := uuid.Parse(domainIdStr)
	if err != nil {
		RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
		RespondError(w, basemodel.BadRequest(fmt.Errorf("invalid domain id")), nil)
		return
	}

@@ -1,48 +1,17 @@
package api

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	"go.signoz.io/signoz/ee/query-service/constants"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	featureSet, err := ah.FF().GetFeatureFlags()
	if err != nil {
		ah.HandleError(w, err, http.StatusInternalServerError)
		return
	}

	if constants.FetchFeatures == "true" {
		zap.L().Debug("fetching license")
		license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
		if err != nil {
			zap.L().Error("failed to fetch license", zap.Error(err))
		} else if license == nil {
			zap.L().Debug("no active license found")
		} else {
			licenseKey := license.Key

			zap.L().Debug("fetching zeus features")
			zeusFeatures, err := fetchZeusFeatures(constants.ZeusFeaturesURL, licenseKey)
			if err == nil {
				zap.L().Debug("fetched zeus features", zap.Any("features", zeusFeatures))
				// merge featureSet and zeusFeatures in featureSet with higher priority to zeusFeatures
				featureSet = MergeFeatureSets(zeusFeatures, featureSet)
			} else {
				zap.L().Error("failed to fetch zeus features", zap.Error(err))
			}
		}
	}

	if ah.opts.PreferSpanMetrics {
		for idx := range featureSet {
			feature := &featureSet[idx]
@@ -51,96 +20,5 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
		}
	}
}

	ah.Respond(w, featureSet)
}

// fetchZeusFeatures makes an HTTP GET request to the /zeusFeatures endpoint
// and returns the FeatureSet.
func fetchZeusFeatures(url, licenseKey string) (basemodel.FeatureSet, error) {
	// Check if the URL is empty
	if url == "" {
		return nil, fmt.Errorf("url is empty")
	}

	// Check if the licenseKey is empty
	if licenseKey == "" {
		return nil, fmt.Errorf("licenseKey is empty")
	}

	// Creating an HTTP client with a timeout for better control
	client := &http.Client{
		Timeout: 10 * time.Second,
	}
	// Creating a new GET request
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}

	// Setting the custom header
	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)

	// Making the GET request
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to make GET request: %w", err)
	}
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	// Check for non-OK status code
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%w: %d %s", errors.New("received non-OK HTTP status code"), resp.StatusCode, http.StatusText(resp.StatusCode))
	}

	// Reading and decoding the response body
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response body: %w", err)
	}

	var zeusResponse ZeusFeaturesResponse
	if err := json.Unmarshal(body, &zeusResponse); err != nil {
		return nil, fmt.Errorf("%w: %v", errors.New("failed to decode response body"), err)
	}

	if zeusResponse.Status != "success" {
		return nil, fmt.Errorf("%w: %s", errors.New("failed to fetch zeus features"), zeusResponse.Status)
	}

	return zeusResponse.Data, nil
}
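A hedged usage sketch of fetchZeusFeatures, mirroring the call in getFeatureFlags above; the license key literal is a placeholder, not a real value (editor's sketch, not part of the diff):

// Editor's sketch; constants.ZeusFeaturesURL and MergeFeatureSets are the
// real identifiers used above, the key is illustrative.
zeusFeatures, err := fetchZeusFeatures(constants.ZeusFeaturesURL, "license-key-placeholder")
if err != nil {
	zap.L().Error("failed to fetch zeus features", zap.Error(err))
} else {
	featureSet = MergeFeatureSets(zeusFeatures, featureSet)
}
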

type ZeusFeaturesResponse struct {
	Status string               `json:"status"`
	Data   basemodel.FeatureSet `json:"data"`
}

// MergeFeatureSets merges two FeatureSet arrays with precedence to zeusFeatures.
func MergeFeatureSets(zeusFeatures, internalFeatures basemodel.FeatureSet) basemodel.FeatureSet {
	// Create a map to store the merged features
	featureMap := make(map[string]basemodel.Feature)

	// Add all features from the internalFeatures set to the map
	for _, feature := range internalFeatures {
		featureMap[feature.Name] = feature
	}

	// Add all features from the zeusFeatures set to the map
	// If a feature already exists (i.e., same name), the zeusFeature will overwrite it
	for _, feature := range zeusFeatures {
		featureMap[feature.Name] = feature
	}

	// Convert the map back to a FeatureSet slice
	var mergedFeatures basemodel.FeatureSet
	for _, feature := range featureMap {
		mergedFeatures = append(mergedFeatures, feature)
	}

	return mergedFeatures
}

@@ -1,88 +0,0 @@
package api

import (
	"testing"

	"github.com/stretchr/testify/assert"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func TestMergeFeatureSets(t *testing.T) {
	tests := []struct {
		name             string
		zeusFeatures     basemodel.FeatureSet
		internalFeatures basemodel.FeatureSet
		expected         basemodel.FeatureSet
	}{
		{
			name:             "empty zeusFeatures and internalFeatures",
			zeusFeatures:     basemodel.FeatureSet{},
			internalFeatures: basemodel.FeatureSet{},
			expected:         basemodel.FeatureSet{},
		},
		{
			name: "non-empty zeusFeatures and empty internalFeatures",
			zeusFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
			internalFeatures: basemodel.FeatureSet{},
			expected: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
		},
		{
			name:         "empty zeusFeatures and non-empty internalFeatures",
			zeusFeatures: basemodel.FeatureSet{},
			internalFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
			expected: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
		},
		{
			name: "non-empty zeusFeatures and non-empty internalFeatures with no conflicts",
			zeusFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature3", Active: false},
			},
			internalFeatures: basemodel.FeatureSet{
				{Name: "Feature2", Active: true},
				{Name: "Feature4", Active: false},
			},
			expected: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: true},
				{Name: "Feature3", Active: false},
				{Name: "Feature4", Active: false},
			},
		},
		{
			name: "non-empty zeusFeatures and non-empty internalFeatures with conflicts",
			zeusFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
			internalFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: false},
				{Name: "Feature3", Active: true},
			},
			expected: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
				{Name: "Feature3", Active: true},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			actual := MergeFeatureSets(test.zeusFeatures, test.internalFeatures)
			assert.ElementsMatch(t, test.expected, actual)
		})
	}
}
@@ -9,6 +9,7 @@ import (

	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

@@ -71,12 +72,12 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
	var l model.License

	if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

	if l.Key == "" {
		RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
		RespondError(w, basemodel.BadRequest(fmt.Errorf("license key is required")), nil)
		return
	}
	license, apiError := ah.LM().Activate(r.Context(), l.Key)
@@ -100,20 +101,20 @@ func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
	hClient := &http.Client{}
	req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		RespondError(w, basemodel.InternalError(err), nil)
		return
	}
	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
	licenseResp, err := hClient.Do(req)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		RespondError(w, basemodel.InternalError(err), nil)
		return
	}

	// decode response body
	var resp checkoutResponse
	if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
		RespondError(w, model.InternalError(err), nil)
		RespondError(w, basemodel.InternalError(err), nil)
		return
	}

@@ -124,7 +125,7 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
	licenseKey := r.URL.Query().Get("licenseKey")

	if licenseKey == "" {
		RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
		RespondError(w, basemodel.BadRequest(fmt.Errorf("license key is required")), nil)
		return
	}

@@ -133,20 +134,20 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
	hClient := &http.Client{}
	req, err := http.NewRequest("GET", billingURL, nil)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		RespondError(w, basemodel.InternalError(err), nil)
		return
	}
	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
	billingResp, err := hClient.Do(req)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		RespondError(w, basemodel.InternalError(err), nil)
		return
	}

	// decode response body
	var billingResponse billingDetails
	if err := json.NewDecoder(billingResp.Body).Decode(&billingResponse); err != nil {
		RespondError(w, model.InternalError(err), nil)
		RespondError(w, basemodel.InternalError(err), nil)
		return
	}

@@ -251,20 +252,20 @@ func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
	hClient := &http.Client{}
	req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		RespondError(w, basemodel.InternalError(err), nil)
		return
	}
	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
	licenseResp, err := hClient.Do(req)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		RespondError(w, basemodel.InternalError(err), nil)
		return
	}

	// decode response body
	var resp checkoutResponse
	if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
		RespondError(w, model.InternalError(err), nil)
		RespondError(w, basemodel.InternalError(err), nil)
		return
	}

@@ -31,13 +31,13 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {

	req := model.CreatePATRequestBody{}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}
	user, err := auth.GetUserFromRequest(r)
	if err != nil {
		RespondError(w, &model.ApiError{
			Typ: model.ErrorUnauthorized,
		RespondError(w, &basemodel.ApiError{
			Typ: basemodel.ErrorUnauthorized,
			Err: err,
		}, nil)
		return
@@ -49,7 +49,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
	}
	err = validatePATRequest(pat)
	if err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

@@ -66,7 +66,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
	}

	zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
	var apierr basemodel.BaseApiError
	var apierr *basemodel.ApiError
	if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
		RespondError(w, apierr, nil)
		return
@@ -93,14 +93,14 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {

	req := model.PAT{}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

	user, err := auth.GetUserFromRequest(r)
	if err != nil {
		RespondError(w, &model.ApiError{
			Typ: model.ErrorUnauthorized,
		RespondError(w, &basemodel.ApiError{
			Typ: basemodel.ErrorUnauthorized,
			Err: err,
		}, nil)
		return
@@ -108,7 +108,7 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {

	err = validatePATRequest(req)
	if err != nil {
		RespondError(w, model.BadRequest(err), nil)
		RespondError(w, basemodel.BadRequest(err), nil)
		return
	}

@@ -116,7 +116,7 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	req.UpdatedAt = time.Now().Unix()
	zap.L().Info("Got Update PAT request", zap.Any("pat", req))
	var apierr basemodel.BaseApiError
	var apierr *basemodel.ApiError
	if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
		RespondError(w, apierr, nil)
		return
@@ -129,8 +129,8 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()
	user, err := auth.GetUserFromRequest(r)
	if err != nil {
		RespondError(w, &model.ApiError{
			Typ: model.ErrorUnauthorized,
		RespondError(w, &basemodel.ApiError{
			Typ: basemodel.ErrorUnauthorized,
			Err: err,
		}, nil)
		return
@@ -149,8 +149,8 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	user, err := auth.GetUserFromRequest(r)
	if err != nil {
		RespondError(w, &model.ApiError{
			Typ: model.ErrorUnauthorized,
		RespondError(w, &basemodel.ApiError{
			Typ: basemodel.ErrorUnauthorized,
			Err: err,
		}, nil)
		return

@@ -1,119 +0,0 @@
package api

import (
	"bytes"
	"fmt"
	"io"
	"net/http"

	"go.signoz.io/signoz/ee/query-service/anomaly"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
	"go.signoz.io/signoz/pkg/query-service/model"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.uber.org/zap"
)

func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {

	bodyBytes, _ := io.ReadAll(r.Body)
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))

	queryRangeParams, apiErrorObj := baseapp.ParseQueryRangeParams(r)

	if apiErrorObj != nil {
		zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
		RespondError(w, apiErrorObj, nil)
		return
	}
	queryRangeParams.Version = "v4"

	// add temporality for each metric
	temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
	if temporalityErr != nil {
		zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
		return
	}

	anomalyQueryExists := false
	anomalyQuery := &v3.BuilderQuery{}
	if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
		for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
			for _, fn := range query.Functions {
				if fn.Name == v3.FunctionNameAnomaly {
					anomalyQueryExists = true
					anomalyQuery = query
					break
				}
			}
		}
	}

	if anomalyQueryExists {
		// ensure all queries have metric data source, and there should be only one anomaly query
		for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
			if query.DataSource != v3.DataSourceMetrics {
				RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("all queries must have metric data source")}, nil)
				return
			}
		}

		// get the threshold, and seasonality from the anomaly query
		var seasonality anomaly.Seasonality
		for _, fn := range anomalyQuery.Functions {
			if fn.Name == v3.FunctionNameAnomaly {
				seasonalityStr, ok := fn.NamedArgs["seasonality"].(string)
				if !ok {
					seasonalityStr = "daily"
				}
				if seasonalityStr == "weekly" {
					seasonality = anomaly.SeasonalityWeekly
				} else if seasonalityStr == "daily" {
					seasonality = anomaly.SeasonalityDaily
				} else {
					seasonality = anomaly.SeasonalityHourly
				}
				break
			}
		}
		var provider anomaly.Provider
		switch seasonality {
		case anomaly.SeasonalityWeekly:
			provider = anomaly.NewWeeklyProvider(
				anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
				anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
				anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
				anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
			)
		case anomaly.SeasonalityDaily:
			provider = anomaly.NewDailyProvider(
				anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
				anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
				anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
				anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
			)
		case anomaly.SeasonalityHourly:
			provider = anomaly.NewHourlyProvider(
				anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
				anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
				anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
				anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
			)
		}
		anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
		if err != nil {
			RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
			return
		}
		uniqueResults := make(map[string]*v3.Result)
		for _, anomaly := range anomalies.Results {
			uniqueResults[anomaly.QueryName] = anomaly
			uniqueResults[anomaly.QueryName].IsAnomaly = true
		}
		aH.Respond(w, uniqueResults)
	} else {
		r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
		aH.QueryRangeV4(w, r)
	}
}
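For orientation, a hedged sketch of the anomaly function's named arguments as this handler consumes them; the v3.Function type name is assumed from the loops above, and only Name and NamedArgs are actually read (editor's illustration, not part of the diff):

// Editor's illustration of a builder-query fragment.
fn := v3.Function{
	Name: v3.FunctionNameAnomaly,
	NamedArgs: map[string]interface{}{
		"seasonality":       "weekly", // missing -> "daily"; anything other than "weekly"/"daily" -> hourly provider
		"z_score_threshold": 4.0,      // missing -> 3 (see getAnomalies earlier in this diff)
	},
}
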
@@ -7,6 +7,6 @@ import (
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {
	baseapp.RespondError(w, apiErr, data)
func RespondError(w http.ResponseWriter, apiErr *basemodel.ApiError, data interface{}) {
	baseapp.RespondError(w, apiErr)
}

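Worth noting for reviewers: swapping the basemodel.BaseApiError interface for a concrete *basemodel.ApiError, as this branch does throughout, changes nil semantics. A standalone illustration of the typed-nil pitfall the interface version is prone to (editor's sketch, not part of the diff):

package main

import "fmt"

type apiError struct{ msg string }

func (e *apiError) Error() string { return e.msg }

// mayFail returns a typed nil wrapped in the error interface.
func mayFail() error {
	var e *apiError // nil pointer
	return e        // non-nil interface value holding a nil *apiError
}

func main() {
	err := mayFail()
	fmt.Println(err != nil) // true -- the classic surprise a concrete return type avoids
}
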
@@ -4,7 +4,6 @@ import (
	"net/http"

	"go.signoz.io/signoz/ee/query-service/app/db"
	"go.signoz.io/signoz/ee/query-service/model"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
@@ -19,7 +18,7 @@ func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
	}
	searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
		RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, "Error reading params")
		return
	}

401
ee/query-service/app/db/metrics.go
Normal file
@@ -0,0 +1,401 @@
package db

import (
	"context"
	"crypto/md5"
	"encoding/json"
	"fmt"
	"reflect"
	"regexp"
	"sort"
	"strings"
	"time"

	"go.signoz.io/signoz/ee/query-service/model"
	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/query-service/utils"
	"go.uber.org/zap"
)

// GetMetricResultEE runs the query and returns list of time series
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {

	defer utils.Elapsed("GetMetricResult")()
	zap.L().Info("Executing metric result query: ", zap.String("query", query))

	var hash string
	// If getSubTreeSpans function is used in the clickhouse query
	if strings.Contains(query, "getSubTreeSpans(") {
		var err error
		query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
		// Note: comparing err == fmt.Errorf(...) can never match a freshly
		// constructed error; match on the message so this case is handled.
		if err != nil && err.Error() == "no spans found for the given query" {
			return nil, "", nil
		}
		if err != nil {
			return nil, "", err
		}
	}

	rows, err := r.conn.Query(ctx, query)
	if err != nil {
		zap.L().Error("Error in processing query", zap.Error(err))
		return nil, "", fmt.Errorf("error in processing query")
	}

	var (
		columnTypes = rows.ColumnTypes()
		columnNames = rows.Columns()
		vars        = make([]interface{}, len(columnTypes))
	)
	for i := range columnTypes {
		vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
	}
	// when group by is applied, each combination of the cartesian product
	// of attributes is a separate series. each item in metricPointsMap
	// represents a unique series.
	metricPointsMap := make(map[string][]basemodel.MetricPoint)
	// attribute key-value pairs for each group selection
	attributesMap := make(map[string]map[string]string)

	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(vars...); err != nil {
			return nil, "", err
		}
		var groupBy []string
		var metricPoint basemodel.MetricPoint
		groupAttributes := make(map[string]string)
		// Assuming that the end result row contains a timestamp, value and optional labels
		// Label key and value are both strings.
		for idx, v := range vars {
			colName := columnNames[idx]
			switch v := v.(type) {
			case *string:
				// special case for returning all labels
				if colName == "fullLabels" {
					var metric map[string]string
					err := json.Unmarshal([]byte(*v), &metric)
					if err != nil {
						return nil, "", err
					}
					for key, val := range metric {
						groupBy = append(groupBy, val)
						groupAttributes[key] = val
					}
				} else {
					groupBy = append(groupBy, *v)
					groupAttributes[colName] = *v
				}
			case *time.Time:
				metricPoint.Timestamp = v.UnixMilli()
			case *float64:
				metricPoint.Value = *v
			case **float64:
				// ch seems to return this type when column is derived from
				// SELECT count(*)/ SELECT count(*)
				floatVal := *v
				if floatVal != nil {
					metricPoint.Value = *floatVal
				}
			case *float32:
				float32Val := float32(*v)
				metricPoint.Value = float64(float32Val)
			case *uint8, *uint64, *uint16, *uint32:
				if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
					metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
				} else {
					groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
					groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
				}
			case *int8, *int16, *int32, *int64:
				if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
					metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
				} else {
					groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
					groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
				}
			default:
				zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName))
			}
		}
		sort.Strings(groupBy)
		key := strings.Join(groupBy, "")
		attributesMap[key] = groupAttributes
		metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
	}

	var seriesList []*basemodel.Series
	for key := range metricPointsMap {
		points := metricPointsMap[key]
		// first point in each series could be invalid since the
		// aggregations are applied with point from prev series
		if len(points) > 1 { // len > 1 already implies the slice is non-empty
			points = points[1:]
		}
		attributes := attributesMap[key]
		series := basemodel.Series{Labels: attributes, Points: points}
		seriesList = append(seriesList, &series)
	}
	// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
	// if err != nil {
	// 	zap.L().Error("Error in dropping temporary table: ", err)
	// 	return nil, err
	// }
	if hash == "" {
		return seriesList, hash, nil
	} else {
		return seriesList, "getSubTreeSpans" + hash, nil
	}
}

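The series key above is just the sorted label values concatenated together; a standalone illustration of that keying scheme (editor's sketch, not part of the diff):

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Two rows carrying the same label values in different column order
	// map to the same series key, because values are sorted before joining.
	row1 := []string{"frontend", "us-east-1"}
	row2 := []string{"us-east-1", "frontend"}
	sort.Strings(row1)
	sort.Strings(row2)
	fmt.Println(strings.Join(row1, "") == strings.Join(row2, "")) // true
}
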
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {

	zap.L().Debug("Executing getSubTreeSpans function")

	// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`

	// process the query to fetch subTree query
	var subtreeInput string
	query, subtreeInput, hash = processQuery(query, hash)

	err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
	if err != nil {
		zap.L().Error("Error in dropping temporary table", zap.Error(err))
		return query, hash, err
	}

	// Create temporary table to store the getSubTreeSpans() results
	zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash))
	err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
	if err != nil {
		zap.L().Error("Error in creating temporary table", zap.Error(err))
		return query, hash, err
	}

	var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
	getSpansSubQuery := subtreeInput
	// Execute the subTree query
	zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery))
	err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)

	// zap.L().Info(getSpansSubQuery)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return query, hash, fmt.Errorf("error in processing sql query")
	}

	var searchScanResponses []basemodel.SearchSpanDBResponseItem

	// TODO : @ankit: I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this as an improvement later.
	// Fetch all the spans of the same TraceID so that we can build the subtree
	modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)

	if len(getSpansSubQueryDBResponses) == 0 {
		return query, hash, fmt.Errorf("no spans found for the given query")
	}
	zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery))
	err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return query, hash, fmt.Errorf("error in processing sql query")
	}

	// Process model to fetch the spans
	zap.L().Debug("Processing model to fetch the spans")
	searchSpanResponses := []basemodel.SearchSpanResponseItem{}
	for _, item := range searchScanResponses {
		var jsonItem basemodel.SearchSpanResponseItem
		json.Unmarshal([]byte(item.Model), &jsonItem)
		jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano())
		if jsonItem.Events == nil {
			jsonItem.Events = []string{}
		}
		searchSpanResponses = append(searchSpanResponses, jsonItem)
	}
	// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
	// Use map to store pointer to the spans to avoid duplicates and save memory
	zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))

	treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
	if err != nil {
		zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err))
		return query, hash, err
	}
	zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
	// plain concatenation; Sprintf with no format verbs was a vet warning
	statement, err := r.conn.PrepareBatch(context.Background(), "INSERT INTO getSubTreeSpans"+hash)
	if err != nil {
		zap.L().Error("Error in preparing batch statement", zap.Error(err))
		return query, hash, err
	}
	for _, span := range treeSearchResponse {
		var parentID string
		if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" {
			parentID = span.References[0].SpanId
		}
		err = statement.Append(
			time.Unix(0, int64(span.TimeUnixNano)),
			span.TraceID,
			span.SpanID,
			parentID,
			span.RootSpanID,
			span.ServiceName,
			span.Name,
			span.RootName,
			uint64(span.DurationNano),
			int8(span.Kind),
			span.TagMap,
			span.Events,
		)
		if err != nil {
			zap.L().Error("Error in processing sql query", zap.Error(err))
			return query, hash, err
		}
	}
	zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
	err = statement.Send()
	if err != nil {
		zap.L().Error("Error in sending statement", zap.Error(err))
		return query, hash, err
	}
	return query, hash, nil
}

//lint:ignore SA4009 the returned hash is fed into the query
func processQuery(query string, hash string) (string, string, string) {
	re3 := regexp.MustCompile(`getSubTreeSpans`)

	submatchall3 := re3.FindAllStringIndex(query, -1)
	getSubtreeSpansMatchIndex := submatchall3[0][1]

	query2countParenthesis := query[getSubtreeSpansMatchIndex:]

	sqlCompleteIndex := 0
	countParenthesisImbalance := 0
	for i, char := range query2countParenthesis {

		if string(char) == "(" {
			countParenthesisImbalance += 1
		}
		if string(char) == ")" {
			countParenthesisImbalance -= 1
		}
		if countParenthesisImbalance == 0 {
			sqlCompleteIndex = i
			break
		}
	}
	subtreeInput := query2countParenthesis[1:sqlCompleteIndex]

	// hash the subtreeInput
	hmd5 := md5.Sum([]byte(subtreeInput))
	hash = fmt.Sprintf("%x", hmd5)

	// Reformat the query to use the getSubTreeSpans function
	query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:]
	return query, subtreeInput, hash
}

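To make the rewrite concrete, here is a standalone sketch of the same technique: locate the getSubTreeSpans(...) call, extract its balanced-parentheses argument, and replace the call with a hash-suffixed table name. The SQL string is illustrative, loosely based on the commented sample above (editor's sketch, not part of the diff):

package main

import (
	"crypto/md5"
	"fmt"
	"strings"
)

func main() {
	query := "select count() from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend') group by interval"
	// index right after the function name, pointing at its opening '('
	start := strings.Index(query, "getSubTreeSpans") + len("getSubTreeSpans")
	rest := query[start:]

	// scan for the parenthesis that balances the opening one
	depth, end := 0, 0
	for i, ch := range rest {
		if ch == '(' {
			depth++
		}
		if ch == ')' {
			depth--
		}
		if depth == 0 {
			end = i
			break
		}
	}
	subtree := rest[1:end] // the inner SELECT, fed to the temp table
	hash := fmt.Sprintf("%x", md5.Sum([]byte(subtree)))
	rewritten := query[:start] + hash + " " + rest[end+1:]
	fmt.Println(subtree)
	fmt.Println(rewritten) // reads from getSubTreeSpans<hash> instead
}
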
// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans
func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) {

	var spans []*model.SpanForTraceDetails
	for _, spanItem := range payload {
		var parentID string
		if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
			parentID = spanItem.References[0].SpanId
		}
		span := &model.SpanForTraceDetails{
			TimeUnixNano: spanItem.TimeUnixNano,
			SpanID:       spanItem.SpanID,
			TraceID:      spanItem.TraceID,
			ServiceName:  spanItem.ServiceName,
			Name:         spanItem.Name,
			Kind:         spanItem.Kind,
			DurationNano: spanItem.DurationNano,
			TagMap:       spanItem.TagMap,
			ParentID:     parentID,
			Events:       spanItem.Events,
			HasError:     spanItem.HasError,
		}
		spans = append(spans, span)
	}

	zap.L().Debug("Building Tree")
	roots, err := buildSpanTrees(&spans)
	if err != nil {
		return nil, err
	}
	searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem)
	// Every span which was fetched from getSubTree Input SQL query is considered root
	// For each root, get the subtree spans
	for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
		targetSpan := &model.SpanForTraceDetails{}
		// zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
		// Search target span object in the tree
		for _, root := range roots {
			targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
			if targetSpan != nil {
				break
			}
			if err != nil {
				zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
				return nil, err
			}
		}
		if targetSpan == nil {
			return nil, nil
		}
		// Build subtree for the target span
		// Mark the target span as root by setting parent ID as empty string
		targetSpan.ParentID = ""
		preParents := []*model.SpanForTraceDetails{targetSpan}
		children := []*model.SpanForTraceDetails{}

		// Get the subtree child spans
		for i := 0; len(preParents) != 0; i++ {
			parents := []*model.SpanForTraceDetails{}
			for _, parent := range preParents {
				children = append(children, parent.Children...)
				parents = append(parents, parent.Children...)
			}
			preParents = parents
		}

		resultSpans := children
		// Add the target span to the result spans
		resultSpans = append(resultSpans, targetSpan)

		for _, item := range resultSpans {
			references := []basemodel.OtelSpanRef{
				{
					TraceId: item.TraceID,
					SpanId:  item.ParentID,
					RefType: "CHILD_OF",
				},
			}

			if item.Events == nil {
				item.Events = []string{}
			}
			searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{
				TimeUnixNano: item.TimeUnixNano,
				SpanID:       item.SpanID,
				TraceID:      item.TraceID,
				ServiceName:  item.ServiceName,
				Name:         item.Name,
				Kind:         item.Kind,
				References:   references,
				DurationNano: item.DurationNano,
				TagMap:       item.TagMap,
				Events:       item.Events,
				HasError:     item.HasError,
				RootSpanID:   getSpansSubQueryDBResponse.SpanID,
				RootName:     targetSpan.Name,
			}
		}
	}
	return searchSpansResult, nil
}

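The preParents/children sweep above is a level-order walk; in isolation, with a simplified node type, it looks like this (editor's sketch, not part of the diff):

package main

import "fmt"

type node struct {
	id       string
	children []*node
}

// collectSubtree mirrors the sweep in getSubTreeAlgorithm: walk the tree
// level by level, accumulating the root and every descendant.
func collectSubtree(root *node) []*node {
	result := []*node{root}
	frontier := []*node{root}
	for len(frontier) != 0 {
		next := []*node{}
		for _, n := range frontier {
			result = append(result, n.children...)
			next = append(next, n.children...)
		}
		frontier = next
	}
	return result
}

func main() {
	leaf := &node{id: "c"}
	mid := &node{id: "b", children: []*node{leaf}}
	root := &node{id: "a", children: []*node{mid}}
	for _, n := range collectSubtree(root) {
		fmt.Println(n.id) // a, b, c
	}
}
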
@@ -25,9 +25,8 @@ func NewDataConnector(
	maxOpenConns int,
	dialTimeout time.Duration,
	cluster string,
	useLogsNewSchema bool,
) *ClickhouseReader {
	ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
	ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
	return &ClickhouseReader{
		conn:  ch.GetConn(),
		appdb: localDB,

@@ -1,15 +1,14 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
_ "net/http/pprof" // http profiler
|
||||
"os"
|
||||
"regexp"
|
||||
@@ -28,10 +27,7 @@ import (
|
||||
"go.signoz.io/signoz/ee/query-service/dao"
|
||||
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
|
||||
"go.signoz.io/signoz/ee/query-service/interfaces"
|
||||
"go.signoz.io/signoz/ee/query-service/rules"
|
||||
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||
"go.signoz.io/signoz/pkg/query-service/migrate"
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
|
||||
|
||||
licensepkg "go.signoz.io/signoz/ee/query-service/license"
|
||||
@@ -45,7 +41,6 @@ import (
|
||||
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/opamp"
|
||||
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/preferences"
|
||||
"go.signoz.io/signoz/pkg/query-service/cache"
|
||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||
"go.signoz.io/signoz/pkg/query-service/healthcheck"
|
||||
@@ -53,7 +48,7 @@ import (
|
||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
|
||||
baserules "go.signoz.io/signoz/pkg/query-service/rules"
|
||||
rules "go.signoz.io/signoz/pkg/query-service/rules"
|
||||
"go.signoz.io/signoz/pkg/query-service/telemetry"
|
||||
"go.signoz.io/signoz/pkg/query-service/utils"
|
||||
"go.uber.org/zap"
|
||||
@@ -69,6 +64,7 @@ type ServerOptions struct {
|
||||
// alert specific params
|
||||
DisableRules bool
|
||||
RuleRepoURL string
|
||||
PreferDelta bool
|
||||
PreferSpanMetrics bool
|
||||
MaxIdleConns int
|
||||
MaxOpenConns int
|
||||
@@ -77,13 +73,12 @@ type ServerOptions struct {
|
||||
FluxInterval string
|
||||
Cluster string
|
||||
GatewayUrl string
|
||||
UseLogsNewSchema bool
|
||||
}
|
||||
|
||||
// Server runs HTTP api service
|
||||
type Server struct {
|
||||
serverOptions *ServerOptions
|
||||
ruleManager *baserules.Manager
|
||||
ruleManager *rules.Manager
|
||||
|
||||
// public http router
|
||||
httpConn net.Listener
|
||||
@@ -116,10 +111,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
|
||||
baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)
|
||||
|
||||
if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
|
||||
|
||||
if err != nil {
|
||||
@@ -128,13 +119,33 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
|
||||
localDB.SetMaxOpenConns(10)
|
||||
|
||||
gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
gatewayFeature := basemodel.Feature{
|
||||
Name: "GATEWAY",
|
||||
Active: false,
|
||||
Usage: 0,
|
||||
UsageLimit: -1,
|
||||
Route: "",
|
||||
}
|
||||
|
||||
//Activate this feature if the url is not empty
|
||||
var gatewayProxy *httputil.ReverseProxy
|
||||
if serverOptions.GatewayUrl == "" {
|
||||
gatewayFeature.Active = false
|
||||
gatewayProxy, err = gateway.NewNoopProxy()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
zap.L().Info("Enabling gateway feature flag ...")
|
||||
gatewayFeature.Active = true
|
||||
gatewayProxy, err = gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// initiate license manager
lm, err := licensepkg.StartManager("sqlite", localDB)
lm, err := licensepkg.StartManager("sqlite", localDB, gatewayFeature)
if err != nil {
return nil, err
}
@@ -155,7 +166,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.MaxOpenConns,
serverOptions.DialTimeout,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
)
go qb.Start(readerReady)
reader = qb
@@ -170,14 +180,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
}
var c cache.Cache
if serverOptions.CacheConfigPath != "" {
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
if err != nil {
return nil, err
}
c = cache.NewCache(cacheOpts)
}

<-readerReady
rm, err := makeRulesManager(serverOptions.PromConfigPath,
@@ -185,23 +187,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.RuleRepoURL,
localDB,
reader,
c,
serverOptions.DisableRules,
lm,
serverOptions.UseLogsNewSchema,
)
lm)

if err != nil {
return nil, err
}

go func() {
err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
if err != nil {
zap.L().Error("error while running clickhouse migrations", zap.Error(err))
}
}()

// initiate opamp
_, err = opAmpModel.InitDB(localDB)
if err != nil {
@@ -246,6 +238,15 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
telemetry.GetInstance().SetReader(reader)
telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)

var c cache.Cache
if serverOptions.CacheConfigPath != "" {
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
if err != nil {
return nil, err
}
c = cache.NewCache(cacheOpts)
}

fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)

if err != nil {
@@ -255,6 +256,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
apiOpts := api.APIHandlerOptions{
DataConnector: reader,
SkipConfig: skipConfig,
PreferDelta: serverOptions.PreferDelta,
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
MaxIdleConns: serverOptions.MaxIdleConns,
MaxOpenConns: serverOptions.MaxOpenConns,
@@ -269,7 +271,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
Cache: c,
FluxInterval: fluxInterval,
Gateway: gatewayProxy,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
}

apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -324,7 +325,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
// ip here for alert manager
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY"},
})

handler := c.Handler(r)
@@ -341,17 +342,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e

// add auth middleware
getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
user, err := auth.GetUserFromRequest(r, apiHandler)

if err != nil {
return nil, err
}

if user.User.OrgId == "" {
return nil, model.UnauthorizedError(errors.New("orgId is missing in the claims"))
}

return user, nil
return auth.GetUserFromRequest(r, apiHandler)
}
am := baseapp.NewAuthMiddleware(getUserFromRequest)

@@ -365,13 +356,11 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
apiHandler.RegisterIntegrationRoutes(r, am)
apiHandler.RegisterQueryRangeV3Routes(r, am)
apiHandler.RegisterQueryRangeV4Routes(r, am)
apiHandler.RegisterWebSocketPaths(r, am)
apiHandler.RegisterMessagingQueuesRoutes(r, am)

c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control"},
})

handler := c.Handler(r)
@@ -383,7 +372,6 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
}, nil
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddleware is used for logging public api calls
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -395,7 +383,6 @@ func loggingMiddleware(next http.Handler) http.Handler {
})
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddlewarePrivate is used for logging private api calls
// from internal services like alert manager
func loggingMiddlewarePrivate(next http.Handler) http.Handler {
@@ -408,41 +395,27 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
})
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
type loggingResponseWriter struct {
http.ResponseWriter
statusCode int
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
// we default to that status code.
return &loggingResponseWriter{w, http.StatusOK}
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Flush implements the http.Flush interface.
func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Support websockets
func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := lrw.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("hijack not supported")
}
return h.Hijack()
}

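The `loggingResponseWriter` above exists because `http.ResponseWriter` does not expose the status code that was written. Below is a minimal sketch of how such a writer is typically threaded through a middleware; the `withStatusLogging` wrapper is an illustrative stand-in, not the SigNoz middleware the TODO comments point to.

```go
package main

import (
	"log"
	"net/http"
)

// loggingResponseWriter wraps a ResponseWriter to record the status code.
type loggingResponseWriter struct {
	http.ResponseWriter
	statusCode int
}

func (lrw *loggingResponseWriter) WriteHeader(code int) {
	lrw.statusCode = code
	lrw.ResponseWriter.WriteHeader(code)
}

// withStatusLogging is a hypothetical middleware that logs method, path,
// and the captured status code after the wrapped handler runs.
func withStatusLogging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Default to 200: WriteHeader is skipped on implicit OK responses.
		lrw := &loggingResponseWriter{w, http.StatusOK}
		next.ServeHTTP(lrw, r)
		log.Printf("%s %s -> %d", r.Method, r.URL.Path, lrw.statusCode)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServe(":8082", withStatusLogging(mux)))
}
```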
func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFromV3 := "/api/v3/query_range"
pathToExtractBodyFromV4 := "/api/v4/query_range"
@@ -579,7 +552,6 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
})
}

// TODO(remove): Implemented at pkg/http/middleware/timeout.go
func setTimeoutMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -732,10 +704,8 @@ func makeRulesManager(
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
cache cache.Cache,
disableRules bool,
fm baseint.FeatureLookup,
useLogsNewSchema bool) (*baserules.Manager, error) {
fm baseint.FeatureLookup) (*rules.Manager, error) {

// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -751,9 +721,12 @@ func makeRulesManager(
}

// create manager opts
managerOpts := &baserules.ManagerOptions{
managerOpts := &rules.ManagerOptions{
NotifierOpts: notifierOpts,
PqlEngine: pqle,
Queriers: &rules.Queriers{
PqlEngine: pqle,
Ch: ch.GetConn(),
},
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
@@ -761,15 +734,10 @@ func makeRulesManager(
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),

PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
}

// create Manager
manager, err := baserules.NewManager(managerOpts)
manager, err := rules.NewManager(managerOpts)
if err != nil {
return nil, fmt.Errorf("rule manager error: %v", err)
}

@@ -11,8 +11,8 @@ const (
var LicenseSignozIo = "https://license.signoz.io/api/v1"
var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")
var SpanRenderLimitStr = GetOrDefaultEnv("SPAN_RENDER_LIMIT", "2500")
var MaxSpansInTraceStr = GetOrDefaultEnv("MAX_SPANS_IN_TRACE", "250000")

func GetOrDefaultEnv(key string, fallback string) string {
v := os.Getenv(key)

@@ -21,24 +21,24 @@ type ModelDao interface {
DB() *sqlx.DB

// auth methods
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
CanUsePassword(ctx context.Context, email string) (bool, *basemodel.ApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr *basemodel.ApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)

// org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, *basemodel.ApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, *basemodel.ApiError)
CreateDomain(ctx context.Context, d *model.OrgDomain) *basemodel.ApiError
UpdateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError
DeleteDomain(ctx context.Context, id uuid.UUID) *basemodel.ApiError
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, *basemodel.ApiError)

CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError)
UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError)
RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError
CreatePAT(ctx context.Context, p model.PAT) (model.PAT, *basemodel.ApiError)
UpdatePAT(ctx context.Context, p model.PAT, id string) *basemodel.ApiError
GetPAT(ctx context.Context, pat string) (*model.PAT, *basemodel.ApiError)
UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) *basemodel.ApiError
GetPATByID(ctx context.Context, id string) (*model.PAT, *basemodel.ApiError)
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, *basemodel.ApiError)
ListPATs(ctx context.Context) ([]model.PAT, *basemodel.ApiError)
RevokePAT(ctx context.Context, id string, userID string) *basemodel.ApiError
}

@@ -17,22 +17,19 @@ import (
"go.uber.org/zap"
)

func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, *basemodel.ApiError) {
// get auth domain from email domain
domain, apierr := m.GetDomainByEmail(ctx, email)

if apierr != nil {
zap.L().Error("failed to get domain from email", zap.Error(apierr))
return nil, model.InternalErrorStr("failed to get domain from email")
}
if domain == nil {
zap.L().Error("email domain does not match any authenticated domain", zap.String("email", email))
return nil, model.InternalErrorStr("email domain does not match any authenticated domain")
return nil, basemodel.InternalError(fmt.Errorf("failed to get domain from email"))
}

hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
if err != nil {
zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
return nil, model.InternalErrorStr("failed to generate password hash")
return nil, basemodel.InternalError(fmt.Errorf("failed to generate password hash"))
}

group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
@@ -64,12 +61,12 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (

// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr *basemodel.ApiError) {

userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
return "", model.BadRequestStr("invalid user email received from the auth provider")
return "", basemodel.BadRequest(fmt.Errorf("invalid user email received from the auth provider"))
}

user := &basemodel.User{}
@@ -88,7 +85,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
tokenStore, err := baseauth.GenerateJWTForUser(user)
if err != nil {
zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
return "", model.InternalErrorStr("failed to generate token for the user")
return "", basemodel.InternalError(fmt.Errorf("failed to generate token for the user"))
}

return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
@@ -98,7 +95,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
tokenStore.RefreshJwt), nil
}

func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, *basemodel.ApiError) {
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
return false, apierr
@@ -113,7 +110,7 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base
}

if userPayload.Role != baseconst.AdminGroup {
return false, model.BadRequest(fmt.Errorf("auth method not supported"))
return false, basemodel.BadRequest(fmt.Errorf("auth method not supported"))
}

}
@@ -123,7 +120,7 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base

// PrecheckLogin is called when the login or signup page is loaded
// to check sso login is to be prompted
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) {
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, *basemodel.ApiError) {

// assume user is valid unless proven otherwise
resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}
@@ -147,7 +144,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
ssoAvailable = false
default:
zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
return resp, model.BadRequestStr(err.Error())
return resp, &basemodel.ApiError{Err: err, Typ: basemodel.ErrorBadData}
}
}

@@ -180,7 +177,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
siteUrl, err := url.Parse(escapedUrl)
if err != nil {
zap.L().Error("failed to parse referer", zap.Error(err))
return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
return resp, basemodel.InternalError(fmt.Errorf("failed to generate login request"))
}

// build Idp URL that will authenticat the user
@@ -189,7 +186,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (

if err != nil {
zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
return resp, model.InternalError(err)
return resp, basemodel.InternalError(err)
}

// set SSO to true, as the url is generated correctly

@@ -76,47 +76,47 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
}

// GetDomainByName returns org domain for a given domain name
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, *basemodel.ApiError) {

stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)

if err != nil {
if err == sql.ErrNoRows {
return nil, model.BadRequest(fmt.Errorf("invalid domain name"))
return nil, basemodel.BadRequest(fmt.Errorf("invalid domain name"))
}
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}

domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}
return domain, nil
}

// GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, *basemodel.ApiError) {

stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)

if err != nil {
if err == sql.ErrNoRows {
return nil, model.BadRequest(fmt.Errorf("invalid domain id"))
return nil, basemodel.BadRequest(fmt.Errorf("invalid domain id"))
}
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}

domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}
return domain, nil
}

// ListDomains gets the list of auth domains by org id
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, *basemodel.ApiError) {
domains := []model.OrgDomain{}

stored := []StoredDomain{}
@@ -126,7 +126,7 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
if err == sql.ErrNoRows {
return []model.OrgDomain{}, nil
}
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}

for _, s := range stored {
@@ -141,20 +141,20 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
}

// CreateDomain creates a new auth domain
func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError {

if domain.Id == uuid.Nil {
domain.Id = uuid.New()
}

if domain.OrgId == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
return basemodel.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
}

configJson, err := json.Marshal(domain)
if err != nil {
zap.L().Error("failed to unmarshal domain config", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed"))
return basemodel.InternalError(fmt.Errorf("domain creation failed"))
}

_, err = m.DB().ExecContext(ctx,
@@ -168,24 +168,24 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba

if err != nil {
zap.L().Error("failed to insert domain in db", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed"))
return basemodel.InternalError(fmt.Errorf("domain creation failed"))
}

return nil
}

// UpdateDomain updates stored config params for a domain
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError {

if domain.Id == uuid.Nil {
zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain update failed"))
return basemodel.InternalError(fmt.Errorf("domain update failed"))
}

configJson, err := json.Marshal(domain)
if err != nil {
zap.L().Error("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed"))
return basemodel.InternalError(fmt.Errorf("domain update failed"))
}

_, err = m.DB().ExecContext(ctx,
@@ -196,18 +196,18 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba

if err != nil {
zap.L().Error("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed"))
return basemodel.InternalError(fmt.Errorf("domain update failed"))
}

return nil
}

// DeleteDomain deletes an org domain
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) *basemodel.ApiError {

if id == uuid.Nil {
zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain delete failed"))
return basemodel.InternalError(fmt.Errorf("domain delete failed"))
}

_, err := m.DB().ExecContext(ctx,
@@ -216,21 +216,21 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas

if err != nil {
zap.L().Error("domain delete failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain delete failed"))
return basemodel.InternalError(fmt.Errorf("domain delete failed"))
}

return nil
}

func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, *basemodel.ApiError) {

if email == "" {
return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
return nil, basemodel.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
}

components := strings.Split(email, "@")
if len(components) < 2 {
return nil, model.BadRequest(fmt.Errorf("invalid email address"))
return nil, basemodel.BadRequest(fmt.Errorf("invalid email address"))
}

parsedDomain := components[1]
@@ -242,12 +242,12 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
if err == sql.ErrNoRows {
return nil, nil
}
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}

domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}
return domain, nil
}

@@ -11,7 +11,7 @@ import (
"go.uber.org/zap"
)

func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) {
func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, *basemodel.ApiError) {
result, err := m.DB().ExecContext(ctx,
"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
p.UserID,
@@ -27,12 +27,12 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem
)
if err != nil {
zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
return model.PAT{}, basemodel.InternalError(fmt.Errorf("PAT insertion failed"))
}
id, err := result.LastInsertId()
if err != nil {
zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
return model.PAT{}, basemodel.InternalError(fmt.Errorf("PAT insertion failed"))
}
p.Id = strconv.Itoa(int(id))
createdByUser, _ := m.GetUser(ctx, p.UserID)
@@ -53,7 +53,7 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem
return p, nil
}

func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError {
func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) *basemodel.ApiError {
_, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;",
p.Role,
@@ -63,29 +63,29 @@ func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemo
id)
if err != nil {
zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT update failed"))
return basemodel.InternalError(fmt.Errorf("PAT update failed"))
}
return nil
}

func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) basemodel.BaseApiError {
func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) *basemodel.ApiError {
_, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;",
lastUsed,
token)
if err != nil {
zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT last used update failed"))
return basemodel.InternalError(fmt.Errorf("PAT last used update failed"))
}
return nil
}

func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) {
func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, *basemodel.ApiError) {
pats := []model.PAT{}

if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PATs"))
}
for i := range pats {
createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
@@ -123,28 +123,28 @@ func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApi
return pats, nil
}

func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError {
func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) *basemodel.ApiError {
updatedAt := time.Now().Unix()
_, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
userID, updatedAt, id)
if err != nil {
zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT revoke failed"))
return basemodel.InternalError(fmt.Errorf("PAT revoke failed"))
}
return nil
}

func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, *basemodel.ApiError) {
pats := []model.PAT{}

if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PAT"))
}

if len(pats) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
return nil, &basemodel.ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token),
}
}
@@ -152,16 +152,16 @@ func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemo
return &pats[0], nil
}

func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, *basemodel.ApiError) {
pats := []model.PAT{}

if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PAT"))
}

if len(pats) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
return nil, &basemodel.ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token"),
}
}
@@ -170,7 +170,7 @@ func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basem
}

// deprecated
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) {
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, *basemodel.ApiError) {
users := []basemodel.UserPayload{}

query := `SELECT
@@ -186,12 +186,12 @@ func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.U
WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`

if err := m.DB().Select(&users, query, token); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
return nil, basemodel.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
}

if len(users) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
return nil, &basemodel.ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf("found zero or multiple users with same PAT token"),
}
}

@@ -5,5 +5,5 @@ import (
)

func NewNoopProxy() (*httputil.ReverseProxy, error) {
return &httputil.ReverseProxy{}, nil
return nil, nil
}

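Note the behavioral shift in the hunk above: `NewNoopProxy` now returns a nil `*httputil.ReverseProxy` rather than an empty one, so callers must nil-check before serving through it. Below is a hedged sketch of the guard a consumer would need; `gatewayHandler` is a hypothetical stand-in, not SigNoz code, and the exact behavior of an empty `ReverseProxy` varies by Go release (older releases panic on a nil Director).

```go
package main

import (
	"net/http"
	"net/http/httputil"
)

// gatewayHandler serves through the proxy when one is configured, and
// degrades gracefully when the noop (nil) proxy was installed instead.
func gatewayHandler(proxy *httputil.ReverseProxy) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if proxy == nil {
			// Gateway feature disabled: a nil proxy is an explicit signal,
			// unlike an empty &httputil.ReverseProxy{} with no Director,
			// which cannot route the request anywhere useful.
			http.Error(w, "gateway not configured", http.StatusNotImplemented)
			return
		}
		proxy.ServeHTTP(w, r)
	})
}

func main() {
	http.Handle("/gateway/", gatewayHandler(nil))
	_ = http.ListenAndServe(":8081", nil)
}
```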
@@ -13,6 +13,7 @@ import (

"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

var C *Client
@@ -37,7 +38,7 @@ func init() {
}

// ActivateLicense sends key to license.signoz.io and gets activation data
func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
func ActivateLicense(key, siteId string) (*ActivationResponse, *basemodel.ApiError) {
licenseReq := map[string]string{
"key": key,
"siteId": siteId,
@@ -48,13 +49,13 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)

if err != nil {
zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
return nil, basemodel.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
}

httpBody, err := io.ReadAll(httpResponse.Body)
if err != nil {
zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
return nil, basemodel.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
}

defer httpResponse.Body.Close()
@@ -64,22 +65,22 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
err = json.Unmarshal(httpBody, &result)
if err != nil {
zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
return nil, basemodel.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
}

switch httpResponse.StatusCode {
case 200, 201:
return result.Data, nil
case 400, 401:
return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
return nil, basemodel.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
default:
return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
return nil, basemodel.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
}

}

// ValidateLicense validates the license key
func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
func ValidateLicense(activationId string) (*ActivationResponse, *basemodel.ApiError) {
validReq := map[string]string{
"activationId": activationId,
}
@@ -88,12 +89,12 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))

if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
return nil, basemodel.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
}

body, err := io.ReadAll(response.Body)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
return nil, basemodel.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
}

defer response.Body.Close()
@@ -103,14 +104,14 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
a := ActivationResult{}
err = json.Unmarshal(body, &a)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
return nil, basemodel.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
}
return a.Data, nil
case 400, 401:
return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
return nil, basemodel.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io"))
default:
return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
return nil, basemodel.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io"))
}

@@ -127,21 +128,21 @@ func NewPostRequestWithCtx(ctx context.Context, url string, contentType string,
}

// SendUsage reports the usage of signoz to license server
func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
func SendUsage(ctx context.Context, usage model.UsagePayload) *basemodel.ApiError {
reqString, _ := json.Marshal(usage)
req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {
return model.BadRequest(errors.Wrap(err, "unable to create http request"))
return basemodel.BadRequest(errors.Wrap(err, "unable to create http request"))
}

res, err := http.DefaultClient.Do(req)
if err != nil {
return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
return basemodel.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
}

body, err := io.ReadAll(res.Body)
if err != nil {
return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
return basemodel.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
}

defer res.Body.Close()
@@ -150,10 +151,10 @@ func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
case 200, 201:
return nil
case 400, 401:
return model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
return basemodel.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io"))
default:
return model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
return basemodel.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io"))
}
}

@@ -137,17 +137,17 @@ func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
return nil
}

func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *basemodel.ApiError) {

licenses, err := lm.repo.GetLicenses(ctx)
if err != nil {
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}

for _, l := range licenses {
l.ParsePlan()

if lm.activeLicense != nil && l.Key == lm.activeLicense.Key {
if l.Key == lm.activeLicense.Key {
l.IsCurrent = true
}

@@ -212,8 +212,8 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {

response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
if apiError != nil {
zap.L().Error("failed to validate license", zap.Error(apiError.Err))
return apiError.Err
zap.L().Error("failed to validate license", zap.Any("apiError", apiError))
return apiError
}

if response.PlanDetails == lm.activeLicense.PlanDetails {
@@ -255,7 +255,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
}

// Activate activates a license key with signoz server
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *basemodel.ApiError) {
defer func() {
if errResponse != nil {
userEmail, err := auth.GetEmailFromJwt(ctx)
@@ -268,7 +268,7 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m

response, apiError := validate.ActivateLicense(key, "")
if apiError != nil {
zap.L().Error("failed to activate license", zap.Error(apiError.Err))
zap.L().Error("failed to activate license", zap.Any("apiError", apiError))
return nil, apiError
}

@@ -283,14 +283,14 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m

if err != nil {
zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}

// store the license before activating it
err = lm.repo.InsertLicense(ctx, l)
if err != nil {
zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err)
return nil, basemodel.InternalError(err)
}

// license is valid, activate it

@@ -87,9 +87,9 @@ func main() {
var ruleRepoURL string
var cluster string

var useLogsNewSchema bool
var cacheConfigPath, fluxInterval string
var enableQueryServiceLogOTLPExport bool
var preferDelta bool
var preferSpanMetrics bool

var maxIdleConns int
@@ -97,17 +97,17 @@ func main() {
var dialTimeout time.Duration
var gatewayUrl string

flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)")
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
@@ -125,6 +125,7 @@ func main() {
HTTPHostPort: baseconst.HTTPHostPort,
PromConfigPath: promConfigPath,
SkipTopLvlOpsPath: skipTopLvlOpsPath,
PreferDelta: preferDelta,
PreferSpanMetrics: preferSpanMetrics,
PrivateHostPort: baseconst.PrivateHostPort,
DisableRules: disableRules,
@@ -136,7 +137,6 @@ func main() {
FluxInterval: fluxInterval,
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
}

// Read the jwt secret key

@@ -1,107 +1,5 @@
package model

import (
"fmt"

basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

type ApiError struct {
Typ basemodel.ErrorType
Err error
}

func (a *ApiError) Type() basemodel.ErrorType {
return a.Typ
}

func (a *ApiError) ToError() error {
if a != nil {
return a.Err
}
return a.Err
}

func (a *ApiError) Error() string {
return a.Err.Error()
}

func (a *ApiError) IsNil() bool {
return a == nil || a.Err == nil
}

// NewApiError returns a ApiError object of given type
func NewApiError(typ basemodel.ErrorType, err error) *ApiError {
return &ApiError{
Typ: typ,
Err: err,
}
}

// BadRequest returns a ApiError object of bad request
func BadRequest(err error) *ApiError {
return &ApiError{
Typ: basemodel.ErrorBadData,
Err: err,
}
}

// BadRequestStr returns a ApiError object of bad request for string input
func BadRequestStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorBadData,
Err: fmt.Errorf(s),
}
}

// InternalError returns a ApiError object of internal type
func InternalError(err error) *ApiError {
return &ApiError{
Typ: basemodel.ErrorInternal,
Err: err,
}
}

// InternalErrorStr returns a ApiError object of internal type for string input
func InternalErrorStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf(s),
}
}

var (
ErrorNone basemodel.ErrorType = ""
ErrorTimeout basemodel.ErrorType = "timeout"
ErrorCanceled basemodel.ErrorType = "canceled"
ErrorExec basemodel.ErrorType = "execution"
ErrorBadData basemodel.ErrorType = "bad_data"
ErrorInternal basemodel.ErrorType = "internal"
ErrorUnavailable basemodel.ErrorType = "unavailable"
ErrorNotFound basemodel.ErrorType = "not_found"
ErrorNotImplemented basemodel.ErrorType = "not_implemented"
ErrorUnauthorized basemodel.ErrorType = "unauthorized"
ErrorForbidden basemodel.ErrorType = "forbidden"
ErrorConflict basemodel.ErrorType = "conflict"
ErrorStreamingNotSupported basemodel.ErrorType = "streaming is not supported"
)

func init() {
ErrorNone = basemodel.ErrorNone
ErrorTimeout = basemodel.ErrorTimeout
ErrorCanceled = basemodel.ErrorCanceled
ErrorExec = basemodel.ErrorExec
ErrorBadData = basemodel.ErrorBadData
ErrorInternal = basemodel.ErrorInternal
ErrorUnavailable = basemodel.ErrorUnavailable
ErrorNotFound = basemodel.ErrorNotFound
ErrorNotImplemented = basemodel.ErrorNotImplemented
ErrorUnauthorized = basemodel.ErrorUnauthorized
ErrorForbidden = basemodel.ErrorForbidden
ErrorConflict = basemodel.ErrorConflict
ErrorStreamingNotSupported = basemodel.ErrorStreamingNotSupported
}

type ErrUnsupportedAuth struct{}

func (errUnsupportedAuth ErrUnsupportedAuth) Error() string {

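The hunk above removes the ee-local `ApiError` wrapper (and its mirrored error-type constants) in favor of `basemodel.ApiError`. For reference, here is a self-contained sketch of the constructor-and-`IsNil` pattern these helpers support; the types below are local stand-ins mirroring the shapes shown in the hunk, not the SigNoz packages themselves.

```go
package main

import (
	"errors"
	"fmt"
)

type ErrorType string

const (
	ErrorBadData  ErrorType = "bad_data"
	ErrorInternal ErrorType = "internal"
)

// ApiError mirrors the struct deleted above: a typed category plus the cause.
type ApiError struct {
	Typ ErrorType
	Err error
}

func (a *ApiError) Error() string { return a.Err.Error() }

// IsNil treats both a nil pointer and a nil wrapped error as "no error".
func (a *ApiError) IsNil() bool { return a == nil || a.Err == nil }

func BadRequest(err error) *ApiError    { return &ApiError{Typ: ErrorBadData, Err: err} }
func InternalError(err error) *ApiError { return &ApiError{Typ: ErrorInternal, Err: err} }

// lookup is a hypothetical caller showing the usual return convention.
func lookup(id string) (string, *ApiError) {
	if id == "" {
		return "", BadRequest(errors.New("id is required"))
	}
	return "value-for-" + id, nil
}

func main() {
	if _, apiErr := lookup(""); !apiErr.IsNil() {
		fmt.Printf("%s: %v\n", apiErr.Typ, apiErr)
	}
}
```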
@@ -11,8 +11,6 @@ const Enterprise = "ENTERPRISE_PLAN"
const DisableUpsell = "DISABLE_UPSELL"
const Onboarding = "ONBOARDING"
const ChatSupport = "CHAT_SUPPORT"
const Gateway = "GATEWAY"
const PremiumSupport = "PREMIUM_SUPPORT"

var BasicPlan = basemodel.FeatureSet{
basemodel.Feature{
@@ -113,27 +111,6 @@ var BasicPlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: Gateway,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: PremiumSupport,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AnomalyDetection,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}

var ProPlan = basemodel.FeatureSet{
@@ -228,27 +205,6 @@ var ProPlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: Gateway,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: PremiumSupport,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AnomalyDetection,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}

var EnterprisePlan = basemodel.FeatureSet{
@@ -357,25 +313,4 @@ var EnterprisePlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: Gateway,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: PremiumSupport,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.AnomalyDetection,
Active: true,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}

@@ -1,393 +0,0 @@
package rules

import (
"context"
"encoding/json"
"fmt"
"math"
"strings"
"sync"
"time"

"go.uber.org/zap"

"go.signoz.io/signoz/ee/query-service/anomaly"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/model"

querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
"go.signoz.io/signoz/pkg/query-service/interfaces"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.signoz.io/signoz/pkg/query-service/utils/times"
"go.signoz.io/signoz/pkg/query-service/utils/timestamp"

"go.signoz.io/signoz/pkg/query-service/formatter"

baserules "go.signoz.io/signoz/pkg/query-service/rules"

yaml "gopkg.in/yaml.v2"
)

const (
RuleTypeAnomaly = "anomaly_rule"
)

type AnomalyRule struct {
*baserules.BaseRule

mtx sync.Mutex

reader interfaces.Reader

// querierV2 is used for alerts created after the introduction of new metrics query builder
querierV2 interfaces.Querier

provider anomaly.Provider

seasonality anomaly.Seasonality
}

func NewAnomalyRule(
id string,
p *baserules.PostableRule,
featureFlags interfaces.FeatureLookup,
reader interfaces.Reader,
cache cache.Cache,
opts ...baserules.RuleOption,
) (*AnomalyRule, error) {

zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))

baseRule, err := baserules.NewBaseRule(id, p, reader, opts...)
if err != nil {
return nil, err
}

t := AnomalyRule{
BaseRule: baseRule,
}

switch strings.ToLower(p.RuleCondition.Seasonality) {
case "hourly":
t.seasonality = anomaly.SeasonalityHourly
case "daily":
t.seasonality = anomaly.SeasonalityDaily
case "weekly":
t.seasonality = anomaly.SeasonalityWeekly
default:
t.seasonality = anomaly.SeasonalityDaily
}

zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String()))

querierOptsV2 := querierV2.QuerierOptions{
Reader: reader,
Cache: cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
}

t.querierV2 = querierV2.NewQuerier(querierOptsV2)
t.reader = reader
if t.seasonality == anomaly.SeasonalityHourly {
t.provider = anomaly.NewHourlyProvider(
anomaly.WithCache[*anomaly.HourlyProvider](cache),
anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.HourlyProvider](reader),
anomaly.WithFeatureLookup[*anomaly.HourlyProvider](featureFlags),
)
} else if t.seasonality == anomaly.SeasonalityDaily {
t.provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](reader),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](featureFlags),
)
} else if t.seasonality == anomaly.SeasonalityWeekly {
t.provider = anomaly.NewWeeklyProvider(
anomaly.WithCache[*anomaly.WeeklyProvider](cache),
anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.WeeklyProvider](reader),
anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](featureFlags),
)
}
return &t, nil
}

func (r *AnomalyRule) Type() baserules.RuleType {
return RuleTypeAnomaly
}

func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) {

zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.EvalWindow().Milliseconds()), zap.Int64("evalDelay", r.EvalDelay().Milliseconds()))

start := ts.Add(-time.Duration(r.EvalWindow())).UnixMilli()
end := ts.UnixMilli()

if r.EvalDelay() > 0 {
start = start - int64(r.EvalDelay().Milliseconds())
end = end - int64(r.EvalDelay().Milliseconds())
}
// round to minute otherwise we could potentially miss data
start = start - (start % (60 * 1000))
end = end - (end % (60 * 1000))

compositeQuery := r.Condition().CompositeQuery

if compositeQuery.PanelType != v3.PanelTypeGraph {
compositeQuery.PanelType = v3.PanelTypeGraph
}

// default mode
return &v3.QueryRangeParamsV3{
Start: start,
End: end,
Step: int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60)),
CompositeQuery: compositeQuery,
Variables: make(map[string]interface{}, 0),
NoCache: false,
}, nil
}

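`prepareQueryRange` above shifts the evaluation window back by the eval delay and then rounds both ends down to the minute so datapoints at the window edges are not missed. The same arithmetic, extracted into a standalone helper for clarity; this is a sketch, not the original code.

```go
package main

import (
	"fmt"
	"time"
)

// queryWindow computes [start, end) in Unix milliseconds: shift the window
// back by evalDelay, then align both ends to minute boundaries.
func queryWindow(ts time.Time, evalWindow, evalDelay time.Duration) (start, end int64) {
	start = ts.Add(-evalWindow).UnixMilli()
	end = ts.UnixMilli()
	if evalDelay > 0 {
		start -= evalDelay.Milliseconds()
		end -= evalDelay.Milliseconds()
	}
	// Round down to the minute; otherwise edge data could be missed.
	start -= start % (60 * 1000)
	end -= end % (60 * 1000)
	return start, end
}

func main() {
	s, e := queryWindow(time.Now(), 5*time.Minute, 2*time.Minute)
	fmt.Println(s, e, (e-s)/60000, "minutes")
}
```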
func (r *AnomalyRule) GetSelectedQuery() string {
return r.Condition().GetSelectedQueryName()
}

func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, ts time.Time) (baserules.Vector, error) {

params, err := r.prepareQueryRange(ts)
if err != nil {
return nil, err
}
err = r.PopulateTemporality(ctx, params)
if err != nil {
return nil, fmt.Errorf("internal error while setting temporality")
}

anomalies, err := r.provider.GetAnomalies(ctx, &anomaly.GetAnomaliesRequest{
Params: params,
Seasonality: r.seasonality,
})
if err != nil {
return nil, err
}

var queryResult *v3.Result
for _, result := range anomalies.Results {
if result.QueryName == r.GetSelectedQuery() {
queryResult = result
break
}
}

var resultVector baserules.Vector

scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON)))

for _, series := range queryResult.AnomalyScores {
smpl, shouldAlert := r.ShouldAlert(*series)
if shouldAlert {
resultVector = append(resultVector, smpl)
}
}
return resultVector, nil
}

func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {

prevState := r.State()

valueFormatter := formatter.FromUnit(r.Unit())
res, err := r.buildAndRunQuery(ctx, ts)

if err != nil {
return nil, err
}

r.mtx.Lock()
defer r.mtx.Unlock()

resultFPs := map[uint64]struct{}{}
var alerts = make(map[uint64]*baserules.Alert, len(res))

for _, smpl := range res {
l := make(map[string]string, len(smpl.Metric))
for _, lbl := range smpl.Metric {
l[lbl.Name] = lbl.Value
}

value := valueFormatter.Format(smpl.V, r.Unit())
threshold := valueFormatter.Format(r.TargetVal(), r.Unit())
zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))

tmplData := baserules.AlertTemplateData(l, value, threshold)
// Inject some convenience variables that are easier to remember for users
// who are not used to Go's templating system.
defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}"

// utility function to apply go template on labels and annotations
expand := func(text string) string {

tmpl := baserules.NewTemplateExpander(
ctx,
defs+text,
"__alert_"+r.Name(),
tmplData,
times.Time(timestamp.FromTime(ts)),
nil,
)
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err)
zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
}
return result
}

lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel)
resultLabels := labels.NewBuilder(smpl.MetricOrig).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel).Labels()

for name, value := range r.Labels().Map() {
lb.Set(name, expand(value))
}

lb.Set(labels.AlertNameLabel, r.Name())
lb.Set(labels.AlertRuleIdLabel, r.ID())
lb.Set(labels.RuleSourceLabel, r.GeneratorURL())

annotations := make(labels.Labels, 0, len(r.Annotations().Map()))
for name, value := range r.Annotations().Map() {
annotations = append(annotations, labels.Label{Name: common.NormalizeLabelName(name), Value: expand(value)})
}
if smpl.IsMissing {
lb.Set(labels.AlertNameLabel, "[No data] "+r.Name())
}

lbs := lb.Labels()
h := lbs.Hash()
resultFPs[h] = struct{}{}

if _, ok := alerts[h]; ok {
zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
return nil, err
}
|
||||
alerts[h] = &baserules.Alert{
|
||||
Labels: lbs,
|
||||
QueryResultLables: resultLabels,
|
||||
Annotations: annotations,
|
||||
ActiveAt: ts,
|
||||
State: model.StatePending,
|
||||
Value: smpl.V,
|
||||
GeneratorURL: r.GeneratorURL(),
|
||||
Receivers: r.PreferredChannels(),
|
||||
Missing: smpl.IsMissing,
|
||||
}
|
||||
}
|
||||
|
||||
zap.L().Info("number of alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))
|
||||
|
||||
// alerts[h] is ready, add or update active list now
|
||||
for h, a := range alerts {
|
||||
// Check whether we already have alerting state for the identifying label set.
|
||||
// Update the last value and annotations if so, create a new alert entry otherwise.
|
||||
if alert, ok := r.Active[h]; ok && alert.State != model.StateInactive {
|
||||
|
||||
alert.Value = a.Value
|
||||
alert.Annotations = a.Annotations
|
||||
alert.Receivers = r.PreferredChannels()
|
||||
continue
|
||||
}
|
||||
|
||||
r.Active[h] = a
|
||||
}
|
||||
|
||||
itemsToAdd := []model.RuleStateHistory{}
|
||||
|
||||
// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
|
||||
for fp, a := range r.Active {
|
||||
labelsJSON, err := json.Marshal(a.QueryResultLables)
|
||||
if err != nil {
|
||||
zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
|
||||
}
|
||||
if _, ok := resultFPs[fp]; !ok {
|
||||
// If the alert was previously firing, keep it around for a given
|
||||
// retention time so it is reported as resolved to the AlertManager.
|
||||
if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > baserules.ResolvedRetention) {
|
||||
delete(r.Active, fp)
|
||||
}
|
||||
if a.State != model.StateInactive {
|
||||
a.State = model.StateInactive
|
||||
a.ResolvedAt = ts
|
||||
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
|
||||
RuleID: r.ID(),
|
||||
RuleName: r.Name(),
|
||||
State: model.StateInactive,
|
||||
StateChanged: true,
|
||||
UnixMilli: ts.UnixMilli(),
|
||||
Labels: model.LabelsString(labelsJSON),
|
||||
Fingerprint: a.QueryResultLables.Hash(),
|
||||
Value: a.Value,
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if a.State == model.StatePending && ts.Sub(a.ActiveAt) >= r.HoldDuration() {
|
||||
a.State = model.StateFiring
|
||||
a.FiredAt = ts
|
||||
state := model.StateFiring
|
||||
if a.Missing {
|
||||
state = model.StateNoData
|
||||
}
|
||||
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
|
||||
RuleID: r.ID(),
|
||||
RuleName: r.Name(),
|
||||
State: state,
|
||||
StateChanged: true,
|
||||
UnixMilli: ts.UnixMilli(),
|
||||
Labels: model.LabelsString(labelsJSON),
|
||||
Fingerprint: a.QueryResultLables.Hash(),
|
||||
Value: a.Value,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
currentState := r.State()
|
||||
|
||||
overallStateChanged := currentState != prevState
|
||||
for idx, item := range itemsToAdd {
|
||||
item.OverallStateChanged = overallStateChanged
|
||||
item.OverallState = currentState
|
||||
itemsToAdd[idx] = item
|
||||
}
|
||||
|
||||
r.RecordRuleStateHistory(ctx, prevState, currentState, itemsToAdd)
|
||||
|
||||
return len(r.Active), nil
|
||||
}
|
||||
|
||||
func (r *AnomalyRule) String() string {
|
||||
|
||||
ar := baserules.PostableRule{
|
||||
AlertName: r.Name(),
|
||||
RuleCondition: r.Condition(),
|
||||
EvalWindow: baserules.Duration(r.EvalWindow()),
|
||||
Labels: r.Labels().Map(),
|
||||
Annotations: r.Annotations().Map(),
|
||||
PreferredChannels: r.PreferredChannels(),
|
||||
}
|
||||
|
||||
byt, err := yaml.Marshal(ar)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
|
||||
}
|
||||
|
||||
return string(byt)
|
||||
}
|
||||
@@ -1,89 +0,0 @@
package rules

import (
	"fmt"
	"time"

	baserules "go.signoz.io/signoz/pkg/query-service/rules"
)

func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {

	rules := make([]baserules.Rule, 0)
	var task baserules.Task

	ruleId := baserules.RuleIdFromTaskName(opts.TaskName)
	if opts.Rule.RuleType == baserules.RuleTypeThreshold {
		// create a threshold rule
		tr, err := baserules.NewThresholdRule(
			ruleId,
			opts.Rule,
			opts.FF,
			opts.Reader,
			opts.UseLogsNewSchema,
			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
		)

		if err != nil {
			return task, err
		}

		rules = append(rules, tr)

		// create ch rule task for evaluation
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)

	} else if opts.Rule.RuleType == baserules.RuleTypeProm {

		// create promql rule
		pr, err := baserules.NewPromRule(
			ruleId,
			opts.Rule,
			opts.Logger,
			opts.Reader,
			opts.ManagerOpts.PqlEngine,
		)

		if err != nil {
			return task, err
		}

		rules = append(rules, pr)

		// create promql rule task for evaluation
		task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)

	} else if opts.Rule.RuleType == baserules.RuleTypeAnomaly {
		// create anomaly rule
		ar, err := NewAnomalyRule(
			ruleId,
			opts.Rule,
			opts.FF,
			opts.Reader,
			opts.Cache,
			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
		)
		if err != nil {
			return task, err
		}

		rules = append(rules, ar)

		// create anomaly rule task for evaluation
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)

	} else {
		return nil, fmt.Errorf("unsupported rule type. Supported types: %s, %s", baserules.RuleTypeProm, baserules.RuleTypeThreshold)
	}

	return task, nil
}

// newTask returns an appropriate task group for the rule type
func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, ruleDB baserules.RuleDB) baserules.Task {
	if taskType == baserules.TaskTypeCh {
		return baserules.NewRuleTask(name, "", frequency, rules, opts, notify, ruleDB)
	}
	return baserules.NewPromRuleTask(name, "", frequency, rules, opts, notify, ruleDB)
}
@@ -190,7 +190,7 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
		} else if apiErr != nil {
			// sleeping for exponential backoff
			sleepDuration := RetryInterval * time.Duration(i)
			zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Error(apiErr.Err))
			zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Any("apiErr", apiErr))
			time.Sleep(sleepDuration)
		} else {
			break
@@ -9,7 +9,6 @@ const config: Config.InitialOptions = {
  modulePathIgnorePatterns: ['dist'],
  moduleNameMapper: {
    '\\.(css|less|scss)$': '<rootDir>/__mocks__/cssMock.ts',
    '\\.md$': '<rootDir>/__mocks__/cssMock.ts',
  },
  globals: {
    extensionsToTreatAsEsm: ['.ts'],
@@ -51,7 +51,7 @@
    "ansi-to-html": "0.7.2",
    "antd": "5.11.0",
    "antd-table-saveas-excel": "2.2.1",
    "axios": "1.7.4",
    "axios": "1.6.4",
    "babel-eslint": "^10.1.0",
    "babel-jest": "^29.6.4",
    "babel-loader": "9.1.3",
@@ -88,7 +88,6 @@
    "lucide-react": "0.379.0",
    "mini-css-extract-plugin": "2.4.5",
    "papaparse": "5.4.1",
    "posthog-js": "1.160.3",
    "rc-tween-one": "3.0.6",
    "react": "18.2.0",
    "react-addons-update": "15.6.3",
@@ -110,8 +109,6 @@
    "react-syntax-highlighter": "15.5.0",
    "react-use": "^17.3.2",
    "react-virtuoso": "4.0.3",
    "overlayscrollbars-react": "^0.5.6",
    "overlayscrollbars": "^2.8.1",
    "redux": "^4.0.5",
    "redux-thunk": "^2.3.0",
    "rehype-raw": "7.0.0",
@@ -207,6 +204,7 @@
    "eslint-plugin-sonarjs": "^0.12.0",
    "husky": "^7.0.4",
    "is-ci": "^3.0.1",
    "jest-playwright-preset": "^1.7.2",
    "jest-styled-components": "^7.0.8",
    "lint-staged": "^12.5.0",
    "msw": "1.3.2",
@@ -1 +0,0 @@
<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_4344_1236)" stroke="#C0C1C3" stroke-width="1.167" stroke-linecap="round" stroke-linejoin="round"><path d="M4.667 1.167H2.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V2.333c0-.644-.522-1.166-1.166-1.166zM8.167 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M11.667 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M5.833 10.5H2.917c-.992 0-1.75-.758-1.75-1.75v-.583"/><path d="M4.083 12.25l1.75-1.75-1.75-1.75M11.667 8.167H9.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V9.333c0-.644-.522-1.166-1.166-1.166z"/></g><defs><clipPath id="prefix__clip0_4344_1236"><path fill="#fff" d="M0 0h14v14H0z"/></clipPath></defs></svg>
@@ -1 +0,0 @@
<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_4062_7291)" stroke-width="1.167" stroke-linecap="round" stroke-linejoin="round"><path d="M7 12.833A5.833 5.833 0 107 1.167a5.833 5.833 0 000 11.666z" fill="#E5484D" stroke="#E5484D"/><path d="M8.75 5.25l-3.5 3.5M5.25 5.25l3.5 3.5" stroke="#121317"/></g><defs><clipPath id="prefix__clip0_4062_7291"><path fill="#fff" d="M0 0h14v14H0z"/></clipPath></defs></svg>
@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="2.94" y1="3.74" x2="8.67" y2="3.74" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="b" x1="9.13" y1="3.79" x2="14.85" y2="3.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="c" x1=".01" y1="9.12" x2="5.73" y2="9.12" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="d" x1="6.18" y1="9.08" x2="11.9" y2="9.08" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="e" x1="12.35" y1="9.13" x2="18.08" y2="9.13" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="f" x1="2.87" y1="14.56" x2="8.6" y2="14.56" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="g" x1="9.05" y1="14.6" x2="14.78" y2="14.6" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient></defs><path fill="url(#a)" d="M5.8 1.22l-2.86.53v3.9l2.86.61 2.87-1.15V2.2L5.8 1.22z"/><path d="M5.91 6.2l2.62-1.06A.2.2 0 008.65 5V2.36a.21.21 0 00-.13-.18l-2.65-.9h-.12l-2.6.48a.2.2 0 00-.15.18v3.53a.19.19 0 00.15.19l2.63.55a.32.32 0 00.13-.01z" fill="none"/><path d="M2.94 1.75v3.9l2.89.61v-5zm1.22 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2l.93-.16z" fill="#341a6e"/><path fill="url(#b)" d="M11.99 1.27l-2.86.53v3.9l2.86.61 2.86-1.16v-2.9l-2.86-.98z"/><path d="M9.13 1.8v3.9l2.87.61v-5zm1.21 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2.05l.93-.17z" fill="#341a6e"/><path fill="url(#c)" d="M2.87 6.6l-2.86.53v3.9l2.86.61 2.87-1.15V7.58L2.87 6.6z"/><path d="M0 7.13V11l2.89.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.27.26l-.93-.15V7.38l.93-.16z" fill="#341a6e"/><path fill="url(#d)" d="M9.04 6.56l-2.86.53v3.9l2.86.62 2.86-1.16V7.54l-2.86-.98z"/><path d="M6.18 7.09V11l2.88.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.34l.93-.16z" fill="#341a6e"/><path fill="url(#e)" d="M15.21 6.61l-2.86.53v3.9l2.86.61 2.87-1.15V7.59l-2.87-.98z"/><path d="M12.35 7.14V11l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.39l.93-.16z" fill="#341a6e"/><path fill="url(#f)" d="M5.73 12.04l-2.86.52v3.9l2.86.62 2.87-1.16v-2.9l-2.87-.98z"/><path d="M5.84 17l2.61-1a.18.18 0 00.12-.18v-2.6a.2.2 0 00-.13-.22l-2.64-.9a.17.17 0 00-.12 0l-2.6.47a.19.19 0 00-.16.19v3.54a.19.19 0 00.15.19L5.7 17a.23.23 0 00.14 0z" fill="none"/><path d="M2.87 12.56v3.9l2.89.62V12zm1.22 3.61L3.28 16v-3l.81-.14zm1.26.23l-.93-.15v-3.44l.93-.16z" fill="#341a6e"/><path fill="url(#g)" d="M11.91 12.08l-2.86.53v3.9l2.86.61 2.87-1.15v-2.91l-2.87-.98z"/><path d="M9.05 12.61v3.9l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15v-3.43l.93-.16z" fill="#341a6e"/></svg>
@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="b" x1="4.4" y1="11.48" x2="4.37" y2="7.53" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="c" x1="10.13" y1="15.45" x2="10.13" y2="11.9" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="d" x1="14.18" y1="11.15" x2="14.18" y2="7.38" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><radialGradient id="a" cx="13428.81" cy="3518.86" r="56.67" gradientTransform="matrix(.15 0 0 .15 -2005.33 -518.83)" gradientUnits="userSpaceOnUse"><stop offset=".18" stop-color="#5ea0ef"/><stop offset="1" stop-color="#0078d4"/></radialGradient></defs><path d="M14.21 15.72A8.5 8.5 0 013.79 2.28l.09-.06a8.5 8.5 0 0110.33 13.5" fill="url(#a)"/><path d="M6.69 7.23a13 13 0 018.91-3.58 8.47 8.47 0 00-1.49-1.44 14.34 14.34 0 00-4.69 1.1 12.54 12.54 0 00-4.08 2.82 2.76 2.76 0 011.35 1.1zM2.48 10.65a17.86 17.86 0 00-.83 2.62 7.82 7.82 0 00.62.92c.18.23.35.44.55.65a17.94 17.94 0 011.08-3.47 2.76 2.76 0 01-1.42-.72z" fill="#fff" opacity=".6"/><path d="M3.46 6.11a12 12 0 01-.69-2.94 8.15 8.15 0 00-1.1 1.45A12.69 12.69 0 002.24 7a2.69 2.69 0 011.22-.89z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><path d="M8.36 13.67a1.77 1.77 0 01.54-1.27 11.88 11.88 0 01-2.53-1.86 2.74 2.74 0 01-1.49.83 13.1 13.1 0 001.45 1.28 12.12 12.12 0 002.05 1.25 1.79 1.79 0 01-.02-.23zM14.66 13.88a12 12 0 01-2.76-.32.41.41 0 010 .11 1.75 1.75 0 01-.51 1.24 13.69 13.69 0 003.42.24A8.21 8.21 0 0016 13.81a11.5 11.5 0 01-1.34.07z" fill="#f2f2f2" opacity=".55"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/><path d="M12.32 8.93a1.83 1.83 0 01.61-1 25.5 25.5 0 01-4.46-4.14 16.91 16.91 0 01-2-2.92 7.64 7.64 0 00-1.09.42 18.14 18.14 0 002.15 3.18 26.44 26.44 0 004.79 4.46z" fill="#f2f2f2" opacity=".7"/><circle cx="14.18" cy="9.27" r="1.89" fill="url(#d)"/><path d="M17.35 10.54l-.35-.17-.3-.16h-.06l-.26-.21h-.07L16 9.8a1.76 1.76 0 01-.64.92c.12.08.25.15.38.22l.08.05.35.19.86.45a8.63 8.63 0 00.29-1.11z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/></svg>
@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="b27f1ad0-7d11-4247-9da3-91bce6211f32" x1="8.798" y1="8.703" x2="14.683" y2="8.703" gradientUnits="userSpaceOnUse"><stop offset="0.001" stop-color="#773adc" /><stop offset="1" stop-color="#552f99" /></linearGradient><linearGradient id="b2f92112-4ca9-4b17-a019-c9f26c1a4a8f" x1="5.764" y1="3.777" x2="5.764" y2="13.78" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#a67af4" /><stop offset="0.999" stop-color="#773adc" /></linearGradient></defs><g id="b8a0486a-5501-4d92-b540-a766c4b3b548"><g><g><g><path d="M16.932,11.578a8.448,8.448,0,0,1-7.95,5.59,8.15,8.15,0,0,1-2.33-.33,2.133,2.133,0,0,0,.18-.83c.01,0,.03.01.04.01a7.422,7.422,0,0,0,2.11.3,7.646,7.646,0,0,0,6.85-4.28l.01-.01Z" fill="#32bedd" /><path d="M3.582,14.068a2.025,2.025,0,0,0-.64.56,8.6,8.6,0,0,1-1.67-2.44l1.04.23v.26a.6.6,0,0,0,.47.59l.14.03a6.136,6.136,0,0,0,.62.73Z" fill="#32bedd" /><path d="M12.352.958a2.28,2.28,0,0,0-.27.81c-.02-.01-.05-.02-.07-.03a7.479,7.479,0,0,0-3.03-.63,7.643,7.643,0,0,0-5.9,2.8l-.29.06a.6.6,0,0,0-.48.58v.46l-1.02.19A8.454,8.454,0,0,1,8.982.268,8.6,8.6,0,0,1,12.352.958Z" fill="#32bedd" /><path d="M16.872,5.7l-1.09-.38a6.6,6.6,0,0,0-.72-1.16c-.02-.03-.04-.05-.05-.07a2.083,2.083,0,0,0,.72-.45A7.81,7.81,0,0,1,16.872,5.7Z" fill="#32bedd" /><path d="M10.072,11.908l2.54.56L8.672,14.1c-.02,0-.03.01-.05.01a.154.154,0,0,1-.15-.15V3.448a.154.154,0,0,1,.15-.15.09.09,0,0,1,.05.01l4.46,1.56-3.05.57a.565.565,0,0,0-.44.54v5.4A.537.537,0,0,0,10.072,11.908Z" fill="#fff" /><g><g id="e918f286-5032-4942-ad29-ea17e6f1cc90"><path d="M1.1,5.668l1.21-.23v6.55l-1.23-.27-.99-.22a.111.111,0,0,1-.09-.12v-5.4a.12.12,0,0,1,.09-.12Z" fill="#a67af4" /></g><g><g id="a47a99dd-4d47-4c70-8c42-c5ac274ce496"><g><path d="M10.072,11.908l2.54.56L8.672,14.1c-.02,0-.03.01-.05.01a.154.154,0,0,1-.15-.15V3.448a.154.154,0,0,1,.15-.15.09.09,0,0,1,.05.01l4.46,1.56-3.05.57a.565.565,0,0,0-.44.54v5.4A.537.537,0,0,0,10.072,11.908Z" fill="url(#b27f1ad0-7d11-4247-9da3-91bce6211f32)" /><path d="M8.586,3.3,2.878,4.378a.177.177,0,0,0-.14.175V12.68a.177.177,0,0,0,.137.174L8.581,14.1a.176.176,0,0,0,.21-.174V3.478A.175.175,0,0,0,8.619,3.3Z" fill="url(#b2f92112-4ca9-4b17-a019-c9f26c1a4a8f)" /></g></g><polygon points="5.948 4.921 5.948 12.483 7.934 12.814 7.934 4.564 5.948 4.921" fill="#b796f9" opacity="0.5" /><polygon points="3.509 5.329 3.509 11.954 5.238 12.317 5.238 5.031 3.509 5.329" fill="#b796f9" opacity="0.5" /></g></g></g><path d="M16,2.048a1.755,1.755,0,1,1-1.76-1.76A1.756,1.756,0,0,1,16,2.048Z" fill="#32bedd" /><circle cx="4.65" cy="15.973" r="1.759" fill="#32bedd" /></g><path d="M18,6.689v3.844a.222.222,0,0,1-.133.2l-.766.316-3.07,1.268-.011,0a.126.126,0,0,1-.038,0,.1.1,0,0,1-.1-.1V5.234a.1.1,0,0,1,.054-.088l0,0,.019,0a.031.031,0,0,1,.019,0,.055.055,0,0,1,.034.008l.011,0,.012,0L17.05,6.2l.8.282A.213.213,0,0,1,18,6.689Z" fill="#773adc" /><path d="M13.959,5.14l-3.8.715a.118.118,0,0,0-.093.117v5.409a.118.118,0,0,0,.091.116l3.8.831a.115.115,0,0,0,.137-.09.109.109,0,0,0,0-.026V5.256a.117.117,0,0,0-.115-.118A.082.082,0,0,0,13.959,5.14Z" fill="#a67af4" /></g></g></svg>
@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="-175.993" y1="-343.723" x2="-175.993" y2="-359.232" gradientTransform="matrix(1.156 0 0 1.029 212.573 370.548)" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#fea11b"/><stop offset=".284" stop-color="#fea51a"/><stop offset=".547" stop-color="#feb018"/><stop offset=".8" stop-color="#ffc314"/><stop offset="1" stop-color="#ffd70f"/></linearGradient></defs><path d="M5.54 13.105l-.586.588a.267.267 0 01-.377 0L.223 9.353a.533.533 0 010-.755l.588-.59 4.732 4.718a.267.267 0 010 .378z" fill="#50e6ff"/><path d="M4.863 4.305l.59.588a.267.267 0 010 .378L.806 9.932l-.59-.589a.533.533 0 01-.001-.754l4.273-4.285a.267.267 0 01.376 0z" fill="#1490df"/><path d="M17.19 8.012l.588.59a.533.533 0 01-.001.754l-4.354 4.34a.267.267 0 01-.377 0l-.586-.587a.267.267 0 010-.377l4.732-4.718z" fill="#50e6ff"/><path d="M17.782 9.34l-.59.589-4.648-4.662a.267.267 0 010-.377l.59-.588a.267.267 0 01.378 0l4.273 4.286a.533.533 0 010 .753z" fill="#1490df"/><path d="M8.459 9.9H4.87a.193.193 0 01-.2-.181.166.166 0 01.018-.075L8.991 1.13a.206.206 0 01.186-.106h4.245a.193.193 0 01.2.181.165.165 0 01-.035.1L8.534 7.966h4.928a.193.193 0 01.2.181.176.176 0 01-.052.122l-8.189 8.519c-.077.046-.624.5-.356-.189z" fill="url(#a)"/></svg>
@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><radialGradient id="b" cx="9.36" cy="10.57" r="7.07" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#f2f2f2"/><stop offset=".58" stop-color="#eee"/><stop offset="1" stop-color="#e6e6e6"/></radialGradient><linearGradient id="a" x1="2.59" y1="10.16" x2="15.41" y2="10.16" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#005ba1"/><stop offset=".07" stop-color="#0060a9"/><stop offset=".36" stop-color="#0071c8"/><stop offset=".52" stop-color="#0078d4"/><stop offset=".64" stop-color="#0074cd"/><stop offset=".82" stop-color="#006abb"/><stop offset="1" stop-color="#005ba1"/></linearGradient></defs><path d="M9 5.14c-3.54 0-6.41-1-6.41-2.32v12.36c0 1.27 2.82 2.3 6.32 2.32H9c3.54 0 6.41-1 6.41-2.32V2.82c0 1.29-2.87 2.32-6.41 2.32z" fill="url(#a)"/><path d="M15.41 2.82c0 1.29-2.87 2.32-6.41 2.32s-6.41-1-6.41-2.32S5.46.5 9 .5s6.41 1 6.41 2.32" fill="#e8e8e8"/><path d="M13.92 2.63c0 .82-2.21 1.48-4.92 1.48s-4.92-.66-4.92-1.48S6.29 1.16 9 1.16s4.92.66 4.92 1.47" fill="#50e6ff"/><path d="M9 3a11.55 11.55 0 00-3.89.57A11.42 11.42 0 009 4.11a11.15 11.15 0 003.89-.58A11.84 11.84 0 009 3z" fill="#198ab3"/><path d="M12.9 11.4V8H12v4.13h2.46v-.73zM5.76 9.73a1.83 1.83 0 01-.51-.31.44.44 0 01-.12-.32.34.34 0 01.15-.3.68.68 0 01.42-.12 1.62 1.62 0 011 .29v-.86a2.58 2.58 0 00-1-.16 1.64 1.64 0 00-1.09.34 1.08 1.08 0 00-.42.89c0 .51.32.91 1 1.21a2.88 2.88 0 01.62.36.42.42 0 01.15.32.38.38 0 01-.16.31.81.81 0 01-.45.11 1.66 1.66 0 01-1.09-.42V12a2.17 2.17 0 001.07.24 1.88 1.88 0 001.18-.33 1.08 1.08 0 00.33-.91 1.05 1.05 0 00-.25-.7 2.42 2.42 0 00-.83-.57zM11 11.32a2.34 2.34 0 00.33-1.26A2.32 2.32 0 0011 9a1.81 1.81 0 00-.7-.75 2 2 0 00-1-.26 2.11 2.11 0 00-1.08.27 1.86 1.86 0 00-.73.74 2.46 2.46 0 00-.26 1.14 2.26 2.26 0 00.24 1 1.76 1.76 0 00.69.74 2.06 2.06 0 001 .3l.86 1h1.21L10 12.08a1.79 1.79 0 001-.76zm-1-.25a.94.94 0 01-.76.35.92.92 0 01-.76-.36 1.52 1.52 0 01-.29-1 1.53 1.53 0 01.29-1 1 1 0 01.78-.37.87.87 0 01.75.37 1.62 1.62 0 01.27 1 1.46 1.46 0 01-.28 1.01z" fill="url(#b)"/></svg>
@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="8.88" y1="12.21" x2="8.88" y2=".21" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#0078d4"/><stop offset=".82" stop-color="#5ea0ef"/></linearGradient><linearGradient id="b" x1="8.88" y1="16.84" x2="8.88" y2="12.21" gradientUnits="userSpaceOnUse"><stop offset=".15" stop-color="#ccc"/><stop offset="1" stop-color="#707070"/></linearGradient></defs><rect x="-.12" y=".21" width="18" height="12" rx=".6" fill="url(#a)"/><path fill="#50e6ff" d="M11.88 4.46v3.49l-3 1.76v-3.5l3-1.75z"/><path fill="#c3f1ff" d="M11.88 4.46l-3 1.76-3-1.76 3-1.75 3 1.75z"/><path fill="#9cebff" d="M8.88 6.22v3.49l-3-1.76V4.46l3 1.76z"/><path fill="#c3f1ff" d="M5.88 7.95l3-1.74v3.5l-3-1.76z"/><path fill="#9cebff" d="M11.88 7.95l-3-1.74v3.5l3-1.76z"/><path d="M12.49 15.84c-1.78-.28-1.85-1.56-1.85-3.63H7.11c0 2.07-.06 3.35-1.84 3.63a1 1 0 00-.89 1h9a1 1 0 00-.89-1z" fill="url(#b)"/></svg>
@@ -1,7 +1,11 @@
<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100" fill="none"><rect width="100" height="100" fill="url(#a)" rx="20"/><g fill="#fff" fill-rule="evenodd" clip-rule="evenodd" filter="url(#b)"><path d="M11 49.941v-.003l.002-.005.003-.014.007-.035a8.37 8.37 0 0 1 .105-.42c.073-.263.184-.624.348-1.072.328-.896.866-2.135 1.73-3.617 1.732-2.97 4.753-6.883 9.95-10.955 5.223-4.092 10.295-6.293 14.08-7.471a35.328 35.328 0 0 1 4.585-1.114 23.628 23.628 0 0 1 1.687-.223 9.17 9.17 0 0 1 .108-.009l.034-.002h.011l.007-.001s.002 0 .133 2.217c.13 2.218.132 2.218.132 2.218h-.002l-.053.004a19.098 19.098 0 0 0-1.326.178c-.809.136-1.937.37-3.302.763l-.127.043c-2.745.94-6.666 2.775-11.249 6.362-4.572 3.577-7.142 6.95-8.563 9.393-.711 1.222-1.137 2.215-1.383 2.889a9.995 9.995 0 0 0-.29.933c.008.037.022.095.044.173.046.166.123.423.246.76.246.674.672 1.667 1.383 2.89 1.421 2.441 3.991 5.815 8.563 9.392 4.584 3.587 8.504 5.423 11.25 6.362l.126.043c1.365.393 2.493.627 3.302.763a19.098 19.098 0 0 0 1.326.178l.053.004h.002s-.002 0-.133 2.218C43.66 75 43.657 75 43.657 75h-.007l-.011-.001-.034-.002a9.17 9.17 0 0 1-.478-.046 23.628 23.628 0 0 1-1.317-.186 35.328 35.328 0 0 1-4.584-1.114c-3.786-1.178-8.858-3.38-14.081-7.471-5.197-4.072-8.218-7.985-9.95-10.955-.864-1.482-1.402-2.72-1.73-3.617-.164-.448-.275-.81-.348-1.072a8.37 8.37 0 0 1-.105-.42l-.007-.035-.003-.014-.002-.005v-.121Zm78 0v-.003l-.002-.005-.002-.014-.008-.035a8.532 8.532 0 0 0-.105-.42 14.049 14.049 0 0 0-.348-1.072c-.328-.896-.866-2.135-1.73-3.617-1.732-2.97-4.753-6.883-9.95-10.955-5.223-4.092-10.295-6.293-14.08-7.471a35.328 35.328 0 0 0-4.585-1.114 23.628 23.628 0 0 0-1.687-.223 9.17 9.17 0 0 0-.108-.009l-.034-.002h-.011L56.343 25s-.002 0-.133 2.217c-.13 2.218-.132 2.218-.132 2.218h.002l.053.004a19.098 19.098 0 0 1 1.326.178c.809.136 1.937.37 3.302.763l.127.043c2.745.94 6.666 2.775 11.249 6.362 4.572 3.577 7.141 6.95 8.563 9.393.711 1.222 1.137 2.215 1.383 2.889a9.995 9.995 0 0 1 .29.933 9.995 9.995 0 0 1-.29.934c-.246.673-.672 1.666-1.383 2.888-1.422 2.442-3.991 5.816-8.563 9.393-4.584 3.587-8.504 5.423-11.25 6.362l-.126.043a30.108 30.108 0 0 1-3.302.763 19.098 19.098 0 0 1-1.326.178l-.053.004h-.002s.002 0 .133 2.218C56.34 75 56.343 75 56.343 75h.007l.011-.001.034-.002a9.17 9.17 0 0 0 .478-.046c.314-.034.758-.092 1.317-.186a35.328 35.328 0 0 0 4.584-1.114c3.786-1.178 8.858-3.38 14.081-7.471 5.197-4.072 8.218-7.985 9.95-10.955.864-1.482 1.402-2.72 1.73-3.617.164-.448.275-.81.348-1.072a8.532 8.532 0 0 0 .105-.42l.008-.035.002-.014.001-.005.001-.003v-.118Z"/><path d="M68.342 49.998c0 9.846-7.924 17.827-17.7 17.827-9.775 0-17.7-7.981-17.7-17.827 0-9.846 7.925-17.827 17.7-17.827 9.776 0 17.7 7.981 17.7 17.827ZM46.218 39.97s-2.127 2.508-2.766 4.457c-.412 1.257-.553 3.343-.553 3.343h-5.531s0-1.672 1.106-4.457c1.106-2.786 2.212-3.343 2.212-3.343h5.532Zm-2.766 15.6c.639 1.949 2.766 4.457 2.766 4.457h-5.532s-1.106-.557-2.212-3.343c-1.106-2.785-1.106-4.457-1.106-4.457h5.53s.142 2.086.554 3.343Z"/></g><defs><radialGradient id="a" cx="0" cy="0" r="1" gradientTransform="matrix(40.99997 42 -42 40.99997 50 50)" gradientUnits="userSpaceOnUse"><stop offset=".33" stop-color="#F76526"/><stop offset="1" stop-color="#F43030"/></radialGradient><filter id="b" width="90" height="62" x="5" y="23" color-interpolation-filters="sRGB" filterUnits="userSpaceOnUse"><feFlood flood-opacity="0" result="BackgroundImageFix"/><feColorMatrix in="SourceAlpha" result="hardAlpha" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/><feOffset dy="4"/><feGaussianBlur stdDeviation="3"/><feComposite in2="hardAlpha" operator="out"/><feColorMatrix values="0 0 0 0 0.368384 0 0 0 0 0.0623777 0 0 0 0 0.0623777 0 0 0 0.25 0"/><feBlend in2="BackgroundImageFix" result="effect1_dropShadow_3909_18731"/><feBlend in="SourceGraphic" in2="effect1_dropShadow_3909_18731" result="shape"/><feColorMatrix in="SourceAlpha" result="hardAlpha" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/><feOffset dy="4"/><feGaussianBlur stdDeviation="3"/><feComposite in2="hardAlpha" k2="-1" k3="1" operator="arithmetic"/><feColorMatrix values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.06 0"/><feBlend in2="shape" result="effect2_innerShadow_3909_18731"/></filter></defs></svg>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_2048_2251)">
<path opacity="0.9" d="M8.02226 15.9866C3.56539 15.9866 -6.10352e-05 12.4896 -6.10352e-05 8.11832C-6.10352e-05 3.79075 3.56539 0.25 8.02226 0.25H13.0584C14.7075 0.25 15.9999 1.56139 15.9999 3.13506V8.11832C15.9999 12.4896 12.4345 15.9866 8.02226 15.9866Z" fill="#F25733"/>
<path d="M7.95919 4.71207C4.63025 4.71207 2.75514 7.46868 2.67693 7.58603C2.48413 7.87508 2.48413 8.24888 2.67707 8.53816C2.75514 8.65528 4.63025 11.4119 7.95919 11.4119C11.2881 11.4119 13.1633 8.65528 13.2414 8.53792C13.4342 8.24888 13.4342 7.87508 13.2413 7.58582C13.1632 7.46868 11.2881 4.71207 7.95919 4.71207ZM3.13771 8.23088C3.06925 8.12832 3.06925 7.99571 3.13771 7.89307C3.20059 7.79867 4.53564 5.83764 6.92256 5.36723C5.84092 5.78476 5.07127 6.83485 5.07127 8.062C5.07127 9.28912 5.84092 10.3392 6.92256 10.7567C4.53564 10.2863 3.20059 8.32528 3.13771 8.23088ZM6.62838 8.062C6.62838 8.21488 6.50443 8.3388 6.35151 8.3388C6.19859 8.3388 6.07465 8.21488 6.07465 8.062C6.07465 7.02287 6.92003 6.17748 7.95916 6.17748C8.11207 6.17748 8.23599 6.30141 8.23599 6.45434C8.23599 6.60727 8.11207 6.73119 7.95916 6.73119C7.22535 6.73119 6.62838 7.32815 6.62838 8.062ZM7.95919 8.73504C7.58803 8.73504 7.2861 8.43312 7.2861 8.062C7.2861 7.69085 7.58803 7.3889 7.95919 7.3889C8.33039 7.3889 8.63231 7.69083 8.63231 8.062C8.63231 8.43312 8.33039 8.73504 7.95919 8.73504ZM12.7806 8.23088C12.7178 8.32528 11.3827 10.2863 8.99583 10.7567C10.0775 10.3392 10.8471 9.28912 10.8471 8.062C10.8471 6.83487 10.0775 5.78477 8.99583 5.36724C11.3827 5.83768 12.7178 7.7987 12.7806 7.89307C12.8491 7.99571 12.8491 8.12832 12.7806 8.23088Z" fill="#F9F2F9"/>
</g>
<defs>
<clipPath id="clip0_2048_2251">
<rect width="16" height="16" fill="white"/>
</clipPath>
</defs>
</svg>
@@ -53,7 +53,6 @@
  "option_atleastonce": "at least once",
  "option_onaverage": "on average",
  "option_intotal": "in total",
  "option_last": "last",
  "option_above": "above",
  "option_below": "below",
  "option_equal": "is equal to",
@@ -1,30 +0,0 @@
{
  "breadcrumb": "Messaging Queues",
  "header": "Kafka / Overview",
  "overview": {
    "title": "Start sending data in as little as 20 minutes",
    "subtitle": "Connect and Monitor Your Data Streams"
  },
  "configureConsumer": {
    "title": "Configure Consumer",
    "description": "Add consumer data sources to gain insights and enhance monitoring.",
    "button": "Get Started"
  },
  "configureProducer": {
    "title": "Configure Producer",
    "description": "Add producer data sources to gain insights and enhance monitoring.",
    "button": "Get Started"
  },
  "monitorKafka": {
    "title": "Monitor kafka",
    "description": "Add your Kafka source to gain insights and enhance activity tracking.",
    "button": "Get Started"
  },
  "summarySection": {
    "viewDetailsButton": "View Details"
  },
  "confirmModal": {
    "content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
    "okText": "Proceed"
  }
}
@@ -1,8 +0,0 @@
{
  "invite_user": "Invite your teammates",
  "invite": "Invite",
  "skip": "Skip",
  "invite_user_helper_text": "Not the right person to get started? No worries! Invite someone who can.",
  "select_use_case": "Select a use-case to get started",
  "get_started": "Get Started"
}
@@ -40,7 +40,6 @@
  "option_atleastonce": "at least once",
  "option_onaverage": "on average",
  "option_intotal": "in total",
  "option_last": "last",
  "option_above": "above",
  "option_below": "below",
  "option_equal": "is equal to",
@@ -1,3 +1,3 @@
{
  "rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support or "
  "rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support."
}
@@ -38,7 +38,5 @@
  "LIST_LICENSES": "SigNoz | List of Licenses",
  "WORKSPACE_LOCKED": "SigNoz | Workspace Locked",
  "SUPPORT": "SigNoz | Support",
  "DEFAULT": "Open source Observability Platform | SigNoz",
  "ALERT_HISTORY": "SigNoz | Alert Rule History",
  "ALERT_OVERVIEW": "SigNoz | Alert Rule Overview"
  "DEFAULT": "Open source Observability Platform | SigNoz"
}
@@ -1,22 +0,0 @@
{
  "trialPlanExpired": "Trial Plan Expired",
  "gotQuestions": "Got Questions?",
  "contactUs": "Contact Us",
  "upgradeToContinue": "Upgrade to Continue",
  "upgradeNow": "Upgrade now to keep enjoying all the great features you’ve been using.",
  "yourDataIsSafe": "Your data is safe with us until",
  "actNow": "Act now to avoid any disruptions and continue where you left off.",
  "contactAdmin": "Contact your admin to proceed with the upgrade.",
  "continueMyJourney": "Continue My Journey",
  "needMoreTime": "Need More Time?",
  "extendTrial": "Extend Trial",
  "extendTrialMsgPart1": "If you have a specific reason why you were not able to finish your PoC in the trial period, please write to us on",
  "extendTrialMsgPart2": "with the reason. Sometimes we can extend trial by a few days on a case by case basis",
  "whyChooseSignoz": "Why choose Signoz",
  "enterpriseGradeObservability": "Enterprise-grade Observability",
  "observabilityDescription": "Get access to observability at any scale with advanced security and compliance.",
  "continueToUpgrade": "Continue to Upgrade",
  "youAreInGoodCompany": "You are in good company",
  "faqs": "FAQs",
  "somethingWentWrong": "Something went wrong"
}
@@ -53,7 +53,6 @@
  "option_atleastonce": "at least once",
  "option_onaverage": "on average",
  "option_intotal": "in total",
  "option_last": "last",
  "option_above": "above",
  "option_below": "below",
  "option_equal": "is equal to",
@@ -6,6 +6,5 @@
  "share": "Share",
  "save": "Save",
  "edit": "Edit",
  "logged_in": "Logged In",
  "pending_data_placeholder": "Just a bit of patience, just a little bit’s enough ⎯ we’re getting your {{dataSource}}!"
  "logged_in": "Logged In"
}
@@ -1,7 +1,6 @@
{
  "create_dashboard": "Create Dashboard",
  "import_json": "Import Dashboard JSON",
  "view_template": "View templates",
  "import_grafana_json": "Import Grafana JSON",
  "copy_to_clipboard": "Copy To ClipBoard",
  "download_json": "Download JSON",
@@ -1,30 +0,0 @@
{
  "breadcrumb": "Messaging Queues",
  "header": "Kafka / Overview",
  "overview": {
    "title": "Start sending data in as little as 20 minutes",
    "subtitle": "Connect and Monitor Your Data Streams"
  },
  "configureConsumer": {
    "title": "Configure Consumer",
    "description": "Add consumer data sources to gain insights and enhance monitoring.",
    "button": "Get Started"
  },
  "configureProducer": {
    "title": "Configure Producer",
    "description": "Add producer data sources to gain insights and enhance monitoring.",
    "button": "Get Started"
  },
  "monitorKafka": {
    "title": "Monitor kafka",
    "description": "Add your Kafka source to gain insights and enhance activity tracking.",
    "button": "Get Started"
  },
  "summarySection": {
    "viewDetailsButton": "View Details"
  },
  "confirmModal": {
    "content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
    "okText": "Proceed"
  }
}
@@ -1,8 +0,0 @@
{
  "invite_user": "Invite your teammates",
  "invite": "Invite",
  "skip": "Skip",
  "invite_user_helper_text": "Not the right person to get started? No worries! Invite someone who can.",
  "select_use_case": "Select a use-case to get started",
  "get_started": "Get Started"
}
@@ -40,7 +40,6 @@
  "option_atleastonce": "at least once",
  "option_onaverage": "on average",
  "option_intotal": "in total",
  "option_last": "last",
  "option_above": "above",
  "option_below": "below",
  "option_equal": "is equal to",
@@ -1,3 +1,3 @@
{
  "rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support or "
  "rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support."
}
@@ -8,7 +8,6 @@
  "GET_STARTED_LOGS_MANAGEMENT": "SigNoz | Get Started | Logs",
  "GET_STARTED_INFRASTRUCTURE_MONITORING": "SigNoz | Get Started | Infrastructure",
  "GET_STARTED_AWS_MONITORING": "SigNoz | Get Started | AWS",
  "GET_STARTED_AZURE_MONITORING": "SigNoz | Get Started | AZURE",
  "TRACE": "SigNoz | Trace",
  "TRACE_DETAIL": "SigNoz | Trace Detail",
  "TRACES_EXPLORER": "SigNoz | Traces Explorer",
@@ -49,8 +48,5 @@
  "TRACES_SAVE_VIEWS": "SigNoz | Traces Saved Views",
  "DEFAULT": "Open source Observability Platform | SigNoz",
  "SHORTCUTS": "SigNoz | Shortcuts",
  "INTEGRATIONS": "SigNoz | Integrations",
  "ALERT_HISTORY": "SigNoz | Alert Rule History",
  "ALERT_OVERVIEW": "SigNoz | Alert Rule Overview",
  "MESSAGING_QUEUES": "SigNoz | Messaging Queues"
  "INTEGRATIONS": "SigNoz | Integrations"
}
@@ -1,22 +0,0 @@
{
  "trialPlanExpired": "Trial Plan Expired",
  "gotQuestions": "Got Questions?",
  "contactUs": "Contact Us",
  "upgradeToContinue": "Upgrade to Continue",
  "upgradeNow": "Upgrade now to keep enjoying all the great features you’ve been using.",
  "yourDataIsSafe": "Your data is safe with us until",
  "actNow": "Act now to avoid any disruptions and continue where you left off.",
  "contactAdmin": "Contact your admin to proceed with the upgrade.",
  "continueMyJourney": "Continue My Journey",
  "needMoreTime": "Need More Time?",
  "extendTrial": "Extend Trial",
  "extendTrialMsgPart1": "If you have a specific reason why you were not able to finish your PoC in the trial period, please write to us on",
  "extendTrialMsgPart2": "with the reason. Sometimes we can extend trial by a few days on a case by case basis",
  "whyChooseSignoz": "Why choose Signoz",
  "enterpriseGradeObservability": "Enterprise-grade Observability",
  "observabilityDescription": "Get access to observability at any scale with advanced security and compliance.",
  "continueToUpgrade": "Continue to Upgrade",
  "youAreInGoodCompany": "You are in good company",
  "faqs": "FAQs",
  "somethingWentWrong": "Something went wrong"
}
9
frontend/public/signoz-signup.svg
Normal file
@@ -76,8 +76,9 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
        isUserFetching: false,
      },
    });

    if (!isLoggedIn) {
      history.push(ROUTES.LOGIN, { from: pathname });
      history.push(ROUTES.LOGIN);
    }
  };
@@ -1,7 +1,6 @@
import { ConfigProvider } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import setLocalStorageApi from 'api/browser/localstorage/set';
import logEvent from 'api/common/logEvent';
import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import { FeatureKeys } from 'constants/features';
@@ -12,15 +11,12 @@ import useAnalytics from 'hooks/analytics/useAnalytics';
import { KeyboardHotkeysProvider } from 'hooks/hotkeys/useKeyboardHotkeys';
import { useIsDarkMode, useThemeConfig } from 'hooks/useDarkMode';
import { THEME_MODE } from 'hooks/useDarkMode/constant';
import useFeatureFlags from 'hooks/useFeatureFlag';
import useGetFeatureFlag from 'hooks/useGetFeatureFlag';
import useLicense, { LICENSE_PLAN_KEY } from 'hooks/useLicense';
import { NotificationProvider } from 'hooks/useNotifications';
import { ResourceProvider } from 'hooks/useResourceAttribute';
import history from 'lib/history';
import { identity, pick, pickBy } from 'lodash-es';
import posthog from 'posthog-js';
import AlertRuleProvider from 'providers/Alert';
import { DashboardProvider } from 'providers/Dashboard/Dashboard';
import { QueryBuilderProvider } from 'providers/QueryBuilder';
import { Suspense, useEffect, useState } from 'react';
@@ -42,7 +38,7 @@ import defaultRoutes, {

function App(): JSX.Element {
  const themeConfig = useThemeConfig();
  const { data: licenseData } = useLicense();
  const { data } = useLicense();
  const [routes, setRoutes] = useState<AppRoutes[]>(defaultRoutes);
  const { role, isLoggedIn: isLoggedInState, user, org } = useSelector<
    AppState,
@@ -51,7 +47,7 @@ function App(): JSX.Element {

  const dispatch = useDispatch<Dispatch<AppActions>>();

  const { trackPageView } = useAnalytics();
  const { trackPageView, trackEvent } = useAnalytics();

  const { hostname, pathname } = window.location;

@@ -59,13 +55,15 @@ function App(): JSX.Element {

  const isDarkMode = useIsDarkMode();

  const isChatSupportEnabled =
    useFeatureFlags(FeatureKeys.CHAT_SUPPORT)?.active || false;

  const isPremiumSupportEnabled =
    useFeatureFlags(FeatureKeys.PREMIUM_SUPPORT)?.active || false;

  const featureResponse = useGetFeatureFlag((allFlags) => {
    const isOnboardingEnabled =
      allFlags.find((flag) => flag.name === FeatureKeys.ONBOARDING)?.active ||
      false;

    const isChatSupportEnabled =
      allFlags.find((flag) => flag.name === FeatureKeys.CHAT_SUPPORT)?.active ||
      false;

    dispatch({
      type: UPDATE_FEATURE_FLAG_RESPONSE,
      payload: {
@@ -74,10 +72,6 @@ function App(): JSX.Element {
      },
    });

    const isOnboardingEnabled =
      allFlags.find((flag) => flag.name === FeatureKeys.ONBOARDING)?.active ||
      false;

    if (!isOnboardingEnabled || !isCloudUserVal) {
      const newRoutes = routes.filter(
        (route) => route?.path !== ROUTES.GET_STARTED,
@@ -85,13 +79,23 @@ function App(): JSX.Element {

      setRoutes(newRoutes);
    }

    if (isLoggedInState && isChatSupportEnabled) {
      // eslint-disable-next-line @typescript-eslint/ban-ts-comment
      // @ts-ignore
      window.Intercom('boot', {
        app_id: process.env.INTERCOM_APP_ID,
        email: user?.email || '',
        name: user?.name || '',
      });
    }
  });

  const isOnBasicPlan =
    licenseData?.payload?.licenses?.some(
    data?.payload?.licenses?.some(
      (license) =>
        license.isCurrent && license.planKey === LICENSE_PLAN_KEY.BASIC_PLAN,
    ) || licenseData?.payload?.licenses === null;
    ) || data?.payload?.licenses === null;

  const enableAnalytics = (user: User): void => {
    const orgName =
@@ -108,7 +112,9 @@ function App(): JSX.Element {
    };

    const sanitizedIdentifyPayload = pickBy(identifyPayload, identity);

    const domain = extractDomain(email);

    const hostNameParts = hostname.split('.');

    const groupTraits = {
@@ -121,29 +127,10 @@ function App(): JSX.Element {
    };

    window.analytics.identify(email, sanitizedIdentifyPayload);

    window.analytics.group(domain, groupTraits);

    posthog?.identify(email, {
      email,
      name,
      orgName,
      tenant_id: hostNameParts[0],
      data_region: hostNameParts[1],
      tenant_url: hostname,
      company_domain: domain,
      source: 'signoz-ui',
      isPaidUser: !!licenseData?.payload?.trialConvertedToSubscription,
    });

    posthog?.group('company', domain, {
      name: orgName,
      tenant_id: hostNameParts[0],
      data_region: hostNameParts[1],
      tenant_url: hostname,
      company_domain: domain,
      source: 'signoz-ui',
      isPaidUser: !!licenseData?.payload?.trialConvertedToSubscription,
    });
    window.clarity('identify', email, name);
  };

  useEffect(() => {
@@ -157,6 +144,10 @@ function App(): JSX.Element {
      !isIdentifiedUser
    ) {
      setLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER, 'true');

      if (isCloudUserVal) {
        enableAnalytics(user);
      }
    }

    if (
@@ -186,26 +177,6 @@ function App(): JSX.Element {
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [pathname]);

  useEffect(() => {
    const showAddCreditCardModal =
      !isPremiumSupportEnabled &&
      !licenseData?.payload?.trialConvertedToSubscription;

    if (isLoggedInState && isChatSupportEnabled && !showAddCreditCardModal) {
      window.Intercom('boot', {
        app_id: process.env.INTERCOM_APP_ID,
        email: user?.email || '',
        name: user?.name || '',
      });
    }
  }, [
    isLoggedInState,
    isChatSupportEnabled,
    user,
    licenseData,
    isPremiumSupportEnabled,
  ]);

  useEffect(() => {
    if (user && user?.email && user?.userId && user?.name) {
      try {
@@ -213,7 +184,7 @@ function App(): JSX.Element {
          LOCALSTORAGE.THEME_ANALYTICS_V1,
        );
        if (!isThemeAnalyticsSent) {
          logEvent('Theme Analytics', {
          trackEvent('Theme Analytics', {
            theme: isDarkMode ? THEME_MODE.DARK : THEME_MODE.LIGHT,
            user: pick(user, ['email', 'userId', 'name']),
            org,
@@ -224,11 +195,6 @@ function App(): JSX.Element {
        console.error('Failed to parse local storage theme analytics event');
      }
    }

    if (isCloudUserVal && user && user.email) {
      enableAnalytics(user);
    }

    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [user]);

@@ -241,24 +207,22 @@ function App(): JSX.Element {
      <QueryBuilderProvider>
        <DashboardProvider>
          <KeyboardHotkeysProvider>
            <AlertRuleProvider>
              <AppLayout>
                <Suspense fallback={<Spinner size="large" tip="Loading..." />}>
                  <Switch>
                    {routes.map(({ path, component, exact }) => (
                      <Route
                        key={`${path}`}
                        exact={exact}
                        path={path}
                        component={component}
                      />
                    ))}
            <AppLayout>
              <Suspense fallback={<Spinner size="large" tip="Loading..." />}>
                <Switch>
                  {routes.map(({ path, component, exact }) => (
                    <Route
                      key={`${path}`}
                      exact={exact}
                      path={path}
                      component={component}
                    />
                  ))}

                    <Route path="*" component={NotFound} />
                  </Switch>
                </Suspense>
              </AppLayout>
            </AlertRuleProvider>
                  <Route path="*" component={NotFound} />
                </Switch>
              </Suspense>
            </AppLayout>
          </KeyboardHotkeysProvider>
        </DashboardProvider>
      </QueryBuilderProvider>
@@ -92,14 +92,6 @@ export const CreateNewAlerts = Loadable(
  () => import(/* webpackChunkName: "Create Alerts" */ 'pages/CreateAlert'),
);

export const AlertHistory = Loadable(
  () => import(/* webpackChunkName: "Alert History" */ 'pages/AlertList'),
);

export const AlertOverview = Loadable(
  () => import(/* webpackChunkName: "Alert Overview" */ 'pages/AlertList'),
);

export const CreateAlertChannelAlerts = Loadable(
  () =>
    import(/* webpackChunkName: "Create Channels" */ 'pages/AlertChannelCreate'),
@@ -212,15 +204,3 @@ export const InstalledIntegrations = Loadable(
    /* webpackChunkName: "InstalledIntegrations" */ 'pages/IntegrationsModulePage'
  ),
);

export const MessagingQueues = Loadable(
  () =>
    import(/* webpackChunkName: "MessagingQueues" */ 'pages/MessagingQueues'),
);

export const MQDetailPage = Loadable(
  () =>
    import(
      /* webpackChunkName: "MQDetailPage" */ 'pages/MessagingQueues/MQDetailPage'
    ),
);
@@ -2,8 +2,6 @@ import ROUTES from 'constants/routes';
import { RouteProps } from 'react-router-dom';

import {
  AlertHistory,
  AlertOverview,
  AllAlertChannels,
  AllErrors,
  APIKeys,
@@ -25,8 +23,6 @@ import {
  LogsExplorer,
  LogsIndexToFields,
  LogsSaveViews,
  MessagingQueues,
  MQDetailPage,
  MySettings,
  NewDashboardPage,
  OldLogsExplorer,
@@ -173,20 +169,6 @@ const routes: AppRoutes[] = [
    isPrivate: true,
    key: 'ALERTS_NEW',
  },
  {
    path: ROUTES.ALERT_HISTORY,
    exact: true,
    component: AlertHistory,
    isPrivate: true,
    key: 'ALERT_HISTORY',
  },
  {
    path: ROUTES.ALERT_OVERVIEW,
    exact: true,
    component: AlertOverview,
    isPrivate: true,
    key: 'ALERT_OVERVIEW',
  },
  {
    path: ROUTES.TRACE,
    exact: true,
@@ -369,20 +351,6 @@ const routes: AppRoutes[] = [
    isPrivate: true,
    key: 'INTEGRATIONS',
  },
  {
    path: ROUTES.MESSAGING_QUEUES,
    exact: true,
    component: MessagingQueues,
    key: 'MESSAGING_QUEUES',
    isPrivate: true,
  },
  {
    path: ROUTES.MESSAGING_QUEUES_DETAIL,
    exact: true,
    component: MQDetailPage,
    key: 'MESSAGING_QUEUES_DETAIL',
    isPrivate: true,
  },
];

export const SUPPORT_ROUTE: AppRoutes = {
@@ -9,9 +9,9 @@ export function ErrorResponseHandler(error: AxiosError): ErrorResponse {
    // making the error status code as standard Error Status Code
    const statusCode = response.status as ErrorStatusCode;

    const { data } = response as AxiosResponse;

    if (statusCode >= 400 && statusCode < 500) {
      const { data } = response as AxiosResponse;

      if (statusCode === 404) {
        return {
          statusCode,
@@ -34,11 +34,12 @@ export function ErrorResponseHandler(error: AxiosError): ErrorResponse {
        body: JSON.stringify((response.data as any).data),
      };
    }

    return {
      statusCode,
      payload: null,
      error: 'Something went wrong',
      message: data?.error,
      message: null,
    };
  }
  if (request) {
@@ -1,20 +1,26 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/create';

const create = async (
  props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
  const response = await axios.post('/rules', {
    ...props.data,
  });
  try {
    const response = await axios.post('/rules', {
      ...props.data,
    });

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data.data,
  };
    return {
      statusCode: 200,
      error: null,
      message: response.data.status,
      payload: response.data.data,
    };
  } catch (error) {
    return ErrorResponseHandler(error as AxiosError);
  }
};

export default create;
@@ -1,18 +1,24 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/delete';

const deleteAlerts = async (
  props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
  const response = await axios.delete(`/rules/${props.id}`);
  try {
    const response = await axios.delete(`/rules/${props.id}`);

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data.data.rules,
  };
    return {
      statusCode: 200,
      error: null,
      message: response.data.status,
      payload: response.data.data.rules,
    };
  } catch (error) {
    return ErrorResponseHandler(error as AxiosError);
  }
};

export default deleteAlerts;
@@ -1,16 +1,24 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/get';

const get = async (
	props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
	const response = await axios.get(`/rules/${props.id}`);
	return {
		statusCode: 200,
		error: null,
		message: response.data.status,
		payload: response.data,
	};
	try {
		const response = await axios.get(`/rules/${props.id}`);

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default get;

@@ -1,20 +1,26 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/patch';

const patch = async (
	props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
	const response = await axios.patch(`/rules/${props.id}`, {
		...props.data,
	});
	try {
		const response = await axios.patch(`/rules/${props.id}`, {
			...props.data,
		});

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data.data,
		};
	return {
		statusCode: 200,
		error: null,
		message: response.data.status,
		payload: response.data.data,
	};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default patch;

@@ -1,20 +1,26 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/save';

const put = async (
	props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
	const response = await axios.put(`/rules/${props.id}`, {
		...props.data,
	});
	try {
		const response = await axios.put(`/rules/${props.id}`, {
			...props.data,
		});

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data.data,
		};
	return {
		statusCode: 200,
		error: null,
		message: response.data.status,
		payload: response.data.data,
	};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default put;

@@ -1,28 +0,0 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { AlertRuleStatsPayload } from 'types/api/alerts/def';
import { RuleStatsProps } from 'types/api/alerts/ruleStats';

const ruleStats = async (
	props: RuleStatsProps,
): Promise<SuccessResponse<AlertRuleStatsPayload> | ErrorResponse> => {
	try {
		const response = await axios.post(`/rules/${props.id}/history/stats`, {
			start: props.start,
			end: props.end,
		});

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default ruleStats;

@@ -1,33 +0,0 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { AlertRuleTimelineGraphResponsePayload } from 'types/api/alerts/def';
import { GetTimelineGraphRequestProps } from 'types/api/alerts/timelineGraph';

const timelineGraph = async (
	props: GetTimelineGraphRequestProps,
): Promise<
	SuccessResponse<AlertRuleTimelineGraphResponsePayload> | ErrorResponse
> => {
	try {
		const response = await axios.post(
			`/rules/${props.id}/history/overall_status`,
			{
				start: props.start,
				end: props.end,
			},
		);

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default timelineGraph;

@@ -1,36 +0,0 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { AlertRuleTimelineTableResponsePayload } from 'types/api/alerts/def';
import { GetTimelineTableRequestProps } from 'types/api/alerts/timelineTable';

const timelineTable = async (
	props: GetTimelineTableRequestProps,
): Promise<
	SuccessResponse<AlertRuleTimelineTableResponsePayload> | ErrorResponse
> => {
	try {
		const response = await axios.post(`/rules/${props.id}/history/timeline`, {
			start: props.start,
			end: props.end,
			offset: props.offset,
			limit: props.limit,
			order: props.order,
			state: props.state,
			// TODO(shaheer): implement filters
			filters: props.filters,
		});

		return {
			statusCode: 200,
			error: null,
			message: response.data.status,
			payload: response.data,
		};
	} catch (error) {
		return ErrorResponseHandler(error as AxiosError);
	}
};

export default timelineTable;
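
Because every wrapper now resolves with a SuccessResponse | ErrorResponse union rather than throwing, callers branch on the result. A sketch of typical call-site usage (the narrowing check and the logging are illustrative; the real UI code is not part of this diff):

import create from 'api/alerts/create';
import { Props } from 'types/api/alerts/create';

async function saveRule(props: Props): Promise<void> {
	const response = await create(props);

	if (response.statusCode === 200) {
		// SuccessResponse path: payload carries the created rule.
		console.log('rule created:', response.payload);
	} else {
		// ErrorResponse path: fields populated by ErrorResponseHandler.
		console.error(response.error, response.message);
	}
}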