Compare commits


2 Commits

Author            SHA1        Message                                                  Date
Srikanth Chekuri  a58d70079b  Merge branch 'develop' into error-response               2024-06-15 20:17:12 +05:30
Srikanth Chekuri  2ae341cfa9  chore: error response std and improved error messages    2024-06-13 01:19:40 +05:30
1308 changed files with 19703 additions and 101361 deletions

.github/CODEOWNERS (6 changes)

@@ -5,6 +5,6 @@
 /frontend/ @YounixM
 /frontend/src/container/MetricsApplication @srikanthccv
 /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
-/deploy/ @SigNoz/devops
-/sample-apps/ @SigNoz/devops
-.github @SigNoz/devops
+/deploy/ @prashant-shahi
+/sample-apps/ @prashant-shahi
+.github @prashant-shahi

.github/ISSUE_TEMPLATE/request_dashboard.md

@@ -1,49 +0,0 @@
---
name: Request Dashboard
about: Request a new dashboard for the SigNoz Dashboards repository
title: '[Dashboard Request] '
labels: 'dashboard-template'
assignees: ''
---
<!-- Use this template to request a new dashboard for the SigNoz Dashboards repository. Providing detailed information will help us understand your needs better and speed up the dashboard creation process. -->
## Dashboard Name
<!-- Provide the name for the requested dashboard. Be specific (e.g., "MySQL Monitoring Dashboard"). -->
## Expected Dashboard Sections and Panels
(Can be tweaked (add or remove panels/sections) according to available metrics)
### Section Name
<!-- Brief description of what this section should display (e.g., "Resource usage metrics for MySQL database"). -->
### Panel Name
<!-- Description of the panel (e.g., "Displays current CPU usage, memory usage, etc."). -->
<!-- - **Example:**
- **Section**: Resource Metrics
- **Panel**: CPU Usage - Displays the current CPU usage across all database instances.
- **Panel**: Memory Usage - Displays the total memory used by the MySQL process. -->
<!-- Repeat this format for any additional sections or panels. -->
## Expected Dashboard Variables
<!-- List any dashboard variables that should be included in the dashboard. Examples could be `deployment.environment`, `hostname`, `region`, etc. -->
## Additional Comments or Requirements
<!-- Include any other details, special requirements, or specific visualizations you'd like to request for this dashboard. -->
## References or Screenshots
<!-- Add any references or screenshots of requested dashboard if available. -->
## 📋 Notes
Please review the [CONTRIBUTING.md](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md) for guidelines on dashboard structure, naming conventions, and how to submit a pull request.


@@ -8,13 +8,6 @@ on:
       - release/v*
 jobs:
-  check-no-ee-references:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - name: Run check
-        run: make check-no-ee-references
   build-frontend:
     runs-on: ubuntu-latest
     steps:
@@ -43,6 +36,7 @@ jobs:
       run: |
         echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
         echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
+        echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
     - name: Install dependencies
       run: cd frontend && yarn install
     - name: Run ESLint
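A side note on the `.env` construction in the step above: the first `echo` uses `>` (create or truncate the file), while every later variable is appended with `>>`, so the order of the redirections matters. A minimal, self-contained illustration of the same pattern (paths are throwaway):

```bash
echo 'FIRST_VAR="one"'   > /tmp/demo.env   # creates/truncates the file
echo 'SECOND_VAR="two"' >> /tmp/demo.env   # appends
cat /tmp/demo.env
```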


@@ -1,83 +0,0 @@
name: "Update PR labels and Block PR until related docs are shipped for the feature"
on:
pull_request:
branches:
- develop
types: [opened, edited, labeled, unlabeled]
permissions:
pull-requests: write
contents: read
jobs:
docs_label_check:
runs-on: ubuntu-latest
steps:
- name: Check PR Title and Manage Labels
uses: actions/github-script@v6
with:
script: |
const prTitle = context.payload.pull_request.title;
const prNumber = context.payload.pull_request.number;
const owner = context.repo.owner;
const repo = context.repo.repo;
// Fetch the current PR details to get labels
const pr = await github.rest.pulls.get({
owner,
repo,
pull_number: prNumber
});
const labels = pr.data.labels.map(label => label.name);
if (prTitle.startsWith('feat:')) {
const hasDocsRequired = labels.includes('docs required');
const hasDocsShipped = labels.includes('docs shipped');
const hasDocsNotRequired = labels.includes('docs not required');
// If "docs not required" is present, skip the checks
if (hasDocsNotRequired && !hasDocsRequired) {
console.log("Skipping checks due to 'docs not required' label.");
return; // Exit the script early
}
// If "docs shipped" is present, remove "docs required" if it exists
if (hasDocsShipped && hasDocsRequired) {
await github.rest.issues.removeLabel({
owner,
repo,
issue_number: prNumber,
name: 'docs required'
});
console.log("Removed 'docs required' label.");
}
// Add "docs required" label if neither "docs shipped" nor "docs required" are present
if (!hasDocsRequired && !hasDocsShipped) {
await github.rest.issues.addLabels({
owner,
repo,
issue_number: prNumber,
labels: ['docs required']
});
console.log("Added 'docs required' label.");
}
}
// Fetch the updated labels after any changes
const updatedPr = await github.rest.pulls.get({
owner,
repo,
pull_number: prNumber
});
const updatedLabels = updatedPr.data.labels.map(label => label.name);
const updatedHasDocsRequired = updatedLabels.includes('docs required');
const updatedHasDocsShipped = updatedLabels.includes('docs shipped');
// Block PR if "docs required" is still present and "docs shipped" is missing
if (updatedHasDocsRequired && !updatedHasDocsShipped) {
core.setFailed("This PR requires documentation. Please remove the 'docs required' label and add the 'docs shipped' label to proceed.");
}
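The removed workflow's logic can be approximated from a terminal with the GitHub CLI, which is handy when debugging why a PR is blocked. This is a rough sketch only (assumes an authenticated `gh`; `123` is a placeholder PR number), not the mechanism the workflow itself uses:

```bash
labels=$(gh pr view 123 --json labels --jq '.labels[].name')
echo "$labels" | grep -qx 'docs not required' && { echo "docs check skipped"; exit 0; }
if echo "$labels" | grep -qx 'docs required' && ! echo "$labels" | grep -qx 'docs shipped'; then
  echo "This PR requires documentation."; exit 1
fi
```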


@@ -9,6 +9,7 @@ on:
       - v*
 jobs:
   image-build-and-push-query-service:
     runs-on: ubuntu-latest
     steps:
@@ -150,13 +151,13 @@ jobs:
       run: |
         echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
         echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
+        echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
         echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
         echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
         echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
         echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
         echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
         echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
-        echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
     - name: Install dependencies
       working-directory: frontend
       run: yarn install


@@ -30,7 +30,6 @@ jobs:
           GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
           GCP_ZONE: ${{ secrets.GCP_ZONE }}
           GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
-          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
         run: |
           read -r -d '' COMMAND <<EOF || true
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
@@ -38,7 +37,6 @@ jobs:
           export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
           export OTELCOL_TAG="main"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          export KAFKA_SPAN_EVAL="true"
           docker system prune --force
           docker pull signoz/signoz-otel-collector:main
           docker pull signoz/signoz-schema-migrator:main
@@ -53,4 +51,4 @@ jobs:
           make build-frontend-amd64
           make run-testing
           EOF
-          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
+          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
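The `read -r -d '' COMMAND <<EOF || true` line above is a bash idiom worth spelling out: with an empty delimiter, `read` slurps the whole heredoc into `COMMAND`, but it returns non-zero when it hits end-of-input, so the `|| true` keeps a `set -e` shell (or a failing workflow step) from aborting. A self-contained demonstration:

```bash
#!/usr/bin/env bash
set -e
read -r -d '' MSG <<'EOF' || true
first line
second line
EOF
echo "$MSG"   # prints both lines; without `|| true`, set -e would kill the script
```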


@@ -30,7 +30,6 @@ jobs:
           GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
           GCP_ZONE: ${{ secrets.GCP_ZONE }}
           GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
-          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
         run: |
           read -r -d '' COMMAND <<EOF || true
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
@@ -53,4 +52,4 @@ jobs:
           make build-frontend-amd64
           make run-testing
           EOF
-          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
+          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

.gitignore (3 changes)

@@ -67,6 +67,3 @@ e2e/.auth
 # go
 vendor/
 **/main/**
-
-# git-town
-.git-branches.toml

.scripts/commentLinesForSetup.sh

@@ -0,0 +1,7 @@
#!/bin/sh
# Comments out the query-service and frontend sections of deploy/docker/clickhouse-setup/docker-compose.yaml
# Update the line numbers whenever deploy/docker/clickhouse-setup/docker-compose.yaml changes.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz
sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
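The script hinges on sed's numeric address range: `38,62s/.*/# &/` applies the substitution only to lines 38-62, and `&` re-inserts each matched line after the `# ` prefix. An illustrative run on a throwaway file (GNU sed syntax; BSD/macOS sed wants `sed -i ''`):

```bash
printf 'one\ntwo\nthree\nfour\n' > /tmp/sample.yaml
sed -i '2,3s/.*/# &/' /tmp/sample.yaml   # comment out lines 2-3 only
cat /tmp/sample.yaml                     # one, "# two", "# three", four
```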

CONTRIBUTING.md

@@ -30,7 +30,6 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s
   - [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
 - [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
   - [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
-- [Contribute to Dashboards](#6-contribute-to-dashboards-)
 - [Other Ways to Contribute](#other-ways-to-contribute)
 
 # 1. General Instructions 📝
@@ -38,7 +37,7 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s
 ## 1.1 For Creating Issue(s)
 Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.
 
-**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
+**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
 
 #### Details like these are incredibly useful:
@@ -57,7 +56,7 @@ Before making any significant changes and before filing a new issue, please chec
 Discussing your proposed changes ahead of time will make the contribution
 process smooth for everyone 🙌.
 
-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**
 
 <hr>
@@ -98,14 +97,13 @@ GitHub provides additional document on [forking a repository](https://help.githu
 stability and quality of the component.
 
-You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack community](https://signoz.io/slack).
+You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [SLACK](https://signoz.io/slack).
 
 ### Pointers:
 - If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
 - If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
 - If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
 - If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
-- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).
 
 <hr>
@@ -119,7 +117,7 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be
 - Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
 
-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**
 
 <hr>
@@ -129,13 +127,14 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be
 - [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
 - [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
-- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz)
 
 Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.
 
-**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it.
+**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
+
+⚠️ If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
 
-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**
 
 <hr>
@@ -189,7 +188,7 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
 ### Important Notes:
 The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
 
-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**
 
 ## 3.2 Contribute to Frontend without installing SigNoz backend
@@ -210,7 +209,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
 **Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)
 
-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**
 
 <hr>
@@ -310,7 +309,7 @@ Click the button below. A workspace with all required environments will be creat
 > To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->
 
-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**
 
 <hr>
@@ -348,7 +347,7 @@ curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-
 ```bash
 kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
- 'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
+ 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
 ```
 
 **5.1.3 To stop the load generation:**
@@ -366,21 +365,10 @@ curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-
  | HOTROD_NAMESPACE=sample-application bash
 ```
 
-**[`^top^`](#contributing-guidelines)**
+**[`^top^`](#)**
 
 ---
 
-# 6. Contribute to Dashboards 📈
-
-**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)**
-
-To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief:
-
-1. Create a dashboard JSON file.
-2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed.
-3. Include screenshots of the dashboard in the `assets/` directory.
-4. Submit a pull request for review.
-
 ## Other Ways to Contribute
 There are many other ways to get involved with the community and to participate in this project:
@@ -391,6 +379,7 @@ There are many other ways to get involved with the community and to participate
 - Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
 - Tell others about the project on Twitter, your blog, etc.
 
 Again, Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
 
 Thank You!
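The `swarm` change in the hunk above is a Locust API rename rather than a behavior change: older Locust releases expect `locust_count`/`hatch_rate`, newer ones `user_count`/`spawn_rate`, posted to the same endpoint. Stripped of the kubectl wrapper, the call is just:

```bash
# new-style field names; swap in locust_count / hatch_rate for old Locust versions
curl -X POST -F 'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
```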

Makefile

@@ -8,7 +8,6 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
 BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
 BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
-ZEUS_URL ?= https://api.signoz.cloud
 DEV_BUILD ?= "" # set to any non-empty value to enable dev build
 
 # Internal variables or constants.
@@ -34,9 +33,8 @@ buildHash=${PACKAGE}/pkg/query-service/version.buildHash
 buildTime=${PACKAGE}/pkg/query-service/version.buildTime
 gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
 licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
-zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL
 
-LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
+LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
 DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
 
 all: build-push-frontend build-push-query-service
@@ -81,7 +79,7 @@ build-query-service-static:
 	@if [ $(DEV_BUILD) != "" ]; then \
 		cd $(QUERY_SERVICE_DIRECTORY) && \
 		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
 		-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
 	else \
 		cd $(QUERY_SERVICE_DIRECTORY) && \
 		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
@@ -180,14 +178,13 @@ clear-swarm-ch:
 	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
 		sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
 
-check-no-ee-references:
-	@echo "Checking for 'ee' package references in 'pkg' directory..."
-	@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
-		echo "Error: Found references to 'ee' packages in 'pkg' directory"; \
-		exit 1; \
-	else \
-		echo "No references to 'ee' packages found in 'pkg' directory"; \
-	fi
-
 test:
-	go test ./pkg/query-service/...
+	go test ./pkg/query-service/app/metrics/...
+	go test ./pkg/query-service/cache/...
+	go test ./pkg/query-service/app/...
+	go test ./pkg/query-service/app/querier/...
+	go test ./pkg/query-service/converter/...
+	go test ./pkg/query-service/formatter/...
+	go test ./pkg/query-service/tests/integration/...
+	go test ./pkg/query-service/rules/...
+	go test ./pkg/query-service/collectorsimulator/...
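For readers unfamiliar with the `LD_FLAGS` line this diff edits: each `-X importpath.variable=value` pair overwrites a package-level string variable at link time, which is how `buildHash`, `buildTime`, and friends get baked into the query-service binary. A minimal sketch of the mechanism (the file and the `main.version` variable are illustrative, and the last command assumes you run inside a git checkout):

```bash
cat > /tmp/version-demo.go <<'EOF'
package main

import "fmt"

var version = "dev" // overwritten by -X at link time

func main() { fmt.Println(version) }
EOF
go build -ldflags "-X main.version=$(git rev-parse --short HEAD)" -o /tmp/version-demo /tmp/version-demo.go
/tmp/version-demo   # prints the short commit hash instead of "dev"
```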

README.md (208 changes)

@@ -1,11 +1,8 @@
-<h1 align="center" style="border-bottom: none">
-  <a href="https://signoz.io" target="_blank">
-    <img alt="SigNoz" src="https://github.com/user-attachments/assets/ef9a33f7-12d7-4c94-8908-0a02b22f0c18" width="100" height="100">
-  </a>
-  <br>SigNoz
-</h1>
-<p align="center">All your logs, metrics, and traces in one place. Monitor your application, spot issues before they occur and troubleshoot downtime quickly with rich context. SigNoz is a cost-effective open-source alternative to Datadog and New Relic. Visit <a href="https://signoz.io" target="_blank">signoz.io</a> for the full documentation, tutorials, and guide.</p>
+<p align="center">
+  <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
+  <p align="center">Monitor your applications and troubleshoot problems in your deployed applications, an open-source alternative to DataDog, New Relic, etc.</p>
+</p>
 
 <p align="center">
   <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
@@ -24,115 +21,55 @@
 <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
 </h3>
 
-## Features
-
-### Application Performance Monitoring
-
-Use SigNoz APM to monitor your applications and services. It comes with out-of-box charts for key application metrics like p99 latency, error rate, Apdex and operations per second. You can also monitor the database and external calls made from your application. Read [more](https://signoz.io/application-performance-monitoring/).
-
-You can [instrument](https://signoz.io/docs/instrumentation/) your application with OpenTelemetry to get started.
-
-![apm-cover](https://github.com/user-attachments/assets/fa5c0396-0854-4c8b-b972-9b62fd2a70d2)
-
-### Logs Management
-
-SigNoz can be used as a centralized log management solution. We use ClickHouse (used by likes of Uber & Cloudflare) as a datastore, ⎯ an extremely fast and highly optimized storage for logs data. Instantly search through all your logs using quick filters and a powerful query builder.
-
-You can also create charts on your logs and monitor them with customized dashboards. Read [more](https://signoz.io/log-management/).
-
-![logs-management-cover](https://github.com/user-attachments/assets/343588ee-98fb-4310-b3d2-c5bacf9c7384)
+##
+
+SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can:
+
+👉 Visualise Metrics, Traces and Logs in a single pane of glass
+
+👉 You can see metrics like p99 latency, error rates for your services, external API calls and individual end points.
+
+👉 You can find the root cause of the problem by going to the exact traces which are causing the problem and see detailed flamegraphs of individual request traces.
+
+👉 Run aggregates on trace data to get business relevant metrics
+
+👉 Filter and query logs, build dashboards and alerts based on attributes in logs
+
+👉 Record exceptions automatically in Python, Java, Ruby, and Javascript
+
+👉 Easy to set alerts with DIY query builder
+
+### Application Metrics
+
+![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)
 
 ### Distributed Tracing
-
-Distributed Tracing is essential to troubleshoot issues in microservices applications. Powered by OpenTelemetry, distributed tracing in SigNoz can help you track user requests across services to help you identify performance bottlenecks.
-
-See user requests in a detailed breakdown with the help of Flamegraphs and Gantt Charts. Click on any span to see the entire trace represented beautifully, which will help you make sense of where issues actually occurred in the flow of requests.
-
-Read [more](https://signoz.io/distributed-tracing/).
-
-![distributed-tracing-cover](https://github.com/user-attachments/assets/9bfe060a-0c40-4922-9b55-8a97e1a4076c)
-
-### Metrics and Dashboards
-
-Ingest metrics from your infrastructure or applications and create customized dashboards to monitor them. Create visualization that suits your needs with a variety of panel types like pie chart, time-series, bar chart, etc.
-
-Create queries on your metrics data quickly with an easy-to-use metrics query builder. Add multiple queries and combine those queries with formulae to create really complex queries quickly.
-
-Read [more](https://signoz.io/metrics-and-dashboards/).
-
-![metrics-n-dashboards-cover](https://github.com/user-attachments/assets/a536fd71-1d2c-4681-aa7e-516d754c47a5)
-
-### Alerts
-
-Use alerts in SigNoz to get notified when anything unusual happens in your application. You can set alerts on any type of telemetry signal (logs, metrics, traces), create thresholds and set up a notification channel to get notified. Advanced features like alert history and anomaly detection can help you create smarter alerts.
-
-Alerts in SigNoz help you identify issues proactively so that you can address them before they reach your customers.
-
-Read [more](https://signoz.io/alerts-management/).
-
-![alerts-cover](https://github.com/user-attachments/assets/03873bb8-1b62-4adf-8f56-28bb7b1750ea)
+
+<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
+
+<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
+
+### Logs Management
+
+<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
+
+### Infrastructure Monitoring
+
+<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
 
 ### Exceptions Monitoring
 
-Monitor exceptions automatically in Python, Java, Ruby, and Javascript. For other languages, just drop in a few lines of code and start monitoring exceptions.
-
-See the detailed stack trace for all exceptions caught in your application. You can also log in custom attributes to add more context to your exceptions. For example, you can add attributes to identify users for which exceptions occurred.
-
-Read [more](https://signoz.io/exceptions-monitoring/).
-
-![exceptions-cover](https://github.com/user-attachments/assets/4be37864-59f2-4e8a-8d6e-e29ad04298c5)
+![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-4188d024-3ac9-4799-be95-f5ea9c45436f.png)
+
+### Alerts
+
+<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
 
 <br /><br />
 
-## Why SigNoz?
-
-SigNoz is a single tool for all your monitoring and observability needs. Here are a few reasons why you should choose SigNoz:
-
-- Single tool for observability(logs, metrics, and traces)
-- Built on top of [OpenTelemetry](https://opentelemetry.io/), the open-source standard which frees you from any type of vendor lock-in
-- Correlated logs, metrics and traces for much richer context while debugging
-- Uses ClickHouse (used by likes of Uber & Cloudflare) as datastore - an extremely fast and highly optimized storage for observability data
-- DIY Query builder, PromQL, and ClickHouse queries to fulfill all your use-cases around querying observability data
-- Open-Source - you can use open-source, our [cloud service](https://signoz.io/teams/) or a mix of both based on your use case
-
-## Getting Started
-
-### Create a SigNoz Cloud Account
-
-SigNoz cloud is the easiest way to get started with SigNoz. Our cloud service is for those users who want to spend more time in getting insights for their application performance without worrying about maintenance.
-
-[Get started for free](https://signoz.io/teams/)
-
-### Deploy using Docker(self-hosted)
-
-Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using docker
-
-The [troubleshooting instructions](https://signoz.io/docs/install/troubleshooting/) may be helpful if you face any issues.
-
-<p>&nbsp </p>
-
-### Deploy in Kubernetes using Helm(self-hosted)
-
-Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts
-
-<br /><br />
-
-We also offer managed services in your infra. Check our [pricing plans](https://signoz.io/pricing/) for all details.
-
 ## Join our Slack community
@@ -141,22 +78,64 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
 <br /><br />
 
+## Features:
+
+- Unified UI for metrics, traces and logs. No need to switch from Prometheus to Jaeger to debug issues, or use a logs tool like Elastic separate from your metrics and traces stack.
+- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
+- Slowest endpoints in your application
+- See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
+- Filter traces by service name, operation, latency, error, tags/annotations.
+- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
+- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s cluster
+- Lightning quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
+- End-to-End visibility into infrastructure performance, ingest metrics from all kinds of host environments
+- Easy to set alerts with DIY query builder
+
+<br /><br />
+
+## Why SigNoz?
+
+Being developers, we found it annoying to rely on closed source SaaS vendors for every small feature we wanted. Closed source vendors often surprise you with huge month end bills without any transparency.
+
+We wanted to make a self-hosted & open source version of tools like DataDog, NewRelic for companies that have privacy and security concerns about having customer data going to third party services.
+
+Being open source also gives you complete control of your configuration, sampling, uptimes. You can also build modules over SigNoz to extend business specific capabilities
+
 ### Languages supported:
 
-SigNoz supports all major programming languages for monitoring. Any framework and language supported by OpenTelemetry is supported by SigNoz. Find instructions for instrumenting different languages below:
-
-- [Java](https://signoz.io/docs/instrumentation/java/)
-- [Python](https://signoz.io/docs/instrumentation/python/)
-- [Node.js or Javascript](https://signoz.io/docs/instrumentation/javascript/)
-- [Go](https://signoz.io/docs/instrumentation/golang/)
-- [PHP](https://signoz.io/docs/instrumentation/php/)
-- [.NET](https://signoz.io/docs/instrumentation/dotnet/)
-- [Ruby](https://signoz.io/docs/instrumentation/ruby-on-rails/)
-- [Elixir](https://signoz.io/docs/instrumentation/elixir/)
-- [Rust](https://signoz.io/docs/instrumentation/rust/)
-- [Swift](https://signoz.io/docs/instrumentation/swift/)
-
-You can find our entire documentation [here](https://signoz.io/docs/introduction/).
+We support [OpenTelemetry](https://opentelemetry.io) as the library which you can use to instrument your applications. So any framework and language supported by OpenTelemetry is also supported by SigNoz. Some of the main supported languages are:
+
+- Java
+- Python
+- Node.js
+- Go
+- PHP
+- .NET
+- Ruby
+- Elixir
+- Rust
+
+You can find the complete list of languages here - https://opentelemetry.io/docs/
+
+<br /><br />
+
+## Getting Started
+
+### Deploy using Docker
+
+Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using docker
+
+The [troubleshooting instructions](https://signoz.io/docs/install/troubleshooting/) may be helpful if you face any issues.
+
+<p>&nbsp </p>
+
+### Deploy in Kubernetes using Helm
+
+Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts
 
 <br /><br />
@@ -165,11 +144,9 @@ You can find our entire documentation [here](https://signoz.io/docs/introduction
 ### SigNoz vs Prometheus
 
-Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics, logs and traces, then current experience of stitching together Prometheus & other tools is not great.
+Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great.
 
-SigNoz is a one-stop solution for metrics and other telemetry signals. And because you will use the same standard(OpenTelemetry) to collect all telemetry signals, you can also correlate these signals to troubleshoot quickly.
-
-For example, if you see that there are issues with infrastructure metrics of your k8s cluster at a timestamp, you can jump to other signals like logs and traces to understand the issue quickly.
+Our goal is to provide an integrated UI between metrics & traces - similar to what SaaS vendors like Datadog provides - and give advanced filtering and aggregation over traces, something which Jaeger currently lack.
 
 <p>&nbsp </p>
@@ -181,7 +158,6 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:
 - Jaegar UI doesnt show any metrics on traces or on filtered traces
 - Jaeger cant get aggregates on filtered traces. For example, p99 latency of requests which have tag - customer_type='premium'. This can be done easily on SigNoz
-- You can also go from traces to logs easily in SigNoz
 
 <p>&nbsp </p>
@@ -222,14 +198,14 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
 #### Frontend
 
+- [Palash Gupta](https://github.com/palashgdev)
 - [Yunus M](https://github.com/YounixM)
-- [Vikrant Gupta](https://github.com/vikrantgupta25)
-- [Sagar Rajput](https://github.com/SagarRajput-7)
+- [Rajat Dabade](https://github.com/Rajat-Dabade)
 
 #### DevOps
 
 - [Prashant Shahi](https://github.com/prashant-shahi)
-- [Vibhu Pandey](https://github.com/grandwizard28)
+- [Dhawal Sanghvi](https://github.com/dhawal1248)
 
 <br /><br />

clickhouse-config.xml

@@ -23,9 +23,6 @@
 [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
 -->
 <level>information</level>
-<formatting>
-    <type>json</type>
-</formatting>
 <log>/var/log/clickhouse-server/clickhouse-server.log</log>
 <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
 <!-- Rotation policy
@@ -652,12 +649,12 @@
 See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
 -->
-<!--
 <macros>
     <shard>01</shard>
     <replica>example01-01-1</replica>
 </macros>
--->
 <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
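Whether the `<macros>` block ends up active or commented out is easy to verify against a running server, since ClickHouse exposes the loaded values in the built-in `system.macros` table (the container name below matches the compose files later in this compare):

```bash
docker exec -it signoz-clickhouse clickhouse-client -q "SELECT * FROM system.macros"
```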


@@ -133,7 +133,7 @@ services:
   #   - ./data/clickhouse-3/:/var/lib/clickhouse/
 
   alertmanager:
-    image: signoz/alertmanager:0.23.7
+    image: signoz/alertmanager:0.23.5
     volumes:
       - ./data/alertmanager:/data
     command:
@@ -146,11 +146,11 @@ services:
       condition: on-failure
 
   query-service:
-    image: signoz/query-service:0.61.0
+    image: signoz/query-service:0.46.0
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        # "--prefer-delta=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
@@ -186,7 +186,7 @@ services:
     <<: *db-depend
 
   frontend:
-    image: signoz/frontend:0.61.0
+    image: signoz/frontend:0.46.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -199,7 +199,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
 
   otel-collector:
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.88.24
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -211,9 +211,9 @@ services:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
-      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
+      - DOCKER_MULTI_NODE_CLUSTER=false
       - LOW_CARDINAL_EXCEPTION_GROUPING=false
     ports:
     # - "1777:1777"     # pprof extension
@@ -237,15 +237,13 @@ services:
       - query-service
 
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.111.14
+    image: signoz/signoz-schema-migrator:0.88.24
     deploy:
       restart_policy:
         condition: on-failure
         delay: 5s
     command:
-      - "sync"
-      - "--dsn=tcp://clickhouse:9000"
-      - "--up="
+      - "--dsn=tcp://clickhouse:9000"
     depends_on:
       - clickhouse
     # - clickhouse-2:
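For context, this compose file targets Docker Swarm (note the `deploy.restart_policy` keys), so the retagged images only take effect on a stack redeploy. A sketch, with `signoz` as an illustrative stack name:

```bash
docker stack deploy --compose-file docker-compose.yaml signoz
docker stack services signoz   # confirm the new image tags rolled out
```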

otel-collector-config.yaml

@@ -36,7 +36,6 @@ receivers:
   #      endpoint: 0.0.0.0:6832
   hostmetrics:
     collection_interval: 30s
-    root_path: /hostfs
     scrapers:
       cpu: {}
       load: {}
@@ -66,6 +65,28 @@ processors:
     # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
     detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
     timeout: 2s
+  signozspanmetrics/cumulative:
+    metrics_exporter: clickhousemetricswrite
+    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
+    dimensions_cache_size: 100000
+    dimensions:
+      - name: service.namespace
+        default: default
+      - name: deployment.environment
+        default: default
+      # This is added to ensure the uniqueness of the timeseries
+      # Otherwise, identical timeseries produced by multiple replicas of
+      # collectors result in incorrect APM metrics
+      - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name
   # memory_limiter:
   #   # 80% of maximum memory up to 2G
   #   limit_mib: 1500
@@ -109,20 +130,19 @@ processors:
 exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
-    low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
+    low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
       enabled: true
   clickhousemetricswrite/prometheus:
     endpoint: tcp://clickhouse:9000/signoz_metrics
-  clickhousemetricswritev2:
-    dsn: tcp://clickhouse:9000/signoz_metrics
   # logging: {}
   clickhouselogsexporter:
     dsn: tcp://clickhouse:9000/signoz_logs
+    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
     timeout: 10s
-    use_new_schema: true
 extensions:
   health_check:
     endpoint: 0.0.0.0:13133
@@ -133,28 +153,26 @@ extensions:
 service:
   telemetry:
-    logs:
-      encoding: json
     metrics:
       address: 0.0.0.0:8888
   extensions: [health_check, zpages, pprof]
   pipelines:
     traces:
       receivers: [jaeger, otlp]
-      processors: [signozspanmetrics/delta, batch]
+      processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
       exporters: [clickhousetraces]
     metrics:
       receivers: [otlp]
       processors: [batch]
-      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
+      exporters: [clickhousemetricswrite]
-    metrics/hostmetrics:
+    metrics/generic:
       receivers: [hostmetrics]
       processors: [resourcedetection, batch]
-      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
+      exporters: [clickhousemetricswrite]
     metrics/prometheus:
       receivers: [prometheus]
       processors: [batch]
-      exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
+      exporters: [clickhousemetricswrite/prometheus]
     logs:
       receivers: [otlp, tcplog/docker]
       processors: [batch]
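Edits like these, where a pipeline is renamed and exporters are swapped, are easy to get wrong by hand; the collector can statically check that every receiver, processor, and exporter referenced by a pipeline is actually defined. A sketch assuming an upstream `otelcol` (or `otelcol-contrib`) binary is available; SigNoz's own distribution may expose this differently:

```bash
otelcol validate --config=otel-collector-config.yaml
```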

clickhouse-config.xml

@@ -23,9 +23,6 @@
 [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
 -->
 <level>information</level>
-<formatting>
-    <type>json</type>
-</formatting>
 <log>/var/log/clickhouse-server/clickhouse-server.log</log>
 <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
 <!-- Rotation policy
@@ -652,12 +649,12 @@
 See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
 -->
-<!--
 <macros>
     <shard>01</shard>
     <replica>example01-01-1</replica>
 </macros>
--->
 <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->


@@ -1,8 +1,5 @@
 version: "2.4"
-
-include:
-  - test-app-docker-compose.yaml
 
 services:
   zookeeper-1:
     image: bitnami/zookeeper:3.7.1
@@ -57,7 +54,7 @@ services:
   alertmanager:
     container_name: signoz-alertmanager
-    image: signoz/alertmanager:0.23.7
+    image: signoz/alertmanager:0.23.5
     volumes:
       - ./data/alertmanager:/data
     depends_on:
@@ -69,12 +66,10 @@ services:
       - --storage.path=/data
 
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
     container_name: otel-migrator
     command:
-      - "sync"
       - "--dsn=tcp://clickhouse:9000"
-      - "--up="
     depends_on:
       clickhouse:
         condition: service_healthy
@@ -86,7 +81,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.88.24
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -98,8 +93,6 @@ services:
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
-      - /var/lib/docker/containers:/var/lib/docker/containers:ro
-      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
     ports:
@@ -133,3 +126,29 @@ services:
     depends_on:
       - otel-collector
     restart: on-failure
+
+  hotrod:
+    image: jaegertracing/example-hotrod:1.30
+    container_name: hotrod
+    logging:
+      options:
+        max-size: 50m
+        max-file: "3"
+    command: [ "all" ]
+    environment:
+      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
+
+  load-hotrod:
+    image: "signoz/locust:1.2.3"
+    container_name: load-hotrod
+    hostname: load-hotrod
+    environment:
+      ATTACKED_HOST: http://hotrod:8080
+      LOCUST_MODE: standalone
+      NO_PROXY: standalone
+      TASK_DELAY_FROM: 5
+      TASK_DELAY_TO: 30
+      QUIET_MODE: "${QUIET_MODE:-false}"
+      LOCUST_OPTS: "--headless -u 10 -r 1"
+    volumes:
+      - ../common/locust-scripts:/locust
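With the `hotrod` service added back by this hunk, a single trace can also be produced by hand instead of waiting for the locust load. HotROD's frontend listens on port 8080, which this file does not publish to the host, so the sketch below curls it from inside the compose network (the `clickhouse-setup_default` network name is an assumption based on Compose's directory-name default):

```bash
docker run --rm --network clickhouse-setup_default curlimages/curl \
  -s 'http://hotrod:8080/dispatch?customer=123'
```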


@@ -25,7 +25,7 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        # "--prefer-delta=true"
       ]
     ports:
       - "6060:6060"


@@ -1,8 +1,5 @@
 version: "2.4"
-
-include:
-  - test-app-docker-compose.yaml
 
 x-clickhouse-defaults: &clickhouse-defaults
   restart: on-failure
   # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
@@ -152,7 +149,7 @@ services:
   #   - ./user_scripts:/var/lib/clickhouse/user_scripts/
 
   alertmanager:
-    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
+    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
     container_name: signoz-alertmanager
     volumes:
       - ./data/alertmanager:/data
@@ -167,13 +164,13 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.61.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.46.0}
     container_name: signoz-query-service
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "-gateway-url=https://api.staging.signoz.cloud",
-        "--use-logs-new-schema=true"
+        "-gateway-url=https://api.staging.signoz.cloud"
+        # "--prefer-delta=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
@@ -191,7 +188,6 @@ services:
       - GODEBUG=netdns=go
       - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
-      - KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
     restart: on-failure
     healthcheck:
       test:
@@ -208,7 +204,7 @@ services:
     <<: *db-depend
 
   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.61.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.46.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -220,7 +216,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
 
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -234,7 +230,7 @@ services:
   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.24}
     container_name: signoz-otel-collector
     command:
       [
@@ -248,9 +244,9 @@ services:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
-      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
+      - DOCKER_MULTI_NODE_CLUSTER=false
       - LOW_CARDINAL_EXCEPTION_GROUPING=false
     ports:
     # - "1777:1777"     # pprof extension
@@ -283,3 +279,29 @@ services:
     depends_on:
       - otel-collector
     restart: on-failure
+
+  hotrod:
+    image: jaegertracing/example-hotrod:1.30
+    container_name: hotrod
+    logging:
+      options:
+        max-size: 50m
+        max-file: "3"
+    command: [ "all" ]
+    environment:
+      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
+
+  load-hotrod:
+    image: "signoz/locust:1.2.3"
+    container_name: load-hotrod
+    hostname: load-hotrod
+    environment:
+      ATTACKED_HOST: http://hotrod:8080
+      LOCUST_MODE: standalone
+      NO_PROXY: standalone
+      TASK_DELAY_FROM: 5
+      TASK_DELAY_TO: 30
+      QUIET_MODE: "${QUIET_MODE:-false}"
+      LOCUST_OPTS: "--headless -u 10 -r 1"
+    volumes:
+      - ../common/locust-scripts:/locust

View File

@@ -1,3 +1,306 @@
include: version: "2.4"
- test-app-docker-compose.yaml
- docker-compose-minimal.yaml x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
# adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
<<: *clickhouse-defaults
container_name: signoz-clickhouse
hostname: clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
container_name: signoz-alertmanager
volumes:
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
# Note for maintainers/contributors who change the line numbers of the frontend & query-service sections: please update them in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.46.0}
container_name: signoz-query-service
command:
[
"-config=/root/config/prometheus.yml"
# "--prefer-delta=true"
]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
interval: 30s
timeout: 5s
retries: 3
<<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.46.0}
container_name: signoz-frontend
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.24}
container_name: signoz-otel-collector
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: [ "all" ]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "signoz/locust:1.2.3"
container_name: load-hotrod
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust

View File

@@ -36,7 +36,6 @@ receivers:
# endpoint: 0.0.0.0:6832 # endpoint: 0.0.0.0:6832
hostmetrics: hostmetrics:
collection_interval: 30s collection_interval: 30s
root_path: /hostfs
scrapers: scrapers:
cpu: {} cpu: {}
load: {} load: {}
@@ -57,11 +56,35 @@ receivers:
labels: labels:
job_name: otel-collector job_name: otel-collector
processors: processors:
batch: batch:
send_batch_size: 10000 send_batch_size: 10000
send_batch_max_size: 11000 send_batch_max_size: 11000
timeout: 10s timeout: 10s
signozspanmetrics/cumulative:
metrics_exporter: clickhousemetricswrite
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
# memory_limiter: # memory_limiter:
# # 80% of maximum memory up to 2G # # 80% of maximum memory up to 2G
# limit_mib: 1500 # limit_mib: 1500
@@ -118,25 +141,22 @@ extensions:
exporters: exporters:
clickhousetraces: clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces datasource: tcp://clickhouse:9000/signoz_traces
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING} docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
clickhousemetricswrite: clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/signoz_metrics endpoint: tcp://clickhouse:9000/signoz_metrics
resource_to_telemetry_conversion: resource_to_telemetry_conversion:
enabled: true enabled: true
clickhousemetricswrite/prometheus: clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics endpoint: tcp://clickhouse:9000/signoz_metrics
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter: clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs dsn: tcp://clickhouse:9000/signoz_logs
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 10s timeout: 10s
use_new_schema: true
# logging: {} # logging: {}
service: service:
telemetry: telemetry:
logs:
encoding: json
metrics: metrics:
address: 0.0.0.0:8888 address: 0.0.0.0:8888
extensions: extensions:
@@ -146,20 +166,20 @@ service:
pipelines: pipelines:
traces: traces:
receivers: [jaeger, otlp] receivers: [jaeger, otlp]
processors: [signozspanmetrics/delta, batch] processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
exporters: [clickhousetraces] exporters: [clickhousetraces]
metrics: metrics:
receivers: [otlp] receivers: [otlp]
processors: [batch] processors: [batch]
exporters: [clickhousemetricswrite, clickhousemetricswritev2] exporters: [clickhousemetricswrite]
metrics/hostmetrics: metrics/generic:
receivers: [hostmetrics] receivers: [hostmetrics]
processors: [resourcedetection, batch] processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite, clickhousemetricswritev2] exporters: [clickhousemetricswrite]
metrics/prometheus: metrics/prometheus:
receivers: [prometheus] receivers: [prometheus]
processors: [batch] processors: [batch]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2] exporters: [clickhousemetricswrite/prometheus]
logs: logs:
receivers: [otlp, tcplog/docker] receivers: [otlp, tcplog/docker]
processors: [batch] processors: [batch]

View File

@@ -1,26 +0,0 @@
services:
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: [ "all" ]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "signoz/locust:1.2.3"
container_name: load-hotrod
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust

View File

@@ -1,8 +1,3 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server { server {
listen 3301; listen 3301;
server_name _; server_name _;
@@ -47,14 +42,6 @@ server {
proxy_read_timeout 600s; proxy_read_timeout 600s;
} }
location /ws {
proxy_pass http://query-service:8080/ws;
proxy_http_version 1.1;
proxy_set_header Upgrade "websocket";
proxy_set_header Connection "upgrade";
proxy_read_timeout 86400;
}
# redirect server error pages to the static page /50x.html # redirect server error pages to the static page /50x.html
# #
error_page 500 502 503 504 /50x.html; error_page 500 502 503 504 /50x.html;

View File

@@ -389,7 +389,7 @@ trap bye EXIT
URL="https://api.segment.io/v1/track" URL="https://api.segment.io/v1/track"
HEADER_1="Content-Type: application/json" HEADER_1="Content-Type: application/json"
HEADER_2="Authorization: Basic OWtScko3b1BDR1BFSkxGNlFqTVBMdDVibGpGaFJRQnI=" HEADER_2="Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6"
send_event() { send_event() {
error="" error=""

View File

@@ -1,5 +1,5 @@
# use a minimal alpine image # use a minimal alpine image
FROM alpine:3.20.3 FROM alpine:3.18.6
# Add Maintainer Info # Add Maintainer Info
LABEL maintainer="signoz" LABEL maintainer="signoz"

View File

@@ -1,44 +0,0 @@
package anomaly
import (
"context"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)
type DailyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*DailyProvider)(nil)
func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &dp.BaseSeasonalProvider
}
// NewDailyProvider uses the same generic option type
func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
dp := &DailyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(dp)
}
dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: dp.reader,
Cache: dp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: dp.fluxInterval,
FeatureLookup: dp.ff,
})
return dp
}
func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
req.Seasonality = SeasonalityDaily
return p.getAnomalies(ctx, req)
}
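The provider above is assembled with Go generics and functional options: any provider that can hand back its BaseSeasonalProvider accepts the shared With* options. Below is a minimal self-contained sketch of that pattern (not part of the diff; WithCacheTTL is a hypothetical option standing in for the real WithCache/WithReader/WithKeyGenerator/WithFeatureLookup):
package main
import "fmt"
// Base holds shared configuration that every provider embeds,
// playing the role of BaseSeasonalProvider (simplified).
type Base struct{ cacheTTL int }
// BaseProvider is implemented by any provider that exposes its Base.
type BaseProvider interface{ GetBase() *Base }
// Option is a generic functional option usable with any BaseProvider,
// mirroring GenericProviderOption[T BaseProvider].
type Option[T BaseProvider] func(T)
// WithCacheTTL is a hypothetical option used only for this sketch.
func WithCacheTTL[T BaseProvider](ttl int) Option[T] {
	return func(p T) { p.GetBase().cacheTTL = ttl }
}
// DailyProvider embeds Base, as the real DailyProvider embeds BaseSeasonalProvider.
type DailyProvider struct{ Base }
func (d *DailyProvider) GetBase() *Base { return &d.Base }
// NewDailyProvider applies the options and returns the provider.
func NewDailyProvider(opts ...Option[*DailyProvider]) *DailyProvider {
	d := &DailyProvider{}
	for _, opt := range opts {
		opt(d)
	}
	return d
}
func main() {
	d := NewDailyProvider(WithCacheTTL[*DailyProvider](60))
	fmt.Println(d.cacheTTL) // prints 60
}
The same shape lets the hourly and weekly providers below reuse one set of options without duplicating them per concrete type.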

View File

@@ -1,44 +0,0 @@
package anomaly
import (
"context"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)
type HourlyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*HourlyProvider)(nil)
func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &hp.BaseSeasonalProvider
}
// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
hp := &HourlyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(hp)
}
hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: hp.reader,
Cache: hp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: hp.fluxInterval,
FeatureLookup: hp.ff,
})
return hp
}
func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
req.Seasonality = SeasonalityHourly
return p.getAnomalies(ctx, req)
}

View File

@@ -1,248 +0,0 @@
package anomaly
import (
"math"
"time"
"go.signoz.io/signoz/pkg/query-service/common"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
type Seasonality string
const (
SeasonalityHourly Seasonality = "hourly"
SeasonalityDaily Seasonality = "daily"
SeasonalityWeekly Seasonality = "weekly"
)
func (s Seasonality) String() string {
return string(s)
}
var (
oneWeekOffset = 24 * 7 * time.Hour.Milliseconds()
oneDayOffset = 24 * time.Hour.Milliseconds()
oneHourOffset = time.Hour.Milliseconds()
fiveMinOffset = 5 * time.Minute.Milliseconds()
)
func (s Seasonality) IsValid() bool {
switch s {
case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
return true
default:
return false
}
}
type GetAnomaliesRequest struct {
Params *v3.QueryRangeParamsV3
Seasonality Seasonality
}
type GetAnomaliesResponse struct {
Results []*v3.Result
}
// anomalyParams is the params for anomaly detection
// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
//            = (rounded value for the past period) + (seasonal growth)
// score = abs(value - prediction) / stddev(current_season_query)
type anomalyQueryParams struct {
// CurrentPeriodQuery is the query range params for the period the user is looking at, or the eval window
// Example: (now-5m, now), (now-30m, now), (now-1h, now)
// The results obtained from this query are used to compare with predicted values
// and to detect anomalies
CurrentPeriodQuery *v3.QueryRangeParamsV3
// PastPeriodQuery is the query range params for past seasonal period
// Example: For weekly seasonality, (now-1w-5m, now-1w)
// : For daily seasonality, (now-1d-5m, now-1d)
// : For hourly seasonality, (now-1h-5m, now-1h)
PastPeriodQuery *v3.QueryRangeParamsV3
// CurrentSeasonQuery is the query range params for current period (seasonal)
// Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
// : For daily seasonality, this is the query range params for the (now-1d-5m, now)
// : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
CurrentSeasonQuery *v3.QueryRangeParamsV3
// PastSeasonQuery is the query range params for past seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
// : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
// : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
PastSeasonQuery *v3.QueryRangeParamsV3
// Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
// : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
// : For hourly seasonality, this is the query range params for the (now-3h-5m, now-2h)
Past2SeasonQuery *v3.QueryRangeParamsV3
// Past3SeasonQuery is the query range params for past 3 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-4w-5m, now-3w)
// : For daily seasonality, this is the query range params for the (now-4d-5m, now-3d)
// : For hourly seasonality, this is the query range params for the (now-4h-5m, now-3h)
Past3SeasonQuery *v3.QueryRangeParamsV3
}
func updateStepInterval(req *v3.QueryRangeParamsV3) {
start := req.Start
end := req.End
req.Step = int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60))
for _, q := range req.CompositeQuery.BuilderQueries {
// If the step interval is less than the minimum allowed step interval, set it to the minimum allowed step interval
if minStep := common.MinAllowedStepInterval(start, end); q.StepInterval < minStep {
q.StepInterval = minStep
}
}
}
func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonality) *anomalyQueryParams {
start := req.Start
end := req.End
currentPeriodQuery := &v3.QueryRangeParamsV3{
Start: start,
End: end,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(currentPeriodQuery)
var pastPeriodStart, pastPeriodEnd int64
switch seasonality {
// for one week period, we fetch the data from the past week with 5 min offset
case SeasonalityWeekly:
pastPeriodStart = start - oneWeekOffset - fiveMinOffset
pastPeriodEnd = end - oneWeekOffset
// for one day period, we fetch the data from the past day with 5 min offset
case SeasonalityDaily:
pastPeriodStart = start - oneDayOffset - fiveMinOffset
pastPeriodEnd = end - oneDayOffset
// for one hour period, we fetch the data from the past hour with 5 min offset
case SeasonalityHourly:
pastPeriodStart = start - oneHourOffset - fiveMinOffset
pastPeriodEnd = end - oneHourOffset
}
pastPeriodQuery := &v3.QueryRangeParamsV3{
Start: pastPeriodStart,
End: pastPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(pastPeriodQuery)
// seasonality growth trend
var currentGrowthPeriodStart, currentGrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
currentGrowthPeriodStart = start - oneWeekOffset
currentGrowthPeriodEnd = end
case SeasonalityDaily:
currentGrowthPeriodStart = start - oneDayOffset
currentGrowthPeriodEnd = end
case SeasonalityHourly:
currentGrowthPeriodStart = start - oneHourOffset
currentGrowthPeriodEnd = end
}
currentGrowthQuery := &v3.QueryRangeParamsV3{
Start: currentGrowthPeriodStart,
End: currentGrowthPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(currentGrowthQuery)
var pastGrowthPeriodStart, pastGrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
pastGrowthPeriodStart = start - 2*oneWeekOffset
pastGrowthPeriodEnd = start - 1*oneWeekOffset
case SeasonalityDaily:
pastGrowthPeriodStart = start - 2*oneDayOffset
pastGrowthPeriodEnd = start - 1*oneDayOffset
case SeasonalityHourly:
pastGrowthPeriodStart = start - 2*oneHourOffset
pastGrowthPeriodEnd = start - 1*oneHourOffset
}
pastGrowthQuery := &v3.QueryRangeParamsV3{
Start: pastGrowthPeriodStart,
End: pastGrowthPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(pastGrowthQuery)
var past2GrowthPeriodStart, past2GrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
past2GrowthPeriodStart = start - 3*oneWeekOffset
past2GrowthPeriodEnd = start - 2*oneWeekOffset
case SeasonalityDaily:
past2GrowthPeriodStart = start - 3*oneDayOffset
past2GrowthPeriodEnd = start - 2*oneDayOffset
case SeasonalityHourly:
past2GrowthPeriodStart = start - 3*oneHourOffset
past2GrowthPeriodEnd = start - 2*oneHourOffset
}
past2GrowthQuery := &v3.QueryRangeParamsV3{
Start: past2GrowthPeriodStart,
End: past2GrowthPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(past2GrowthQuery)
var past3GrowthPeriodStart, past3GrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
past3GrowthPeriodStart = start - 4*oneWeekOffset
past3GrowthPeriodEnd = start - 3*oneWeekOffset
case SeasonalityDaily:
past3GrowthPeriodStart = start - 4*oneDayOffset
past3GrowthPeriodEnd = start - 3*oneDayOffset
case SeasonalityHourly:
past3GrowthPeriodStart = start - 4*oneHourOffset
past3GrowthPeriodEnd = start - 3*oneHourOffset
}
past3GrowthQuery := &v3.QueryRangeParamsV3{
Start: past3GrowthPeriodStart,
End: past3GrowthPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(past3GrowthQuery)
return &anomalyQueryParams{
CurrentPeriodQuery: currentPeriodQuery,
PastPeriodQuery: pastPeriodQuery,
CurrentSeasonQuery: currentGrowthQuery,
PastSeasonQuery: pastGrowthQuery,
Past2SeasonQuery: past2GrowthQuery,
Past3SeasonQuery: past3GrowthQuery,
}
}
type anomalyQueryResults struct {
CurrentPeriodResults []*v3.Result
PastPeriodResults []*v3.Result
CurrentSeasonResults []*v3.Result
PastSeasonResults []*v3.Result
Past2SeasonResults []*v3.Result
Past3SeasonResults []*v3.Result
}
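A worked example helps here: the sketch below (not part of the diff) runs the prediction and score arithmetic from the comment above with hypothetical averages; in the real code these come from the query windows described above, each shifted back by its season length plus a 5-minute offset.
package main
import (
	"fmt"
	"math"
)
// mean returns the arithmetic mean of its arguments.
func mean(vals ...float64) float64 {
	var sum float64
	for _, v := range vals {
		sum += v
	}
	return sum / float64(len(vals))
}
func main() {
	pastPeriodAvg := 100.0                  // avg(past_period_query), hypothetical
	currentSeasonAvg := 110.0               // avg(current_season_query), hypothetical
	seasonMean := mean(104.0, 106.0, 105.0) // mean of the three past seasons = 105.0
	// prediction = avg(past_period) + avg(current_season) - mean(past 3 seasons)
	prediction := pastPeriodAvg + currentSeasonAvg - seasonMean // 100 + 110 - 105 = 105
	// score = abs(value - prediction) / stddev(current_season_query)
	value, stdDev := 130.0, 10.0
	score := math.Abs(value-prediction) / stdDev // |130 - 105| / 10 = 2.5
	fmt.Printf("prediction=%.1f score=%.2f\n", prediction, score)
	// With the default z_score_threshold of 3 used further below,
	// a score of 2.5 would not be flagged as an anomaly.
}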

View File

@@ -1,9 +0,0 @@
package anomaly
import (
"context"
)
type Provider interface {
GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
}

View File

@@ -1,466 +0,0 @@
package anomaly
import (
"context"
"math"
"time"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/interfaces"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)
var (
// TODO(srikanthccv): make this configurable?
movingAvgWindowSize = 7
)
// BaseProvider is an interface that includes common methods for all provider types
type BaseProvider interface {
GetBaseSeasonalProvider() *BaseSeasonalProvider
}
// GenericProviderOption is a generic type for provider options
type GenericProviderOption[T BaseProvider] func(T)
func WithCache[T BaseProvider](cache cache.Cache) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().cache = cache
}
}
func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().keyGenerator = keyGenerator
}
}
func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().ff = ff
}
}
func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().reader = reader
}
}
type BaseSeasonalProvider struct {
querierV2 interfaces.Querier
reader interfaces.Reader
fluxInterval time.Duration
cache cache.Cache
keyGenerator cache.KeyGenerator
ff interfaces.FeatureLookup
}
func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
if !req.Seasonality.IsValid() {
req.Seasonality = SeasonalityDaily
}
return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}
func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
if err != nil {
return nil, err
}
currentPeriodResults, err = postprocess.PostProcessResult(currentPeriodResults, params.CurrentPeriodQuery)
if err != nil {
return nil, err
}
zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
if err != nil {
return nil, err
}
pastPeriodResults, err = postprocess.PostProcessResult(pastPeriodResults, params.PastPeriodQuery)
if err != nil {
return nil, err
}
zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
if err != nil {
return nil, err
}
currentSeasonResults, err = postprocess.PostProcessResult(currentSeasonResults, params.CurrentSeasonQuery)
if err != nil {
return nil, err
}
zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
if err != nil {
return nil, err
}
pastSeasonResults, err = postprocess.PostProcessResult(pastSeasonResults, params.PastSeasonQuery)
if err != nil {
return nil, err
}
zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
if err != nil {
return nil, err
}
past2SeasonResults, err = postprocess.PostProcessResult(past2SeasonResults, params.Past2SeasonQuery)
if err != nil {
return nil, err
}
zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
if err != nil {
return nil, err
}
past3SeasonResults, err = postprocess.PostProcessResult(past3SeasonResults, params.Past3SeasonQuery)
if err != nil {
return nil, err
}
return &anomalyQueryResults{
CurrentPeriodResults: currentPeriodResults,
PastPeriodResults: pastPeriodResults,
CurrentSeasonResults: currentSeasonResults,
PastSeasonResults: pastSeasonResults,
Past2SeasonResults: past2SeasonResults,
Past3SeasonResults: past3SeasonResults,
}, nil
}
// getMatchingSeries gets the matching series from the query result
// for the given series
func (p *BaseSeasonalProvider) getMatchingSeries(queryResult *v3.Result, series *v3.Series) *v3.Series {
if queryResult == nil || len(queryResult.Series) == 0 {
return nil
}
for _, curr := range queryResult.Series {
currLabels := labels.FromMap(curr.Labels)
seriesLabels := labels.FromMap(series.Labels)
if currLabels.Hash() == seriesLabels.Hash() {
return curr
}
}
return nil
}
func (p *BaseSeasonalProvider) getAvg(series *v3.Series) float64 {
if series == nil || len(series.Points) == 0 {
return 0
}
var sum float64
for _, smpl := range series.Points {
sum += smpl.Value
}
return sum / float64(len(series.Points))
}
func (p *BaseSeasonalProvider) getStdDev(series *v3.Series) float64 {
if series == nil || len(series.Points) == 0 {
return 0
}
avg := p.getAvg(series)
var sum float64
for _, smpl := range series.Points {
sum += math.Pow(smpl.Value-avg, 2)
}
return math.Sqrt(sum / float64(len(series.Points)))
}
// getMovingAvg gets the moving average for the given series
// for the given window size and start index
func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSize, startIdx int) float64 {
if series == nil || len(series.Points) == 0 {
return 0
}
if startIdx >= len(series.Points)-movingAvgWindowSize {
startIdx = int(math.Max(0, float64(len(series.Points)-movingAvgWindowSize)))
}
var sum float64
points := series.Points[startIdx:]
for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
sum += points[i].Value
}
avg := sum / float64(movingAvgWindowSize)
return avg
}
func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
if len(floats) == 0 {
return 0
}
var sum float64
for _, f := range floats {
sum += f
}
return sum / float64(len(floats))
}
func (p *BaseSeasonalProvider) getPredictedSeries(
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
) *v3.Series {
predictedSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
// for each point in the series, get the predicted value
// the predicted value is the moving average (with window size = 7) of the previous period series
// plus the average of the current season series
// minus the mean of the past season series, past2 season series and past3 season series
for idx, curr := range series.Points {
predictedValue :=
p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) +
p.getAvg(currentSeasonSeries) -
p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
if predictedValue < 0 {
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
zap.L().Info("predictedSeries",
zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)),
zap.Float64("avg", p.getAvg(currentSeasonSeries)),
zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))),
zap.Any("labels", series.Labels),
zap.Float64("predictedValue", predictedValue),
)
predictedSeries.Points = append(predictedSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: predictedValue,
})
}
return predictedSeries
}
// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold
// moving avg of the previous period series + z score threshold * std dev of the series
// moving avg of the previous period series - z score threshold * std dev of the series
func (p *BaseSeasonalProvider) getBounds(
series, predictedSeries *v3.Series,
zScoreThreshold float64,
) (*v3.Series, *v3.Series) {
upperBoundSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
lowerBoundSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
for idx, curr := range series.Points {
upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
upperBoundSeries.Points = append(upperBoundSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: upperBound,
})
lowerBoundSeries.Points = append(lowerBoundSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: math.Max(lowerBound, 0),
})
}
return upperBoundSeries, lowerBoundSeries
}
// getExpectedValue gets the expected value for the given series
// for the given index
// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series
func (p *BaseSeasonalProvider) getExpectedValue(
_, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, idx int,
) float64 {
prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
}
// getScore gets the anomaly score for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getScore(
series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
) float64 {
expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
return (value - expectedValue) / p.getStdDev(weekSeries)
}
// getAnomalyScores gets the anomaly scores for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getAnomalyScores(
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
) *v3.Series {
anomalyScoreSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
for idx, curr := range series.Points {
anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
anomalyScoreSeries.Points = append(anomalyScoreSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: anomalyScore,
})
}
return anomalyScoreSeries
}
func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
anomalyParams := p.getQueryParams(req)
anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
if err != nil {
return nil, err
}
currentPeriodResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.CurrentPeriodResults {
currentPeriodResultsMap[result.QueryName] = result
}
pastPeriodResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.PastPeriodResults {
pastPeriodResultsMap[result.QueryName] = result
}
currentSeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.CurrentSeasonResults {
currentSeasonResultsMap[result.QueryName] = result
}
pastSeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.PastSeasonResults {
pastSeasonResultsMap[result.QueryName] = result
}
past2SeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.Past2SeasonResults {
past2SeasonResultsMap[result.QueryName] = result
}
past3SeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.Past3SeasonResults {
past3SeasonResultsMap[result.QueryName] = result
}
for _, result := range currentPeriodResultsMap {
funcs := req.Params.CompositeQuery.BuilderQueries[result.QueryName].Functions
var zScoreThreshold float64
for _, f := range funcs {
if f.Name == v3.FunctionNameAnomaly {
value, ok := f.NamedArgs["z_score_threshold"]
if ok {
zScoreThreshold = value.(float64)
} else {
zScoreThreshold = 3
}
break
}
}
pastPeriodResult, ok := pastPeriodResultsMap[result.QueryName]
if !ok {
continue
}
currentSeasonResult, ok := currentSeasonResultsMap[result.QueryName]
if !ok {
continue
}
pastSeasonResult, ok := pastSeasonResultsMap[result.QueryName]
if !ok {
continue
}
past2SeasonResult, ok := past2SeasonResultsMap[result.QueryName]
if !ok {
continue
}
past3SeasonResult, ok := past3SeasonResultsMap[result.QueryName]
if !ok {
continue
}
for _, series := range result.Series {
stdDev := p.getStdDev(series)
zap.L().Info("stdDev", zap.Float64("stdDev", stdDev), zap.Any("labels", series.Labels))
pastPeriodSeries := p.getMatchingSeries(pastPeriodResult, series)
currentSeasonSeries := p.getMatchingSeries(currentSeasonResult, series)
pastSeasonSeries := p.getMatchingSeries(pastSeasonResult, series)
past2SeasonSeries := p.getMatchingSeries(past2SeasonResult, series)
past3SeasonSeries := p.getMatchingSeries(past3SeasonResult, series)
prevSeriesAvg := p.getAvg(pastPeriodSeries)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
zap.L().Info("getAvg", zap.Float64("prevSeriesAvg", prevSeriesAvg), zap.Float64("currentSeasonSeriesAvg", currentSeasonSeriesAvg), zap.Float64("pastSeasonSeriesAvg", pastSeasonSeriesAvg), zap.Float64("past2SeasonSeriesAvg", past2SeasonSeriesAvg), zap.Float64("past3SeasonSeriesAvg", past3SeasonSeriesAvg), zap.Any("labels", series.Labels))
predictedSeries := p.getPredictedSeries(
series,
pastPeriodSeries,
currentSeasonSeries,
pastSeasonSeries,
past2SeasonSeries,
past3SeasonSeries,
)
result.PredictedSeries = append(result.PredictedSeries, predictedSeries)
upperBoundSeries, lowerBoundSeries := p.getBounds(
series,
predictedSeries,
zScoreThreshold,
)
result.UpperBoundSeries = append(result.UpperBoundSeries, upperBoundSeries)
result.LowerBoundSeries = append(result.LowerBoundSeries, lowerBoundSeries)
anomalyScoreSeries := p.getAnomalyScores(
series,
pastPeriodSeries,
currentSeasonSeries,
pastSeasonSeries,
past2SeasonSeries,
past3SeasonSeries,
)
result.AnomalyScores = append(result.AnomalyScores, anomalyScoreSeries)
}
}
results := make([]*v3.Result, 0, len(currentPeriodResultsMap))
for _, result := range currentPeriodResultsMap {
results = append(results, result)
}
return &GetAnomaliesResponse{
Results: results,
}, nil
}
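The bounds logic above centers the band on a moving average of the predicted series and widens it by the z-score threshold times the series' standard deviation, clamping the lower bound at zero. A self-contained sketch with made-up data (not part of the diff):
package main
import (
	"fmt"
	"math"
)
// movingAvg averages up to window values starting at startIdx,
// mirroring the shape of getMovingAvg above (simplified).
func movingAvg(series []float64, window, startIdx int) float64 {
	if startIdx >= len(series)-window {
		startIdx = int(math.Max(0, float64(len(series)-window)))
	}
	var sum float64
	n := 0
	for i := startIdx; i < len(series) && n < window; i++ {
		sum += series[i]
		n++
	}
	return sum / float64(window)
}
// stdDev is the population standard deviation, as in getStdDev above.
func stdDev(series []float64) float64 {
	var sum float64
	for _, v := range series {
		sum += v
	}
	avg := sum / float64(len(series))
	var sq float64
	for _, v := range series {
		sq += math.Pow(v-avg, 2)
	}
	return math.Sqrt(sq / float64(len(series)))
}
func main() {
	series := []float64{100, 102, 98, 101, 99, 103, 97, 130}
	predicted := []float64{100, 101, 100, 100, 101, 100, 100, 100}
	const window = 7       // movingAvgWindowSize
	const zThreshold = 3.0 // default z_score_threshold
	sd := stdDev(series)
	for idx, v := range series {
		center := movingAvg(predicted, window, idx)
		upper := center + zThreshold*sd
		lower := math.Max(center-zThreshold*sd, 0) // lower bound clamped at 0
		fmt.Printf("idx=%d value=%.0f bounds=[%.1f, %.1f]\n", idx, v, lower, upper)
	}
}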

View File

@@ -1,43 +0,0 @@
package anomaly
import (
"context"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)
type WeeklyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*WeeklyProvider)(nil)
func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &wp.BaseSeasonalProvider
}
func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
wp := &WeeklyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(wp)
}
wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: wp.reader,
Cache: wp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: wp.fluxInterval,
FeatureLookup: wp.ff,
})
return wp
}
func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
req.Seasonality = SeasonalityWeekly
return p.getAnomalies(ctx, req)
}

View File

@@ -24,6 +24,7 @@ import (
type APIHandlerOptions struct { type APIHandlerOptions struct {
DataConnector interfaces.DataConnector DataConnector interfaces.DataConnector
SkipConfig *basemodel.SkipConfig SkipConfig *basemodel.SkipConfig
PreferDelta bool
PreferSpanMetrics bool PreferSpanMetrics bool
MaxIdleConns int MaxIdleConns int
MaxOpenConns int MaxOpenConns int
@@ -38,10 +39,7 @@ type APIHandlerOptions struct {
Cache cache.Cache Cache cache.Cache
Gateway *httputil.ReverseProxy Gateway *httputil.ReverseProxy
// Querier Flux Interval // Querier Flux Interval
FluxInterval time.Duration FluxInterval time.Duration
UseLogsNewSchema bool
UseTraceNewSchema bool
UseLicensesV3 bool
} }
type APIHandler struct { type APIHandler struct {
@@ -55,6 +53,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{ baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector, Reader: opts.DataConnector,
SkipConfig: opts.SkipConfig, SkipConfig: opts.SkipConfig,
PerferDelta: opts.PreferDelta,
PreferSpanMetrics: opts.PreferSpanMetrics, PreferSpanMetrics: opts.PreferSpanMetrics,
MaxIdleConns: opts.MaxIdleConns, MaxIdleConns: opts.MaxIdleConns,
MaxOpenConns: opts.MaxOpenConns, MaxOpenConns: opts.MaxOpenConns,
@@ -66,9 +65,6 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
LogsParsingPipelineController: opts.LogsParsingPipelineController, LogsParsingPipelineController: opts.LogsParsingPipelineController,
Cache: opts.Cache, Cache: opts.Cache,
FluxInterval: opts.FluxInterval, FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
UseLicensesV3: opts.UseLicensesV3,
}) })
if err != nil { if err != nil {
@@ -177,22 +173,12 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut) router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut) router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
// v2
router.HandleFunc("/api/v2/licenses", router.HandleFunc("/api/v2/licenses",
am.ViewAccess(ah.listLicensesV2)). am.ViewAccess(ah.listLicensesV2)).
Methods(http.MethodGet) Methods(http.MethodGet)
// v3
router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.refreshLicensesV3)).Methods(http.MethodPut)
router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.getActiveLicenseV3)).Methods(http.MethodGet)
// v4
router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
// Gateway // Gateway
router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP)) router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))
ah.APIHandler.RegisterRoutes(router, am) ah.APIHandler.RegisterRoutes(router, am)

View File

@@ -35,14 +35,14 @@ func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
req := basemodel.LoginRequest{} req := basemodel.LoginRequest{}
err := parseRequest(r, &req) err := parseRequest(r, &req)
if err != nil { if err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
ctx := context.Background() ctx := context.Background()
if req.Email != "" && ah.CheckFeature(model.SSO) { if req.Email != "" && ah.CheckFeature(model.SSO) {
var apierr basemodel.BaseApiError var apierr *basemodel.ApiError
_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email) _, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
if apierr != nil && !apierr.IsNil() { if apierr != nil && !apierr.IsNil() {
RespondError(w, apierr, nil) RespondError(w, apierr, nil)
@@ -74,7 +74,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
requestBody, err := io.ReadAll(r.Body) requestBody, err := io.ReadAll(r.Body)
if err != nil { if err != nil {
zap.L().Error("received no input in api", zap.Error(err)) zap.L().Error("received no input in api", zap.Error(err))
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
@@ -82,7 +82,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
if err != nil { if err != nil {
zap.L().Error("received invalid user registration request", zap.Error(err)) zap.L().Error("received invalid user registration request", zap.Error(err))
RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil) RespondError(w, basemodel.BadRequest(fmt.Errorf("failed to register user")), nil)
return return
} }
@@ -90,13 +90,13 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
invite, err := baseauth.ValidateInvite(ctx, req) invite, err := baseauth.ValidateInvite(ctx, req)
if err != nil { if err != nil {
zap.L().Error("failed to validate invite token", zap.Error(err)) zap.L().Error("failed to validate invite token", zap.Error(err))
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
if invite == nil { if invite == nil {
zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err)) zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil) RespondError(w, basemodel.BadRequest(basemodel.ErrSignupFailed{}), nil)
return return
} }
@@ -104,7 +104,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email) domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
if apierr != nil { if apierr != nil {
zap.L().Error("failed to get domain from email", zap.Error(apierr)) zap.L().Error("failed to get domain from email", zap.Error(apierr))
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil) RespondError(w, basemodel.InternalError(basemodel.ErrSignupFailed{}), nil)
} }
precheckResp := &basemodel.PrecheckResponse{ precheckResp := &basemodel.PrecheckResponse{
@@ -120,7 +120,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
return return
} }
var precheckError basemodel.BaseApiError var precheckError *basemodel.ApiError
precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl) precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
if precheckError != nil { if precheckError != nil {
@@ -130,7 +130,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
} else { } else {
// no-sso, validate password // no-sso, validate password
if err := baseauth.ValidatePassword(req.Password); err != nil { if err := baseauth.ValidatePassword(req.Password); err != nil {
RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil) RespondError(w, basemodel.InternalError(fmt.Errorf("password is not in a valid format")), nil)
return return
} }
@@ -155,7 +155,7 @@ func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
inviteObject, err := baseauth.GetInvite(context.Background(), token) inviteObject, err := baseauth.GetInvite(context.Background(), token)
if err != nil { if err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
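The handler changes above replace the basemodel.BaseApiError interface with the concrete *basemodel.ApiError and build every error through basemodel.BadRequest / basemodel.InternalError. The sketch below imitates that shape in a self-contained program; the type fields, status mapping, and JSON body are illustrative assumptions, not SigNoz's exact wire format:
package main
import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
)
// ApiError is a simplified stand-in for basemodel.ApiError.
type ApiError struct {
	Typ string
	Err error
}
func (a *ApiError) Error() string { return fmt.Sprintf("%s: %v", a.Typ, a.Err) }
// BadRequest and InternalError mirror the constructors used in the handlers above.
func BadRequest(err error) *ApiError    { return &ApiError{Typ: "bad_request", Err: err} }
func InternalError(err error) *ApiError { return &ApiError{Typ: "internal_error", Err: err} }
// RespondError writes a uniform JSON error body with a status derived from the type.
func RespondError(w http.ResponseWriter, apiErr *ApiError) {
	status := http.StatusInternalServerError
	if apiErr.Typ == "bad_request" {
		status = http.StatusBadRequest
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	json.NewEncoder(w).Encode(map[string]string{
		"status":    "error",
		"errorType": apiErr.Typ,
		"error":     apiErr.Err.Error(),
	})
}
func main() {
	http.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) {
		RespondError(w, BadRequest(errors.New("email is required")))
	})
	// http.ListenAndServe(":8080", nil) // left commented in this sketch
}
Funneling every handler through one constructor-plus-responder pair is what makes the error payloads consistent across endpoints.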

View File

@@ -1,9 +1,7 @@
package api package api
import ( import (
"errors"
"net/http" "net/http"
"strings"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"go.signoz.io/signoz/pkg/query-service/app/dashboards" "go.signoz.io/signoz/pkg/query-service/app/dashboards"
@@ -31,10 +29,6 @@ func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request
// Get the dashboard UUID from the request // Get the dashboard UUID from the request
uuid := mux.Vars(r)["uuid"] uuid := mux.Vars(r)["uuid"]
if strings.HasPrefix(uuid,"integration") {
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: errors.New("dashboards created by integrations cannot be unlocked")}, "You are not authorized to lock/unlock this dashboard")
return
}
dashboard, err := dashboards.GetDashboard(r.Context(), uuid) dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
if err != nil { if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error()) RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())

View File

@@ -9,6 +9,7 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
) )
func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
@@ -27,12 +28,12 @@ func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
req := model.OrgDomain{} req := model.OrgDomain{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
if err := req.ValidNew(); err != nil { if err := req.ValidNew(); err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
@@ -50,18 +51,18 @@ func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
domainIdStr := mux.Vars(r)["id"] domainIdStr := mux.Vars(r)["id"]
domainId, err := uuid.Parse(domainIdStr) domainId, err := uuid.Parse(domainIdStr)
if err != nil { if err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
req := model.OrgDomain{Id: domainId} req := model.OrgDomain{Id: domainId}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
req.Id = domainId req.Id = domainId
if err := req.Valid(nil); err != nil { if err := req.Valid(nil); err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
} }
if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil { if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
@@ -77,7 +78,7 @@ func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
domainId, err := uuid.Parse(domainIdStr) domainId, err := uuid.Parse(domainIdStr)
if err != nil { if err != nil {
RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil) RespondError(w, basemodel.BadRequest(fmt.Errorf("invalid domain id")), nil)
return return
} }

View File

@@ -1,48 +1,17 @@
package api package api
import ( import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http" "net/http"
"time"
"go.signoz.io/signoz/ee/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model" basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
) )
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
featureSet, err := ah.FF().GetFeatureFlags() featureSet, err := ah.FF().GetFeatureFlags()
if err != nil { if err != nil {
ah.HandleError(w, err, http.StatusInternalServerError) ah.HandleError(w, err, http.StatusInternalServerError)
return return
} }
if constants.FetchFeatures == "true" {
zap.L().Debug("fetching license")
license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
if err != nil {
zap.L().Error("failed to fetch license", zap.Error(err))
} else if license == nil {
zap.L().Debug("no active license found")
} else {
licenseKey := license.Key
zap.L().Debug("fetching zeus features")
zeusFeatures, err := fetchZeusFeatures(constants.ZeusFeaturesURL, licenseKey)
if err == nil {
zap.L().Debug("fetched zeus features", zap.Any("features", zeusFeatures))
// merge featureSet and zeusFeatures in featureSet with higher priority to zeusFeatures
featureSet = MergeFeatureSets(zeusFeatures, featureSet)
} else {
zap.L().Error("failed to fetch zeus features", zap.Error(err))
}
}
}
if ah.opts.PreferSpanMetrics { if ah.opts.PreferSpanMetrics {
for idx := range featureSet { for idx := range featureSet {
feature := &featureSet[idx] feature := &featureSet[idx]
@@ -51,96 +20,5 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
} }
} }
} }
ah.Respond(w, featureSet) ah.Respond(w, featureSet)
} }
// fetchZeusFeatures makes an HTTP GET request to the /zeusFeatures endpoint
// and returns the FeatureSet.
func fetchZeusFeatures(url, licenseKey string) (basemodel.FeatureSet, error) {
// Check if the URL is empty
if url == "" {
return nil, fmt.Errorf("url is empty")
}
// Check if the licenseKey is empty
if licenseKey == "" {
return nil, fmt.Errorf("licenseKey is empty")
}
// Creating an HTTP client with a timeout for better control
client := &http.Client{
Timeout: 10 * time.Second,
}
// Creating a new GET request
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
// Setting the custom header
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
// Making the GET request
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to make GET request: %w", err)
}
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
// Check for non-OK status code
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%w: %d %s", errors.New("received non-OK HTTP status code"), resp.StatusCode, http.StatusText(resp.StatusCode))
}
// Reading and decoding the response body
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response body: %w", err)
}
var zeusResponse ZeusFeaturesResponse
if err := json.Unmarshal(body, &zeusResponse); err != nil {
return nil, fmt.Errorf("%w: %v", errors.New("failed to decode response body"), err)
}
if zeusResponse.Status != "success" {
return nil, fmt.Errorf("%w: %s", errors.New("failed to fetch zeus features"), zeusResponse.Status)
}
return zeusResponse.Data, nil
}
type ZeusFeaturesResponse struct {
Status string `json:"status"`
Data basemodel.FeatureSet `json:"data"`
}
// MergeFeatureSets merges two FeatureSet arrays with precedence to zeusFeatures.
func MergeFeatureSets(zeusFeatures, internalFeatures basemodel.FeatureSet) basemodel.FeatureSet {
// Create a map to store the merged features
featureMap := make(map[string]basemodel.Feature)
// Add all features from the otherFeatures set to the map
for _, feature := range internalFeatures {
featureMap[feature.Name] = feature
}
// Add all features from the zeusFeatures set to the map
// If a feature already exists (i.e., same name), the zeusFeature will overwrite it
for _, feature := range zeusFeatures {
featureMap[feature.Name] = feature
}
// Convert the map back to a FeatureSet slice
var mergedFeatures basemodel.FeatureSet
for _, feature := range featureMap {
mergedFeatures = append(mergedFeatures, feature)
}
return mergedFeatures
}

View File

@@ -1,88 +0,0 @@
package api
import (
"testing"
"github.com/stretchr/testify/assert"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
func TestMergeFeatureSets(t *testing.T) {
tests := []struct {
name string
zeusFeatures basemodel.FeatureSet
internalFeatures basemodel.FeatureSet
expected basemodel.FeatureSet
}{
{
name: "empty zeusFeatures and internalFeatures",
zeusFeatures: basemodel.FeatureSet{},
internalFeatures: basemodel.FeatureSet{},
expected: basemodel.FeatureSet{},
},
{
name: "non-empty zeusFeatures and empty internalFeatures",
zeusFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
},
internalFeatures: basemodel.FeatureSet{},
expected: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
},
},
{
name: "empty zeusFeatures and non-empty internalFeatures",
zeusFeatures: basemodel.FeatureSet{},
internalFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
},
expected: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
},
},
{
name: "non-empty zeusFeatures and non-empty internalFeatures with no conflicts",
zeusFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature3", Active: false},
},
internalFeatures: basemodel.FeatureSet{
{Name: "Feature2", Active: true},
{Name: "Feature4", Active: false},
},
expected: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: true},
{Name: "Feature3", Active: false},
{Name: "Feature4", Active: false},
},
},
{
name: "non-empty zeusFeatures and non-empty internalFeatures with conflicts",
zeusFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
},
internalFeatures: basemodel.FeatureSet{
{Name: "Feature1", Active: false},
{Name: "Feature3", Active: true},
},
expected: basemodel.FeatureSet{
{Name: "Feature1", Active: true},
{Name: "Feature2", Active: false},
{Name: "Feature3", Active: true},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actual := MergeFeatureSets(test.zeusFeatures, test.internalFeatures)
assert.ElementsMatch(t, test.expected, actual)
})
}
}

View File

@@ -9,15 +9,7 @@ import (
func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) { func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
ctx := req.Context() ctx := req.Context()
validPath := false if !strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+gateway.AllowedPrefix) {
for _, allowedPrefix := range gateway.AllowedPrefix {
if strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+allowedPrefix) {
validPath = true
break
}
}
if !validPath {
rw.WriteHeader(http.StatusNotFound) rw.WriteHeader(http.StatusNotFound)
return return
} }

View File

@@ -9,7 +9,7 @@ import (
"go.signoz.io/signoz/ee/query-service/constants" "go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/http/render" basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap" "go.uber.org/zap"
) )
@@ -60,21 +60,6 @@ type billingDetails struct {
} `json:"data"` } `json:"data"`
} }
type ApplyLicenseRequest struct {
LicenseKey string `json:"key"`
}
type ListLicenseResponse map[string]interface{}
func convertLicenseV3ToListLicenseResponse(licensesV3 []*model.LicenseV3) []ListLicenseResponse {
listLicenses := []ListLicenseResponse{}
for _, license := range licensesV3 {
listLicenses = append(listLicenses, license.Data)
}
return listLicenses
}
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background()) licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil { if apiError != nil {
@@ -84,22 +69,15 @@ func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
} }
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
if ah.UseLicensesV3 {
// if the licenses v3 is toggled on then do not apply license in v2 and run the validator!
// TODO: remove after migration to v3 and deprecation from zeus
zap.L().Info("early return from apply license v2 call")
render.Success(w, http.StatusOK, nil)
return
}
var l model.License var l model.License
if err := json.NewDecoder(r.Body).Decode(&l); err != nil { if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
if l.Key == "" { if l.Key == "" {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil) RespondError(w, basemodel.BadRequest(fmt.Errorf("license key is required")), nil)
return return
} }
license, apiError := ah.LM().Activate(r.Context(), l.Key) license, apiError := ah.LM().Activate(r.Context(), l.Key)
@@ -111,68 +89,6 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, license) ah.Respond(w, license)
} }
func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicensesV3(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
ah.Respond(w, convertLicenseV3ToListLicenseResponse(licenses))
}
func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
activeLicense, err := ah.LM().GetRepo().GetActiveLicenseV3(r.Context())
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
// return 404 not found if there is no active license
if activeLicense == nil {
RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)
return
}
// TODO: deprecate this when we move away from the key for Stripe
activeLicense.Data["key"] = activeLicense.Key
render.Success(w, http.StatusOK, activeLicense.Data)
}
// applyLicenseV3 is called by zeus when inserting licenses into the query-service
func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
var licenseKey ApplyLicenseRequest
if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if licenseKey.LicenseKey == "" {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
_, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
if apiError != nil {
RespondError(w, apiError, nil)
return
}
render.Success(w, http.StatusAccepted, nil)
}
func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) {
apiError := ah.LM().RefreshLicense(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
render.Success(w, http.StatusNoContent, nil)
}
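For orientation, these v3 license handlers are unexported methods on APIHandler, so they are presumably registered inside the package on the gorilla/mux router this codebase already uses (mux.Vars appears in the PAT handlers below). A rough sketch; the paths and HTTP methods are assumptions, not taken from this diff:

// Hypothetical route registration for the v3 license endpoints.
router.HandleFunc("/api/v3/licenses", ah.listLicensesV3).Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses", ah.applyLicenseV3).Methods(http.MethodPost)
router.HandleFunc("/api/v3/licenses/active", ah.getActiveLicenseV3).Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses/refresh", ah.refreshLicensesV3).Methods(http.MethodPut)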
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
type checkoutResponse struct { type checkoutResponse struct {
@@ -185,20 +101,20 @@ func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
hClient := &http.Client{} hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body) req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
if err != nil { if err != nil {
RespondError(w, model.InternalError(err), nil) RespondError(w, basemodel.InternalError(err), nil)
return return
} }
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey) req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req) licenseResp, err := hClient.Do(req)
if err != nil { if err != nil {
RespondError(w, model.InternalError(err), nil) RespondError(w, basemodel.InternalError(err), nil)
return return
} }
// decode response body // decode response body
var resp checkoutResponse var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil { if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, model.InternalError(err), nil) RespondError(w, basemodel.InternalError(err), nil)
return return
} }
@@ -209,7 +125,7 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
licenseKey := r.URL.Query().Get("licenseKey") licenseKey := r.URL.Query().Get("licenseKey")
if licenseKey == "" { if licenseKey == "" {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil) RespondError(w, basemodel.BadRequest(fmt.Errorf("license key is required")), nil)
return return
} }
@@ -218,20 +134,20 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
hClient := &http.Client{} hClient := &http.Client{}
req, err := http.NewRequest("GET", billingURL, nil) req, err := http.NewRequest("GET", billingURL, nil)
if err != nil { if err != nil {
RespondError(w, model.InternalError(err), nil) RespondError(w, basemodel.InternalError(err), nil)
return return
} }
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey) req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
billingResp, err := hClient.Do(req) billingResp, err := hClient.Do(req)
if err != nil { if err != nil {
RespondError(w, model.InternalError(err), nil) RespondError(w, basemodel.InternalError(err), nil)
return return
} }
// decode response body // decode response body
var billingResponse billingDetails var billingResponse billingDetails
if err := json.NewDecoder(billingResp.Body).Decode(&billingResponse); err != nil { if err := json.NewDecoder(billingResp.Body).Decode(&billingResponse); err != nil {
RespondError(w, model.InternalError(err), nil) RespondError(w, basemodel.InternalError(err), nil)
return return
} }
@@ -239,49 +155,11 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, billingResponse.Data) ah.Respond(w, billingResponse.Data)
} }
func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
licensesV2 := []model.License{}
for _, l := range licenses {
planKeyFromPlanName, ok := model.MapOldPlanKeyToNewPlanName[l.PlanName]
if !ok {
planKeyFromPlanName = model.Basic
}
licenseV2 := model.License{
Key: l.Key,
ActivationId: "",
PlanDetails: "",
FeatureSet: l.Features,
ValidationMessage: "",
IsCurrent: l.IsCurrent,
LicensePlan: model.LicensePlan{
PlanKey: planKeyFromPlanName,
ValidFrom: l.ValidFrom,
ValidUntil: l.ValidUntil,
Status: l.Status},
}
licensesV2 = append(licensesV2, licenseV2)
}
return licensesV2
}
func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
var licenses []model.License licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
if ah.UseLicensesV3 { RespondError(w, apiError, nil)
licensesV3, err := ah.LM().GetLicensesV3(r.Context())
if err != nil {
RespondError(w, err, nil)
return
}
licenses = convertLicenseV3ToLicenseV2(licensesV3)
} else {
_licenses, apiError := ah.LM().GetLicenses(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
licenses = _licenses
} }
resp := model.Licenses{ resp := model.Licenses{
@@ -374,20 +252,20 @@ func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
hClient := &http.Client{} hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body) req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
if err != nil { if err != nil {
RespondError(w, model.InternalError(err), nil) RespondError(w, basemodel.InternalError(err), nil)
return return
} }
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey) req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req) licenseResp, err := hClient.Do(req)
if err != nil { if err != nil {
RespondError(w, model.InternalError(err), nil) RespondError(w, basemodel.InternalError(err), nil)
return return
} }
// decode response body // decode response body
var resp checkoutResponse var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil { if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, model.InternalError(err), nil) RespondError(w, basemodel.InternalError(err), nil)
return return
} }

View File

@@ -31,13 +31,13 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
req := model.CreatePATRequestBody{} req := model.CreatePATRequestBody{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
user, err := auth.GetUserFromRequest(r) user, err := auth.GetUserFromRequest(r)
if err != nil { if err != nil {
RespondError(w, &model.ApiError{ RespondError(w, &basemodel.ApiError{
Typ: model.ErrorUnauthorized, Typ: basemodel.ErrorUnauthorized,
Err: err, Err: err,
}, nil) }, nil)
return return
@@ -49,7 +49,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
} }
err = validatePATRequest(pat) err = validatePATRequest(pat)
if err != nil { if err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
@@ -66,7 +66,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
} }
zap.L().Info("Got Create PAT request", zap.Any("pat", pat)) zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
var apierr basemodel.BaseApiError var apierr *basemodel.ApiError
if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil { if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
RespondError(w, apierr, nil) RespondError(w, apierr, nil)
return return
@@ -93,14 +93,14 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
req := model.PAT{} req := model.PAT{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
user, err := auth.GetUserFromRequest(r) user, err := auth.GetUserFromRequest(r)
if err != nil { if err != nil {
RespondError(w, &model.ApiError{ RespondError(w, &basemodel.ApiError{
Typ: model.ErrorUnauthorized, Typ: basemodel.ErrorUnauthorized,
Err: err, Err: err,
}, nil) }, nil)
return return
@@ -108,7 +108,7 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
err = validatePATRequest(req) err = validatePATRequest(req)
if err != nil { if err != nil {
RespondError(w, model.BadRequest(err), nil) RespondError(w, basemodel.BadRequest(err), nil)
return return
} }
@@ -116,7 +116,7 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["id"] id := mux.Vars(r)["id"]
req.UpdatedAt = time.Now().Unix() req.UpdatedAt = time.Now().Unix()
zap.L().Info("Got Update PAT request", zap.Any("pat", req)) zap.L().Info("Got Update PAT request", zap.Any("pat", req))
var apierr basemodel.BaseApiError var apierr *basemodel.ApiError
if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil { if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
RespondError(w, apierr, nil) RespondError(w, apierr, nil)
return return
@@ -129,8 +129,8 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
ctx := context.Background() ctx := context.Background()
user, err := auth.GetUserFromRequest(r) user, err := auth.GetUserFromRequest(r)
if err != nil { if err != nil {
RespondError(w, &model.ApiError{ RespondError(w, &basemodel.ApiError{
Typ: model.ErrorUnauthorized, Typ: basemodel.ErrorUnauthorized,
Err: err, Err: err,
}, nil) }, nil)
return return
@@ -149,8 +149,8 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["id"] id := mux.Vars(r)["id"]
user, err := auth.GetUserFromRequest(r) user, err := auth.GetUserFromRequest(r)
if err != nil { if err != nil {
RespondError(w, &model.ApiError{ RespondError(w, &basemodel.ApiError{
Typ: model.ErrorUnauthorized, Typ: basemodel.ErrorUnauthorized,
Err: err, Err: err,
}, nil) }, nil)
return return

View File

@@ -1,129 +0,0 @@
package api
import (
"bytes"
"fmt"
"io"
"net/http"
"go.signoz.io/signoz/ee/query-service/anomaly"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.uber.org/zap"
)
func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
bodyBytes, _ := io.ReadAll(r.Body)
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
queryRangeParams, apiErrorObj := baseapp.ParseQueryRangeParams(r)
if apiErrorObj != nil {
zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
queryRangeParams.Version = "v4"
// add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
if temporalityErr != nil {
zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
anomalyQueryExists := false
anomalyQuery := &v3.BuilderQuery{}
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
for _, fn := range query.Functions {
if fn.Name == v3.FunctionNameAnomaly {
anomalyQueryExists = true
anomalyQuery = query
break
}
}
}
}
if anomalyQueryExists {
// ensure all queries have metric data source, and there should be only one anomaly query
for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
// What is query.QueryName == query.Expression doing here?
// In the current implementation, the way to recognize whether a query is a formula is by
// checking if the expression is the same as the query name. If the expression is different,
// then it is a formula; otherwise, it is a simple builder query.
if query.DataSource != v3.DataSourceMetrics && query.QueryName == query.Expression {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("all queries must have metric data source")}, nil)
return
}
}
// get the threshold, and seasonality from the anomaly query
var seasonality anomaly.Seasonality
for _, fn := range anomalyQuery.Functions {
if fn.Name == v3.FunctionNameAnomaly {
seasonalityStr, ok := fn.NamedArgs["seasonality"].(string)
if !ok {
seasonalityStr = "daily"
}
if seasonalityStr == "weekly" {
seasonality = anomaly.SeasonalityWeekly
} else if seasonalityStr == "daily" {
seasonality = anomaly.SeasonalityDaily
} else {
seasonality = anomaly.SeasonalityHourly
}
break
}
}
var provider anomaly.Provider
switch seasonality {
case anomaly.SeasonalityWeekly:
provider = anomaly.NewWeeklyProvider(
anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
)
case anomaly.SeasonalityDaily:
provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
)
case anomaly.SeasonalityHourly:
provider = anomaly.NewHourlyProvider(
anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
)
default:
provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
)
}
anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
resp := v3.QueryRangeResponse{
Result: anomalies.Results,
ResultType: "anomaly",
}
aH.Respond(w, resp)
} else {
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
aH.QueryRangeV4(w, r)
}
}
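The provider constructors above (anomaly.NewWeeklyProvider with anomaly.WithCache[*anomaly.WeeklyProvider](...) and friends) use a generics-based functional-options pattern, which lets one family of option helpers configure several provider types. A self-contained sketch of the pattern with invented names, not the anomaly package's actual code:

package main

import "fmt"

// GenericOption configures any provider type P.
type GenericOption[P any] func(P)

type DailyProvider struct{ cacheSize int }

func (d *DailyProvider) setCacheSize(n int) { d.cacheSize = n }

// WithCacheSize works for every provider type that exposes setCacheSize.
func WithCacheSize[P interface{ setCacheSize(int) }](n int) GenericOption[P] {
	return func(p P) { p.setCacheSize(n) }
}

func NewDailyProvider(opts ...GenericOption[*DailyProvider]) *DailyProvider {
	d := &DailyProvider{}
	for _, opt := range opts {
		opt(d) // each option mutates the provider in place
	}
	return d
}

func main() {
	p := NewDailyProvider(WithCacheSize[*DailyProvider](128))
	fmt.Println(p.cacheSize) // 128
}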

View File

@@ -7,6 +7,6 @@ import (
basemodel "go.signoz.io/signoz/pkg/query-service/model" basemodel "go.signoz.io/signoz/pkg/query-service/model"
) )
func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) { func RespondError(w http.ResponseWriter, apiErr *basemodel.ApiError, data interface{}) {
baseapp.RespondError(w, apiErr, data) baseapp.RespondError(w, apiErr)
} }

View File

@@ -4,7 +4,6 @@ import (
"net/http" "net/http"
"go.signoz.io/signoz/ee/query-service/app/db" "go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/model"
baseapp "go.signoz.io/signoz/pkg/query-service/app" baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model" basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap" "go.uber.org/zap"
@@ -19,7 +18,7 @@ func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
} }
searchTracesParams, err := baseapp.ParseSearchTracesParams(r) searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
if err != nil { if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params") RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, "Error reading params")
return return
} }

View File

@@ -0,0 +1,401 @@
package db
import (
"context"
"crypto/md5"
"encoding/json"
"fmt"
"reflect"
"regexp"
"sort"
"strings"
"time"
"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
// GetMetricResultEE runs the query and returns list of time series
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
defer utils.Elapsed("GetMetricResult")()
zap.L().Info("Executing metric result query: ", zap.String("query", query))
var hash string
// If the getSubTreeSpans function is used in the ClickHouse query
if strings.Contains(query, "getSubTreeSpans(") {
var err error
query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
// fmt.Errorf values are never equal under ==; compare the message instead
if err != nil && err.Error() == "no spans found for the given query" {
return nil, "", nil
}
if err != nil {
return nil, "", err
}
}
rows, err := r.conn.Query(ctx, query)
if err != nil {
zap.L().Error("Error in processing query", zap.Error(err))
return nil, "", fmt.Errorf("error in processing query")
}
var (
columnTypes = rows.ColumnTypes()
columnNames = rows.Columns()
vars = make([]interface{}, len(columnTypes))
)
for i := range columnTypes {
vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
}
// When group by is applied, each combination in the cartesian product
// of attributes is a separate series. Each item in metricPointsMap
// represents a unique series.
metricPointsMap := make(map[string][]basemodel.MetricPoint)
// attribute key-value pairs for each group selection
attributesMap := make(map[string]map[string]string)
defer rows.Close()
for rows.Next() {
if err := rows.Scan(vars...); err != nil {
return nil, "", err
}
var groupBy []string
var metricPoint basemodel.MetricPoint
groupAttributes := make(map[string]string)
// Assuming that the end result row contains a timestamp, a value, and optional labels.
// Label keys and values are both strings.
for idx, v := range vars {
colName := columnNames[idx]
switch v := v.(type) {
case *string:
// special case for returning all labels
if colName == "fullLabels" {
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
return nil, "", err
}
for key, val := range metric {
groupBy = append(groupBy, val)
groupAttributes[key] = val
}
} else {
groupBy = append(groupBy, *v)
groupAttributes[colName] = *v
}
case *time.Time:
metricPoint.Timestamp = v.UnixMilli()
case *float64:
metricPoint.Value = *v
case **float64:
// ClickHouse seems to return this type when the column is derived from
// an expression like SELECT count(*) / SELECT count(*)
floatVal := *v
if floatVal != nil {
metricPoint.Value = *floatVal
}
case *float32:
float32Val := float32(*v)
metricPoint.Value = float64(float32Val)
case *uint8, *uint64, *uint16, *uint32:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
}
case *int8, *int16, *int32, *int64:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName))
}
}
sort.Strings(groupBy)
// join with a separator so that, e.g., ["ab","c"] and ["a","bc"] yield distinct series keys
key := strings.Join(groupBy, ",")
attributesMap[key] = groupAttributes
metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
}
var seriesList []*basemodel.Series
for key := range metricPointsMap {
points := metricPointsMap[key]
// the first point in each series could be invalid since the
// aggregations are applied with a point from the previous series
if len(points) > 1 {
points = points[1:]
}
attributes := attributesMap[key]
series := basemodel.Series{Labels: attributes, Points: points}
seriesList = append(seriesList, &series)
}
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
// if err != nil {
// zap.L().Error("Error in dropping temporary table: ", err)
// return nil, err
// }
if hash == "" {
return seriesList, hash, nil
} else {
return seriesList, "getSubTreeSpans" + hash, nil
}
}
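The scan loop above leans on a generic clickhouse-go idiom: allocate one destination per column from the driver-reported scan types, then switch on the concrete pointer type. Condensed to its essentials, assuming a clickhouse-go v2 driver.Rows; this is a sketch, not the reader's code:

// scanDynamicRows shows the reflect-based scanning pattern in isolation.
func scanDynamicRows(rows driver.Rows) error {
	columnTypes := rows.ColumnTypes()
	vars := make([]interface{}, len(columnTypes))
	for i := range columnTypes {
		// Allocate a fresh *T for whatever Go type the driver reports.
		vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
	}
	for rows.Next() {
		if err := rows.Scan(vars...); err != nil {
			return err
		}
		for _, v := range vars {
			switch v := v.(type) {
			case *string:
				fmt.Println(*v)
			case *float64:
				fmt.Println(*v)
			default:
				// Fall back to reflection for the remaining numeric widths.
				fmt.Println(reflect.ValueOf(v).Elem().Interface())
			}
		}
	}
	return rows.Err()
}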
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
zap.L().Debug("Executing getSubTreeSpans function")
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
// process the query to fetch subTree query
var subtreeInput string
query, subtreeInput, hash = processQuery(query, hash)
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
if err != nil {
zap.L().Error("Error in dropping temporary table", zap.Error(err))
return query, hash, err
}
// Create temporary table to store the getSubTreeSpans() results
zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash))
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
if err != nil {
zap.L().Error("Error in creating temporary table", zap.Error(err))
return query, hash, err
}
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
getSpansSubQuery := subtreeInput
// Execute the subTree query
zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery))
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
// zap.L().Info(getSpansSubQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, fmt.Errorf("error in processing sql query")
}
var searchScanResponses []basemodel.SearchSpanDBResponseItem
// TODO(@ankit): I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this up as an improvement later.
// Fetch all the spans of the same TraceID so that we can build the subtree
modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
if len(getSpansSubQueryDBResponses) == 0 {
return query, hash, fmt.Errorf("no spans found for the given query")
}
zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery))
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, fmt.Errorf("error in processing sql query")
}
// Process model to fetch the spans
zap.L().Debug("Processing model to fetch the spans")
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
for _, item := range searchScanResponses {
var jsonItem basemodel.SearchSpanResponseItem
if err := json.Unmarshal([]byte(item.Model), &jsonItem); err != nil {
zap.L().Error("failed to unmarshal span model", zap.Error(err))
continue
}
jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano())
if jsonItem.Events == nil {
jsonItem.Events = []string{}
}
searchSpanResponses = append(searchSpanResponses, jsonItem)
}
// Build the subtree and store all the subtree spans in the temporary table getSubTreeSpans+hash.
// Use a map to store pointers to the spans to avoid duplicates and save memory.
zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
if err != nil {
zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err))
return query, hash, err
}
zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
statement, err := r.conn.PrepareBatch(context.Background(), "INSERT INTO getSubTreeSpans"+hash)
if err != nil {
zap.L().Error("Error in preparing batch statement", zap.Error(err))
return query, hash, err
}
for _, span := range treeSearchResponse {
var parentID string
if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" {
parentID = span.References[0].SpanId
}
err = statement.Append(
time.Unix(0, int64(span.TimeUnixNano)),
span.TraceID,
span.SpanID,
parentID,
span.RootSpanID,
span.ServiceName,
span.Name,
span.RootName,
uint64(span.DurationNano),
int8(span.Kind),
span.TagMap,
span.Events,
)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, err
}
}
zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
err = statement.Send()
if err != nil {
zap.L().Error("Error in sending statement", zap.Error(err))
return query, hash, err
}
return query, hash, nil
}
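The insert above follows the standard clickhouse-go v2 batch flow. Reduced to its skeleton, assuming a driver.Conn and a spans slice with illustrative field names:

// Minimal batch-insert flow mirroring the code above.
batch, err := conn.PrepareBatch(ctx, "INSERT INTO getSubTreeSpans"+hash)
if err != nil {
	return err
}
for _, span := range spans {
	// Append buffers one row locally; nothing is sent yet.
	if err := batch.Append(span.Timestamp, span.TraceID, span.SpanID); err != nil {
		return err
	}
}
// Send flushes the whole batch to ClickHouse in one round trip.
return batch.Send()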
//lint:ignore SA4009 the returned hash is fed back into the query
func processQuery(query string, hash string) (string, string, string) {
re3 := regexp.MustCompile(`getSubTreeSpans`)
submatchall3 := re3.FindAllStringIndex(query, -1)
getSubtreeSpansMatchIndex := submatchall3[0][1]
query2countParenthesis := query[getSubtreeSpansMatchIndex:]
sqlCompleteIndex := 0
countParenthesisImbalance := 0
for i, char := range query2countParenthesis {
if string(char) == "(" {
countParenthesisImbalance += 1
}
if string(char) == ")" {
countParenthesisImbalance -= 1
}
if countParenthesisImbalance == 0 {
sqlCompleteIndex = i
break
}
}
subtreeInput := query2countParenthesis[1:sqlCompleteIndex]
// hash the subtreeInput
hmd5 := md5.Sum([]byte(subtreeInput))
hash = fmt.Sprintf("%x", hmd5)
// Reformat the query to use the getSubTreeSpans function
query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:]
return query, subtreeInput, hash
}
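Concretely, the parenthesis counter locates the matching close of getSubTreeSpans(...), the inner SELECT becomes subtreeInput, and the call site is rewritten to the md5-suffixed temporary table name. An illustrative before/after; the filter is invented:

// Input (illustrative):
//   SELECT count() FROM getSubTreeSpans(SELECT * FROM signoz_traces.signoz_index_v2 WHERE name='FindDriverIDs') GROUP BY interval
// Output, where <hash> is the md5 hex digest of the inner SELECT:
//   SELECT count() FROM getSubTreeSpans<hash> GROUP BY interval
// subtreeInput: SELECT * FROM signoz_traces.signoz_index_v2 WHERE name='FindDriverIDs'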
// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans
func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) {
var spans []*model.SpanForTraceDetails
for _, spanItem := range payload {
var parentID string
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
parentID = spanItem.References[0].SpanId
}
span := &model.SpanForTraceDetails{
TimeUnixNano: spanItem.TimeUnixNano,
SpanID: spanItem.SpanID,
TraceID: spanItem.TraceID,
ServiceName: spanItem.ServiceName,
Name: spanItem.Name,
Kind: spanItem.Kind,
DurationNano: spanItem.DurationNano,
TagMap: spanItem.TagMap,
ParentID: parentID,
Events: spanItem.Events,
HasError: spanItem.HasError,
}
spans = append(spans, span)
}
zap.L().Debug("Building Tree")
roots, err := buildSpanTrees(&spans)
if err != nil {
return nil, err
}
searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem)
// Every span fetched by the getSubTreeSpans input SQL query is considered a root.
// For each root, get the subtree spans.
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
targetSpan := &model.SpanForTraceDetails{}
// zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
// Search target span object in the tree
for _, root := range roots {
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
if targetSpan != nil {
break
}
if err != nil {
zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
return nil, err
}
}
if targetSpan == nil {
return nil, nil
}
// Build subtree for the target span
// Mark the target span as root by setting parent ID as empty string
targetSpan.ParentID = ""
preParents := []*model.SpanForTraceDetails{targetSpan}
children := []*model.SpanForTraceDetails{}
// Get the subtree child spans
for i := 0; len(preParents) != 0; i++ {
parents := []*model.SpanForTraceDetails{}
for _, parent := range preParents {
children = append(children, parent.Children...)
parents = append(parents, parent.Children...)
}
preParents = parents
}
resultSpans := children
// Add the target span to the result spans
resultSpans = append(resultSpans, targetSpan)
for _, item := range resultSpans {
references := []basemodel.OtelSpanRef{
{
TraceId: item.TraceID,
SpanId: item.ParentID,
RefType: "CHILD_OF",
},
}
if item.Events == nil {
item.Events = []string{}
}
searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{
TimeUnixNano: item.TimeUnixNano,
SpanID: item.SpanID,
TraceID: item.TraceID,
ServiceName: item.ServiceName,
Name: item.Name,
Kind: item.Kind,
References: references,
DurationNano: item.DurationNano,
TagMap: item.TagMap,
Events: item.Events,
HasError: item.HasError,
RootSpanID: getSpansSubQueryDBResponse.SpanID,
RootName: targetSpan.Name,
}
}
}
return searchSpansResult, nil
}
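breadthFirstSearch is defined elsewhere in this package; given how it is called above (a root plus a target span ID, returning the matching node or nil), a plausible reconstruction, assuming SpanForTraceDetails carries the Children slice that the loop over parent.Children above implies:

// Hypothetical sketch of breadthFirstSearch: a plain level-order walk.
func breadthFirstSearch(root *model.SpanForTraceDetails, targetId string) (*model.SpanForTraceDetails, error) {
	queue := []*model.SpanForTraceDetails{root}
	for len(queue) > 0 {
		node := queue[0]
		queue = queue[1:]
		if node.SpanID == targetId {
			return node, nil
		}
		queue = append(queue, node.Children...)
	}
	return nil, nil // not found in this tree; the caller tries the next root
}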

View File

@@ -25,10 +25,8 @@ func NewDataConnector(
maxOpenConns int, maxOpenConns int,
dialTimeout time.Duration, dialTimeout time.Duration,
cluster string, cluster string,
useLogsNewSchema bool,
useTraceNewSchema bool,
) *ClickhouseReader { ) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema, useTraceNewSchema) ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
return &ClickhouseReader{ return &ClickhouseReader{
conn: ch.GetConn(), conn: ch.GetConn(),
appdb: localDB, appdb: localDB,

View File

@@ -1,15 +1,14 @@
package app package app
import ( import (
"bufio"
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"net" "net"
"net/http" "net/http"
"net/http/httputil"
_ "net/http/pprof" // http profiler _ "net/http/pprof" // http profiler
"os" "os"
"regexp" "regexp"
@@ -28,9 +27,7 @@ import (
"go.signoz.io/signoz/ee/query-service/dao" "go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/integrations/gateway" "go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces" "go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/rules"
baseauth "go.signoz.io/signoz/pkg/query-service/auth" baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/migrate"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3" v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
licensepkg "go.signoz.io/signoz/ee/query-service/license" licensepkg "go.signoz.io/signoz/ee/query-service/license"
@@ -44,7 +41,6 @@ import (
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline" "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/app/opamp" "go.signoz.io/signoz/pkg/query-service/app/opamp"
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model" opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
"go.signoz.io/signoz/pkg/query-service/app/preferences"
"go.signoz.io/signoz/pkg/query-service/cache" "go.signoz.io/signoz/pkg/query-service/cache"
baseconst "go.signoz.io/signoz/pkg/query-service/constants" baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck" "go.signoz.io/signoz/pkg/query-service/healthcheck"
@@ -52,7 +48,7 @@ import (
baseint "go.signoz.io/signoz/pkg/query-service/interfaces" baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model" basemodel "go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine" pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
baserules "go.signoz.io/signoz/pkg/query-service/rules" rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/telemetry" "go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/query-service/utils" "go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap" "go.uber.org/zap"
@@ -68,6 +64,7 @@ type ServerOptions struct {
// alert specific params // alert specific params
DisableRules bool DisableRules bool
RuleRepoURL string RuleRepoURL string
PreferDelta bool
PreferSpanMetrics bool PreferSpanMetrics bool
MaxIdleConns int MaxIdleConns int
MaxOpenConns int MaxOpenConns int
@@ -76,15 +73,12 @@ type ServerOptions struct {
FluxInterval string FluxInterval string
Cluster string Cluster string
GatewayUrl string GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
UseLicensesV3 bool
} }
// Server runs HTTP api service // Server runs HTTP api service
type Server struct { type Server struct {
serverOptions *ServerOptions serverOptions *ServerOptions
ruleManager *baserules.Manager ruleManager *rules.Manager
// public http router // public http router
httpConn net.Listener httpConn net.Listener
@@ -117,10 +111,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH) baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)
if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
return nil, err
}
localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH) localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
if err != nil { if err != nil {
@@ -129,13 +119,33 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
localDB.SetMaxOpenConns(10) localDB.SetMaxOpenConns(10)
gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix) gatewayFeature := basemodel.Feature{
if err != nil { Name: "GATEWAY",
return nil, err Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
}
// Activate this feature if the gateway URL is not empty
var gatewayProxy *httputil.ReverseProxy
if serverOptions.GatewayUrl == "" {
gatewayFeature.Active = false
gatewayProxy, err = gateway.NewNoopProxy()
if err != nil {
return nil, err
}
} else {
zap.L().Info("Enabling gateway feature flag ...")
gatewayFeature.Active = true
gatewayProxy, err = gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
if err != nil {
return nil, err
}
} }
// initiate license manager // initiate license manager
lm, err := licensepkg.StartManager("sqlite", localDB, serverOptions.UseLicensesV3) lm, err := licensepkg.StartManager("sqlite", localDB, gatewayFeature)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -156,8 +166,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.MaxOpenConns, serverOptions.MaxOpenConns,
serverOptions.DialTimeout, serverOptions.DialTimeout,
serverOptions.Cluster, serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
) )
go qb.Start(readerReady) go qb.Start(readerReady)
reader = qb reader = qb
@@ -172,14 +180,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err return nil, err
} }
} }
var c cache.Cache
if serverOptions.CacheConfigPath != "" {
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
if err != nil {
return nil, err
}
c = cache.NewCache(cacheOpts)
}
<-readerReady <-readerReady
rm, err := makeRulesManager(serverOptions.PromConfigPath, rm, err := makeRulesManager(serverOptions.PromConfigPath,
@@ -187,24 +187,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.RuleRepoURL, serverOptions.RuleRepoURL,
localDB, localDB,
reader, reader,
c,
serverOptions.DisableRules, serverOptions.DisableRules,
lm, lm)
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
)
if err != nil { if err != nil {
return nil, err return nil, err
} }
go func() {
err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
if err != nil {
zap.L().Error("error while running clickhouse migrations", zap.Error(err))
}
}()
// initiate opamp // initiate opamp
_, err = opAmpModel.InitDB(localDB) _, err = opAmpModel.InitDB(localDB)
if err != nil { if err != nil {
@@ -249,6 +238,15 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
telemetry.GetInstance().SetReader(reader) telemetry.GetInstance().SetReader(reader)
telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey) telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
var c cache.Cache
if serverOptions.CacheConfigPath != "" {
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
if err != nil {
return nil, err
}
c = cache.NewCache(cacheOpts)
}
fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval) fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
if err != nil { if err != nil {
@@ -258,6 +256,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
apiOpts := api.APIHandlerOptions{ apiOpts := api.APIHandlerOptions{
DataConnector: reader, DataConnector: reader,
SkipConfig: skipConfig, SkipConfig: skipConfig,
PreferDelta: serverOptions.PreferDelta,
PreferSpanMetrics: serverOptions.PreferSpanMetrics, PreferSpanMetrics: serverOptions.PreferSpanMetrics,
MaxIdleConns: serverOptions.MaxIdleConns, MaxIdleConns: serverOptions.MaxIdleConns,
MaxOpenConns: serverOptions.MaxOpenConns, MaxOpenConns: serverOptions.MaxOpenConns,
@@ -272,9 +271,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
Cache: c, Cache: c,
FluxInterval: fluxInterval, FluxInterval: fluxInterval,
Gateway: gatewayProxy, Gateway: gatewayProxy,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
UseLicensesV3: serverOptions.UseLicensesV3,
} }
apiHandler, err := api.NewAPIHandler(apiOpts) apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -317,10 +313,10 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
r := baseapp.NewRouter() r := baseapp.NewRouter()
r.Use(baseapp.LogCommentEnricher)
r.Use(setTimeoutMiddleware) r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware) r.Use(s.analyticsMiddleware)
r.Use(loggingMiddlewarePrivate) r.Use(loggingMiddlewarePrivate)
r.Use(baseapp.LogCommentEnricher)
apiHandler.RegisterPrivateRoutes(r) apiHandler.RegisterPrivateRoutes(r)
@@ -329,7 +325,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
// ip here for alert manager // ip here for alert manager
AllowedOrigins: []string{"*"}, AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"}, AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"}, AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY"},
}) })
handler := c.Handler(r) handler := c.Handler(r)
@@ -346,38 +342,25 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
// add auth middleware // add auth middleware
getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) { getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
user, err := auth.GetUserFromRequest(r, apiHandler) return auth.GetUserFromRequest(r, apiHandler)
if err != nil {
return nil, err
}
if user.User.OrgId == "" {
return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
}
return user, nil
} }
am := baseapp.NewAuthMiddleware(getUserFromRequest) am := baseapp.NewAuthMiddleware(getUserFromRequest)
r.Use(baseapp.LogCommentEnricher)
r.Use(setTimeoutMiddleware) r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware) r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware) r.Use(loggingMiddleware)
r.Use(baseapp.LogCommentEnricher)
apiHandler.RegisterRoutes(r, am) apiHandler.RegisterRoutes(r, am)
apiHandler.RegisterLogsRoutes(r, am) apiHandler.RegisterLogsRoutes(r, am)
apiHandler.RegisterIntegrationRoutes(r, am) apiHandler.RegisterIntegrationRoutes(r, am)
apiHandler.RegisterQueryRangeV3Routes(r, am) apiHandler.RegisterQueryRangeV3Routes(r, am)
apiHandler.RegisterInfraMetricsRoutes(r, am)
apiHandler.RegisterQueryRangeV4Routes(r, am) apiHandler.RegisterQueryRangeV4Routes(r, am)
apiHandler.RegisterWebSocketPaths(r, am)
apiHandler.RegisterMessagingQueuesRoutes(r, am)
c := cors.New(cors.Options{ c := cors.New(cors.Options{
AllowedOrigins: []string{"*"}, AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"}, AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"}, AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control"},
}) })
handler := c.Handler(r) handler := c.Handler(r)
@@ -389,7 +372,6 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
}, nil }, nil
} }
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddleware is used for logging public api calls // loggingMiddleware is used for logging public api calls
func loggingMiddleware(next http.Handler) http.Handler { func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -401,7 +383,6 @@ func loggingMiddleware(next http.Handler) http.Handler {
}) })
} }
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddlewarePrivate is used for logging private api calls // loggingMiddlewarePrivate is used for logging private api calls
// from internal services like alert manager // from internal services like alert manager
func loggingMiddlewarePrivate(next http.Handler) http.Handler { func loggingMiddlewarePrivate(next http.Handler) http.Handler {
@@ -414,41 +395,27 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
}) })
} }
// TODO(remove): Implemented at pkg/http/middleware/logging.go
type loggingResponseWriter struct { type loggingResponseWriter struct {
http.ResponseWriter http.ResponseWriter
statusCode int statusCode int
} }
// TODO(remove): Implemented at pkg/http/middleware/logging.go
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter { func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
// WriteHeader(int) is not called if our response implicitly returns 200 OK, so // WriteHeader(int) is not called if our response implicitly returns 200 OK, so
// we default to that status code. // we default to that status code.
return &loggingResponseWriter{w, http.StatusOK} return &loggingResponseWriter{w, http.StatusOK}
} }
// TODO(remove): Implemented at pkg/http/middleware/logging.go
func (lrw *loggingResponseWriter) WriteHeader(code int) { func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code) lrw.ResponseWriter.WriteHeader(code)
} }
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Flush implements the http.Flush interface. // Flush implements the http.Flush interface.
func (lrw *loggingResponseWriter) Flush() { func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush() lrw.ResponseWriter.(http.Flusher).Flush()
} }
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Support websockets
func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := lrw.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("hijack not supported")
}
return h.Hijack()
}
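The Hijack passthrough matters because loggingResponseWriter wraps the real http.ResponseWriter: a WebSocket upgrade asserts its writer to http.Hijacker, and without this method the assertion fails through the wrapper and the upgrade is refused. A minimal illustration of the call site an upgrader relies on (handler name invented):

func upgrade(w http.ResponseWriter, r *http.Request) {
	hj, ok := w.(http.Hijacker) // false if w is a wrapper without Hijack
	if !ok {
		http.Error(w, "webserver doesn't support hijacking", http.StatusInternalServerError)
		return
	}
	conn, bufrw, err := hj.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer conn.Close()
	_ = bufrw // the raw TCP connection now belongs to the handler
}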
func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) { func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFromV3 := "/api/v3/query_range" pathToExtractBodyFromV3 := "/api/v3/query_range"
pathToExtractBodyFromV4 := "/api/v4/query_range" pathToExtractBodyFromV4 := "/api/v4/query_range"
@@ -585,7 +552,6 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
}) })
} }
// TODO(remove): Implemented at pkg/http/middleware/timeout.go
func setTimeoutMiddleware(next http.Handler) http.Handler { func setTimeoutMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() ctx := r.Context()
@@ -738,11 +704,8 @@ func makeRulesManager(
ruleRepoURL string, ruleRepoURL string,
db *sqlx.DB, db *sqlx.DB,
ch baseint.Reader, ch baseint.Reader,
cache cache.Cache,
disableRules bool, disableRules bool,
fm baseint.FeatureLookup, fm baseint.FeatureLookup) (*rules.Manager, error) {
useLogsNewSchema bool,
useTraceNewSchema bool) (*baserules.Manager, error) {
// create engine // create engine
pqle, err := pqle.FromConfigPath(promConfigPath) pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -758,27 +721,23 @@ func makeRulesManager(
} }
// create manager opts // create manager opts
managerOpts := &baserules.ManagerOptions{ managerOpts := &rules.ManagerOptions{
NotifierOpts: notifierOpts, NotifierOpts: notifierOpts,
PqlEngine: pqle, Queriers: &rules.Queriers{
PqlEngine: pqle,
Ch: ch.GetConn(),
},
RepoURL: ruleRepoURL, RepoURL: ruleRepoURL,
DBConn: db, DBConn: db,
Context: context.Background(), Context: context.Background(),
Logger: zap.L(), Logger: nil,
DisableRules: disableRules, DisableRules: disableRules,
FeatureFlags: fm, FeatureFlags: fm,
Reader: ch, Reader: ch,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
} }
// create Manager // create Manager
manager, err := baserules.NewManager(managerOpts) manager, err := rules.NewManager(managerOpts)
if err != nil { if err != nil {
return nil, fmt.Errorf("rule manager error: %v", err) return nil, fmt.Errorf("rule manager error: %v", err)
} }

View File

@@ -11,11 +11,8 @@ const (
var LicenseSignozIo = "https://license.signoz.io/api/v1" var LicenseSignozIo = "https://license.signoz.io/api/v1"
var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "") var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "") var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false") var SpanRenderLimitStr = GetOrDefaultEnv("SPAN_RENDER_LIMIT", "2500")
var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL") var MaxSpansInTraceStr = GetOrDefaultEnv("MAX_SPANS_IN_TRACE", "250000")
// this is set via build time variable
var ZeusURL = "https://api.signoz.cloud"
func GetOrDefaultEnv(key string, fallback string) string { func GetOrDefaultEnv(key string, fallback string) string {
v := os.Getenv(key) v := os.Getenv(key)

View File

@@ -21,24 +21,24 @@ type ModelDao interface {
DB() *sqlx.DB DB() *sqlx.DB
// auth methods // auth methods
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) CanUsePassword(ctx context.Context, email string) (bool, *basemodel.ApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr *basemodel.ApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
// org domain (auth domains) CRUD ops // org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, *basemodel.ApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, *basemodel.ApiError)
CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError CreateDomain(ctx context.Context, d *model.OrgDomain) *basemodel.ApiError
UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError UpdateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError DeleteDomain(ctx context.Context, id uuid.UUID) *basemodel.ApiError
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, *basemodel.ApiError)
CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, *basemodel.ApiError)
UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError UpdatePAT(ctx context.Context, p model.PAT, id string) *basemodel.ApiError
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError) GetPAT(ctx context.Context, pat string) (*model.PAT, *basemodel.ApiError)
UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) *basemodel.ApiError
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) GetPATByID(ctx context.Context, id string) (*model.PAT, *basemodel.ApiError)
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, *basemodel.ApiError)
ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) ListPATs(ctx context.Context) ([]model.PAT, *basemodel.ApiError)
RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError RevokePAT(ctx context.Context, id string, userID string) *basemodel.ApiError
} }

View File

@@ -17,22 +17,19 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
) )
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) { func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, *basemodel.ApiError) {
// get auth domain from email domain // get auth domain from email domain
domain, apierr := m.GetDomainByEmail(ctx, email) domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil { if apierr != nil {
zap.L().Error("failed to get domain from email", zap.Error(apierr)) zap.L().Error("failed to get domain from email", zap.Error(apierr))
return nil, model.InternalErrorStr("failed to get domain from email") return nil, basemodel.InternalError(fmt.Errorf("failed to get domain from email"))
}
if domain == nil {
zap.L().Error("email domain does not match any authenticated domain", zap.String("email", email))
return nil, model.InternalErrorStr("email domain does not match any authenticated domain")
} }
hash, err := baseauth.PasswordHash(utils.GeneratePassowrd()) hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
if err != nil { if err != nil {
zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err)) zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
return nil, model.InternalErrorStr("failed to generate password hash") return nil, basemodel.InternalError(fmt.Errorf("failed to generate password hash"))
} }
group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup) group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
@@ -64,12 +61,12 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
// PrepareSsoRedirect prepares redirect page link after SSO response // PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available) // is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) { func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr *basemodel.ApiError) {
userPayload, apierr := m.GetUserByEmail(ctx, email) userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() { if !apierr.IsNil() {
zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error())) zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
return "", model.BadRequestStr("invalid user email received from the auth provider") return "", basemodel.BadRequest(fmt.Errorf("invalid user email received from the auth provider"))
} }
user := &basemodel.User{} user := &basemodel.User{}
@@ -88,7 +85,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
tokenStore, err := baseauth.GenerateJWTForUser(user) tokenStore, err := baseauth.GenerateJWTForUser(user)
if err != nil { if err != nil {
zap.L().Error("failed to generate token for SSO login user", zap.Error(err)) zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
return "", model.InternalErrorStr("failed to generate token for the user") return "", basemodel.InternalError(fmt.Errorf("failed to generate token for the user"))
} }
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s", return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
@@ -98,7 +95,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
tokenStore.RefreshJwt), nil tokenStore.RefreshJwt), nil
} }
func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) { func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, *basemodel.ApiError) {
domain, apierr := m.GetDomainByEmail(ctx, email) domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil { if apierr != nil {
return false, apierr return false, apierr
@@ -113,7 +110,7 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base
} }
if userPayload.Role != baseconst.AdminGroup { if userPayload.Role != baseconst.AdminGroup {
return false, model.BadRequest(fmt.Errorf("auth method not supported")) return false, basemodel.BadRequest(fmt.Errorf("auth method not supported"))
} }
} }
@@ -123,7 +120,7 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base
// PrecheckLogin is called when the login or signup page is loaded // PrecheckLogin is called when the login or signup page is loaded
// to check whether SSO login should be prompted // to check whether SSO login should be prompted
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) { func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, *basemodel.ApiError) {
// assume user is valid unless proven otherwise // assume user is valid unless proven otherwise
resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false} resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}
@@ -147,7 +144,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
ssoAvailable = false ssoAvailable = false
default: default:
zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err)) zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
return resp, model.BadRequestStr(err.Error()) return resp, &basemodel.ApiError{Err: err, Typ: basemodel.ErrorBadData}
} }
} }
@@ -180,7 +177,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
siteUrl, err := url.Parse(escapedUrl) siteUrl, err := url.Parse(escapedUrl)
if err != nil { if err != nil {
zap.L().Error("failed to parse referer", zap.Error(err)) zap.L().Error("failed to parse referer", zap.Error(err))
return resp, model.InternalError(fmt.Errorf("failed to generate login request")) return resp, basemodel.InternalError(fmt.Errorf("failed to generate login request"))
} }
// build Idp URL that will authenticate the user // build Idp URL that will authenticate the user
@@ -189,7 +186,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
if err != nil { if err != nil {
zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err)) zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
return resp, model.InternalError(err) return resp, basemodel.InternalError(err)
} }
// set SSO to true, as the url is generated correctly // set SSO to true, as the url is generated correctly
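
For reference, the redirect URL that `PrepareSsoRedirect` assembles has the query shape shown in the hunk above. A self-contained sketch of just that assembly step; the `tokenStore` fields and the argument order are assumptions inferred from the call site:

```go
package main

import "fmt"

// tokenStore mirrors only the fields used at the call site; the real struct
// in baseauth likely has more (assumption).
type tokenStore struct {
	AccessJwt  string
	RefreshJwt string
}

// buildSsoRedirect reproduces the URL shape returned after a successful SSO
// response: <redirectUri>?jwt=<access>&usr=<userId>&refreshjwt=<refresh>.
func buildSsoRedirect(redirectUri, userId string, ts tokenStore) string {
	return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
		redirectUri, ts.AccessJwt, userId, ts.RefreshJwt)
}

func main() {
	fmt.Println(buildSsoRedirect("https://signoz.example/login/sso", "user-123",
		tokenStore{AccessJwt: "access.jwt", RefreshJwt: "refresh.jwt"}))
}
```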

View File

@@ -76,47 +76,47 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
} }
// GetDomainByName returns org domain for a given domain name // GetDomainByName returns org domain for a given domain name
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) { func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, *basemodel.ApiError) {
stored := StoredDomain{} stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name) err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)
if err != nil { if err != nil {
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
return nil, model.BadRequest(fmt.Errorf("invalid domain name")) return nil, basemodel.BadRequest(fmt.Errorf("invalid domain name"))
} }
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId} domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil { if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
return domain, nil return domain, nil
} }
// GetDomain returns org domain for a given domain id // GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) { func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, *basemodel.ApiError) {
stored := StoredDomain{} stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id) err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)
if err != nil { if err != nil {
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
return nil, model.BadRequest(fmt.Errorf("invalid domain id")) return nil, basemodel.BadRequest(fmt.Errorf("invalid domain id"))
} }
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId} domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil { if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
return domain, nil return domain, nil
} }
// ListDomains gets the list of auth domains by org id // ListDomains gets the list of auth domains by org id
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) { func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, *basemodel.ApiError) {
domains := []model.OrgDomain{} domains := []model.OrgDomain{}
stored := []StoredDomain{} stored := []StoredDomain{}
@@ -126,7 +126,7 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
return []model.OrgDomain{}, nil return []model.OrgDomain{}, nil
} }
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
for _, s := range stored { for _, s := range stored {
@@ -141,20 +141,20 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
} }
// CreateDomain creates a new auth domain // CreateDomain creates a new auth domain
func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError { func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError {
if domain.Id == uuid.Nil { if domain.Id == uuid.Nil {
domain.Id = uuid.New() domain.Id = uuid.New()
} }
if domain.OrgId == "" || domain.Name == "" { if domain.OrgId == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name ")) return basemodel.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
} }
configJson, err := json.Marshal(domain) configJson, err := json.Marshal(domain)
if err != nil { if err != nil {
zap.L().Error("failed to unmarshal domain config", zap.Error(err)) zap.L().Error("failed to unmarshal domain config", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed")) return basemodel.InternalError(fmt.Errorf("domain creation failed"))
} }
_, err = m.DB().ExecContext(ctx, _, err = m.DB().ExecContext(ctx,
@@ -168,24 +168,24 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
if err != nil { if err != nil {
zap.L().Error("failed to insert domain in db", zap.Error(err)) zap.L().Error("failed to insert domain in db", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed")) return basemodel.InternalError(fmt.Errorf("domain creation failed"))
} }
return nil return nil
} }
// UpdateDomain updates stored config params for a domain // UpdateDomain updates stored config params for a domain
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError { func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError {
if domain.Id == uuid.Nil { if domain.Id == uuid.Nil {
zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null"))) zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain update failed")) return basemodel.InternalError(fmt.Errorf("domain update failed"))
} }
configJson, err := json.Marshal(domain) configJson, err := json.Marshal(domain)
if err != nil { if err != nil {
zap.L().Error("domain update failed", zap.Error(err)) zap.L().Error("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed")) return basemodel.InternalError(fmt.Errorf("domain update failed"))
} }
_, err = m.DB().ExecContext(ctx, _, err = m.DB().ExecContext(ctx,
@@ -196,18 +196,18 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
if err != nil { if err != nil {
zap.L().Error("domain update failed", zap.Error(err)) zap.L().Error("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed")) return basemodel.InternalError(fmt.Errorf("domain update failed"))
} }
return nil return nil
} }
// DeleteDomain deletes an org domain // DeleteDomain deletes an org domain
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError { func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) *basemodel.ApiError {
if id == uuid.Nil { if id == uuid.Nil {
zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null"))) zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain delete failed")) return basemodel.InternalError(fmt.Errorf("domain delete failed"))
} }
_, err := m.DB().ExecContext(ctx, _, err := m.DB().ExecContext(ctx,
@@ -216,21 +216,21 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
if err != nil { if err != nil {
zap.L().Error("domain delete failed", zap.Error(err)) zap.L().Error("domain delete failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain delete failed")) return basemodel.InternalError(fmt.Errorf("domain delete failed"))
} }
return nil return nil
} }
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) { func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, *basemodel.ApiError) {
if email == "" { if email == "" {
return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email ")) return nil, basemodel.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
} }
components := strings.Split(email, "@") components := strings.Split(email, "@")
if len(components) < 2 { if len(components) < 2 {
return nil, model.BadRequest(fmt.Errorf("invalid email address")) return nil, basemodel.BadRequest(fmt.Errorf("invalid email address"))
} }
parsedDomain := components[1] parsedDomain := components[1]
@@ -242,12 +242,12 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
return nil, nil return nil, nil
} }
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId} domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil { if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
return domain, nil return domain, nil
} }
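
The email-to-domain parsing inside `GetDomainByEmail` reduces to a small pure function. A sketch of that step alone, reusing the error messages from the hunk:

```go
package main

import (
	"fmt"
	"strings"
)

// domainFromEmail mirrors the parsing step in GetDomainByEmail: split on "@"
// and take the second component as the auth domain to look up in org_domains.
func domainFromEmail(email string) (string, error) {
	if email == "" {
		return "", fmt.Errorf("could not find auth domain, missing fields: email")
	}
	components := strings.Split(email, "@")
	if len(components) < 2 {
		return "", fmt.Errorf("invalid email address")
	}
	return components[1], nil
}

func main() {
	d, err := domainFromEmail("jane@example.com")
	fmt.Println(d, err) // example.com <nil>
}
```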

View File

@@ -11,7 +11,7 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
) )
func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) { func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, *basemodel.ApiError) {
result, err := m.DB().ExecContext(ctx, result, err := m.DB().ExecContext(ctx,
"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", "INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
p.UserID, p.UserID,
@@ -27,12 +27,12 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem
) )
if err != nil { if err != nil {
zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err)) zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed")) return model.PAT{}, basemodel.InternalError(fmt.Errorf("PAT insertion failed"))
} }
id, err := result.LastInsertId() id, err := result.LastInsertId()
if err != nil { if err != nil {
zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err)) zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed")) return model.PAT{}, basemodel.InternalError(fmt.Errorf("PAT insertion failed"))
} }
p.Id = strconv.Itoa(int(id)) p.Id = strconv.Itoa(int(id))
createdByUser, _ := m.GetUser(ctx, p.UserID) createdByUser, _ := m.GetUser(ctx, p.UserID)
@@ -53,7 +53,7 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem
return p, nil return p, nil
} }
func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError { func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) *basemodel.ApiError {
_, err := m.DB().ExecContext(ctx, _, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;", "UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;",
p.Role, p.Role,
@@ -63,29 +63,29 @@ func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemo
id) id)
if err != nil { if err != nil {
zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err)) zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT update failed")) return basemodel.InternalError(fmt.Errorf("PAT update failed"))
} }
return nil return nil
} }
func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) basemodel.BaseApiError { func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) *basemodel.ApiError {
_, err := m.DB().ExecContext(ctx, _, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;", "UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;",
lastUsed, lastUsed,
token) token)
if err != nil { if err != nil {
zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err)) zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT last used update failed")) return basemodel.InternalError(fmt.Errorf("PAT last used update failed"))
} }
return nil return nil
} }
func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) { func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, *basemodel.ApiError) {
pats := []model.PAT{} pats := []model.PAT{}
if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil { if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err)) zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
return nil, model.InternalError(fmt.Errorf("failed to fetch PATs")) return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PATs"))
} }
for i := range pats { for i := range pats {
createdByUser, _ := m.GetUser(ctx, pats[i].UserID) createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
@@ -123,28 +123,28 @@ func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApi
return pats, nil return pats, nil
} }
func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError { func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) *basemodel.ApiError {
updatedAt := time.Now().Unix() updatedAt := time.Now().Unix()
_, err := m.DB().ExecContext(ctx, _, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3", "UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
userID, updatedAt, id) userID, updatedAt, id)
if err != nil { if err != nil {
zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err)) zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT revoke failed")) return basemodel.InternalError(fmt.Errorf("PAT revoke failed"))
} }
return nil return nil
} }
func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) { func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, *basemodel.ApiError) {
pats := []model.PAT{} pats := []model.PAT{}
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil { if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT")) return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PAT"))
} }
if len(pats) != 1 { if len(pats) != 1 {
return nil, &model.ApiError{ return nil, &basemodel.ApiError{
Typ: model.ErrorInternal, Typ: basemodel.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token), Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token),
} }
} }
@@ -152,16 +152,16 @@ func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemo
return &pats[0], nil return &pats[0], nil
} }
func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) { func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, *basemodel.ApiError) {
pats := []model.PAT{} pats := []model.PAT{}
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil { if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT")) return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PAT"))
} }
if len(pats) != 1 { if len(pats) != 1 {
return nil, &model.ApiError{ return nil, &basemodel.ApiError{
Typ: model.ErrorInternal, Typ: basemodel.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token"), Err: fmt.Errorf("found zero or multiple PATs with same token"),
} }
} }
@@ -170,7 +170,7 @@ func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basem
} }
// deprecated // deprecated
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) { func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, *basemodel.ApiError) {
users := []basemodel.UserPayload{} users := []basemodel.UserPayload{}
query := `SELECT query := `SELECT
@@ -186,12 +186,12 @@ func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.U
WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');` WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`
if err := m.DB().Select(&users, query, token); err != nil { if err := m.DB().Select(&users, query, token); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err)) return nil, basemodel.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
} }
if len(users) != 1 { if len(users) != 1 {
return nil, &model.ApiError{ return nil, &basemodel.ApiError{
Typ: model.ErrorInternal, Typ: basemodel.ErrorInternal,
Err: fmt.Errorf("found zero or multiple users with same PAT token"), Err: fmt.Errorf("found zero or multiple users with same PAT token"),
} }
} }
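
`GetPAT`, `GetPATByID`, and `GetUserByPAT` all share the same uniqueness guard: the query must match exactly one non-revoked row, otherwise the lookup fails as an internal error. A distilled sketch of that guard:

```go
package main

import "fmt"

type PAT struct {
	Id    string
	Token string
}

// onePAT mirrors the guard used by the PAT lookups: anything other than a
// single match is treated as "found zero or multiple PATs with same token".
func onePAT(pats []PAT) (*PAT, error) {
	if len(pats) != 1 {
		return nil, fmt.Errorf("found zero or multiple PATs with same token")
	}
	return &pats[0], nil
}

func main() {
	_, err := onePAT(nil)
	fmt.Println(err) // found zero or multiple PATs with same token
	p, _ := onePAT([]PAT{{Id: "1", Token: "t"}})
	fmt.Println(p.Id) // 1
}
```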

View File

@@ -5,5 +5,5 @@ import (
) )
func NewNoopProxy() (*httputil.ReverseProxy, error) { func NewNoopProxy() (*httputil.ReverseProxy, error) {
return &httputil.ReverseProxy{}, nil return nil, nil
} }
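
The two sides differ in whether the noop proxy is a non-nil empty `ReverseProxy` or a plain `nil`. A hedged sketch of why that matters to callers; the caller shown here is hypothetical:

```go
package main

import (
	"fmt"
	"net/http/httputil"
)

// A non-nil empty proxy (one side of the diff) can be wired up as a handler
// without a nil check; returning nil (the other side) forces every caller to
// guard before use. A defensive caller works with both:
func handlerFor(p *httputil.ReverseProxy) string {
	if p == nil {
		return "gateway proxy disabled"
	}
	return "gateway proxy enabled"
}

// NewNoopProxy as in the non-nil variant of the diff.
func NewNoopProxy() (*httputil.ReverseProxy, error) {
	return &httputil.ReverseProxy{}, nil
}

func main() {
	proxy, _ := NewNoopProxy()
	fmt.Println(handlerFor(proxy))
}
```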

View File

@@ -8,9 +8,9 @@ import (
"strings" "strings"
) )
var ( const (
RoutePrefix string = "/api/gateway" RoutePrefix string = "/api/gateway"
AllowedPrefix []string = []string{"/v1/workspaces/me", "/v2/profiles/me", "/v2/deployments/me"} AllowedPrefix string = "/v1/workspaces/me"
) )
type proxy struct { type proxy struct {
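
The hunk shows only the constants, not the routing check itself. A sketch of how a gateway might gate paths against `RoutePrefix` and the slice-valued `AllowedPrefix`; the actual proxy logic is not shown in this diff, so the check below is an assumption:

```go
package main

import (
	"fmt"
	"strings"
)

const RoutePrefix = "/api/gateway"

// Slice-valued variant from the diff; the single-string version is the
// special case of a one-element list.
var AllowedPrefix = []string{"/v1/workspaces/me", "/v2/profiles/me", "/v2/deployments/me"}

// allowed reports whether a request path may be proxied: it must carry the
// route prefix and, after stripping it, match one of the allowed prefixes.
func allowed(path string) bool {
	rest, ok := strings.CutPrefix(path, RoutePrefix)
	if !ok {
		return false
	}
	for _, p := range AllowedPrefix {
		if strings.HasPrefix(rest, p) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowed("/api/gateway/v1/workspaces/me")) // true
	fmt.Println(allowed("/api/gateway/v1/other"))         // false
}
```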

View File

@@ -13,8 +13,3 @@ type ActivationResponse struct {
ActivationId string `json:"ActivationId"` ActivationId string `json:"ActivationId"`
PlanDetails string `json:"PlanDetails"` PlanDetails string `json:"PlanDetails"`
} }
type ValidateLicenseResponse struct {
Status status `json:"status"`
Data map[string]interface{} `json:"data"`
}

View File

@@ -7,13 +7,13 @@ import (
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"time"
"github.com/pkg/errors" "github.com/pkg/errors"
"go.uber.org/zap" "go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/constants" "go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
) )
var C *Client var C *Client
@@ -24,14 +24,12 @@ const (
) )
type Client struct { type Client struct {
Prefix string Prefix string
GatewayUrl string
} }
func New() *Client { func New() *Client {
return &Client{ return &Client{
Prefix: constants.LicenseSignozIo, Prefix: constants.LicenseSignozIo,
GatewayUrl: constants.ZeusURL,
} }
} }
@@ -40,7 +38,7 @@ func init() {
} }
// ActivateLicense sends key to license.signoz.io and gets activation data // ActivateLicense sends key to license.signoz.io and gets activation data
func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) { func ActivateLicense(key, siteId string) (*ActivationResponse, *basemodel.ApiError) {
licenseReq := map[string]string{ licenseReq := map[string]string{
"key": key, "key": key,
"siteId": siteId, "siteId": siteId,
@@ -51,13 +49,13 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
if err != nil { if err != nil {
zap.L().Error("failed to connect to license.signoz.io", zap.Error(err)) zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection")) return nil, basemodel.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
} }
httpBody, err := io.ReadAll(httpResponse.Body) httpBody, err := io.ReadAll(httpResponse.Body)
if err != nil { if err != nil {
zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err)) zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io")) return nil, basemodel.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
} }
defer httpResponse.Body.Close() defer httpResponse.Body.Close()
@@ -67,22 +65,22 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
err = json.Unmarshal(httpBody, &result) err = json.Unmarshal(httpBody, &result)
if err != nil { if err != nil {
zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err)) zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response")) return nil, basemodel.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
} }
switch httpResponse.StatusCode { switch httpResponse.StatusCode {
case 200, 201: case 200, 201:
return result.Data, nil return result.Data, nil
case 400, 401: case 400, 401:
return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error))) return nil, basemodel.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
default: default:
return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error))) return nil, basemodel.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
} }
} }
// ValidateLicense validates the license key // ValidateLicense validates the license key
func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) { func ValidateLicense(activationId string) (*ActivationResponse, *basemodel.ApiError) {
validReq := map[string]string{ validReq := map[string]string{
"activationId": activationId, "activationId": activationId,
} }
@@ -91,12 +89,12 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString)) response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil { if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection")) return nil, basemodel.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
} }
body, err := io.ReadAll(response.Body) body, err := io.ReadAll(response.Body)
if err != nil { if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io")) return nil, basemodel.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
} }
defer response.Body.Close() defer response.Body.Close()
@@ -106,73 +104,19 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
a := ActivationResult{} a := ActivationResult{}
err = json.Unmarshal(body, &a) err = json.Unmarshal(body, &a)
if err != nil { if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response")) return nil, basemodel.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
} }
return a.Data, nil return a.Data, nil
case 400, 401: case 400, 401:
return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)), return nil, basemodel.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io")) "bad request error received from license.signoz.io"))
default: default:
return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)), return nil, basemodel.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io")) "internal error received from license.signoz.io"))
} }
} }
func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
// Creating an HTTP client with a timeout for better control
client := &http.Client{
Timeout: 10 * time.Second,
}
req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to create request: %w", err)))
}
// Setting the custom header
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
response, err := client.Do(req)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to make post request: %w", err)))
}
body, err := io.ReadAll(response.Body)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read validation response from %v", C.GatewayUrl)))
}
defer response.Body.Close()
switch response.StatusCode {
case 200:
a := ValidateLicenseResponse{}
err = json.Unmarshal(body, &a)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
}
license, err := model.NewLicenseV3(a.Data)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to generate new license v3"))
}
return license, nil
case 400:
return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
case 401:
return nil, model.Unauthorized(errors.Wrap(fmt.Errorf(string(body)),
fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
default:
return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
}
}
func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) { func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, POST, url, body) req, err := http.NewRequestWithContext(ctx, POST, url, body)
if err != nil { if err != nil {
@@ -184,21 +128,21 @@ func NewPostRequestWithCtx(ctx context.Context, url string, contentType string,
} }
// SendUsage reports the usage of signoz to license server // SendUsage reports the usage of signoz to license server
func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError { func SendUsage(ctx context.Context, usage model.UsagePayload) *basemodel.ApiError {
reqString, _ := json.Marshal(usage) reqString, _ := json.Marshal(usage)
req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString)) req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil { if err != nil {
return model.BadRequest(errors.Wrap(err, "unable to create http request")) return basemodel.BadRequest(errors.Wrap(err, "unable to create http request"))
} }
res, err := http.DefaultClient.Do(req) res, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection")) return basemodel.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
} }
body, err := io.ReadAll(res.Body) body, err := io.ReadAll(res.Body)
if err != nil { if err != nil {
return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io")) return basemodel.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
} }
defer res.Body.Close() defer res.Body.Close()
@@ -207,10 +151,10 @@ func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
case 200, 201: case 200, 201:
return nil return nil
case 400, 401: case 400, 401:
return model.BadRequest(errors.Wrap(fmt.Errorf(string(body)), return basemodel.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io")) "bad request error received from license.signoz.io"))
default: default:
return model.InternalError(errors.Wrap(fmt.Errorf(string(body)), return basemodel.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io")) "internal error received from license.signoz.io"))
} }
} }
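
The removed `ValidateLicenseV3` illustrates the transport pattern used against the gateway: an explicit client timeout plus a custom API-key header. A self-contained sketch of that pattern, keeping the header name and path from the removed code:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// fetchLicense reproduces the removed ValidateLicenseV3 transport steps:
// a client with an explicit timeout, a GET to <gatewayUrl>/v2/licenses/me,
// and the license key passed via the X-Signoz-Cloud-Api-Key header.
func fetchLicense(gatewayUrl, licenseKey string) ([]byte, error) {
	client := &http.Client{Timeout: 10 * time.Second}

	req, err := http.NewRequest(http.MethodGet, gatewayUrl+"/v2/licenses/me", nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)

	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to make request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read validation response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d: %s", resp.StatusCode, body)
	}
	return body, nil
}

func main() {
	if _, err := fetchLicense("https://gateway.example", "demo-key"); err != nil {
		fmt.Println(err)
	}
}
```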

View File

@@ -3,12 +3,10 @@ package license
import ( import (
"context" "context"
"database/sql" "database/sql"
"encoding/json"
"fmt" "fmt"
"time" "time"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"go.signoz.io/signoz/ee/query-service/license/sqlite" "go.signoz.io/signoz/ee/query-service/license/sqlite"
"go.signoz.io/signoz/ee/query-service/model" "go.signoz.io/signoz/ee/query-service/model"
@@ -18,15 +16,13 @@ import (
// Repo is license repo. stores license keys in a secured DB // Repo is license repo. stores license keys in a secured DB
type Repo struct { type Repo struct {
db *sqlx.DB db *sqlx.DB
useLicensesV3 bool
} }
// NewLicenseRepo initiates a new license repo // NewLicenseRepo initiates a new license repo
func NewLicenseRepo(db *sqlx.DB, useLicensesV3 bool) Repo { func NewLicenseRepo(db *sqlx.DB) Repo {
return Repo{ return Repo{
db: db, db: db,
useLicensesV3: useLicensesV3,
} }
} }
@@ -52,35 +48,9 @@ func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
return licenses, nil return licenses, nil
} }
func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) { // GetActiveLicense fetches the latest active license from DB.
licensesData := []model.LicenseDB{} // If the license is not present, expect a nil license and a nil error in the output.
licenseV3Data := []*model.LicenseV3{} func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
query := "SELECT id,key,data FROM licenses_v3"
err := r.db.Select(&licensesData, query)
if err != nil {
return nil, fmt.Errorf("failed to get licenses from db: %v", err)
}
for _, l := range licensesData {
var licenseData map[string]interface{}
err := json.Unmarshal([]byte(l.Data), &licenseData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
}
license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
if err != nil {
return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
}
licenseV3Data = append(licenseV3Data, license)
}
return licenseV3Data, nil
}
func (r *Repo) GetActiveLicenseV2(ctx context.Context) (*model.License, *basemodel.ApiError) {
var err error var err error
licenses := []model.License{} licenses := []model.License{}
@@ -109,70 +79,6 @@ func (r *Repo) GetActiveLicenseV2(ctx context.Context) (*model.License, *basemod
return active, nil return active, nil
} }
// GetActiveLicense fetches the latest active license from DB.
// If the license is not present, expect a nil license and a nil error in the output.
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
if r.useLicensesV3 {
zap.L().Info("Using licenses v3 for GetActiveLicense")
activeLicenseV3, err := r.GetActiveLicenseV3(ctx)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
}
if activeLicenseV3 == nil {
return nil, nil
}
activeLicenseV2 := model.ConvertLicenseV3ToLicenseV2(activeLicenseV3)
return activeLicenseV2, nil
}
active, err := r.GetActiveLicenseV2(ctx)
if err != nil {
return nil, err
}
return active, nil
}
func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error) {
var err error
licenses := []model.LicenseDB{}
query := "SELECT id,key,data FROM licenses_v3"
err = r.db.Select(&licenses, query)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
}
var active *model.LicenseV3
for _, l := range licenses {
var licenseData map[string]interface{}
err := json.Unmarshal([]byte(l.Data), &licenseData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
}
license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
if err != nil {
return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
}
if active == nil &&
(license.ValidFrom != 0) &&
(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
active = license
}
if active != nil &&
license.ValidFrom > active.ValidFrom &&
(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
active = license
}
}
return active, nil
}
// InsertLicense inserts a new license in db // InsertLicense inserts a new license in db
func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error { func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
@@ -298,59 +204,3 @@ func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
} }
return nil return nil
} }
// InsertLicenseV3 inserts a new license v3 in db
func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
// license is the entity of zeus, so the entire license is stored here without defining a schema
licenseData, err := json.Marshal(l.Data)
if err != nil {
return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
}
_, err = r.db.ExecContext(ctx,
query,
l.ID,
l.Key,
string(licenseData),
)
if err != nil {
if sqliteErr, ok := err.(sqlite3.Error); ok {
if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
}
}
zap.L().Error("error in inserting license data: ", zap.Error(err))
return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
}
return nil
}
// UpdateLicenseV3 updates a new license v3 in db
func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
// the key and id for the license can't change so only update the data here!
query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`
license, err := json.Marshal(l.Data)
if err != nil {
return fmt.Errorf("insert license failed: license marshal error")
}
_, err = r.db.ExecContext(ctx,
query,
license,
l.ID,
)
if err != nil {
zap.L().Error("error in updating license data: ", zap.Error(err))
return fmt.Errorf("failed to update license in db: %v", err)
}
return nil
}
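
The removed `GetActiveLicenseV3` picks the active license by validity window. A condensed sketch of that selection rule (`ValidFrom != 0` means started; `ValidUntil == -1` means a subscription with no end date); the original's separate nil/non-nil branches are folded into one comparison:

```go
package main

import (
	"fmt"
	"time"
)

// LicenseV3 carries only the fields the selection logic reads.
type LicenseV3 struct {
	ID         string
	ValidFrom  int64 // unix seconds; 0 means "not started"
	ValidUntil int64 // unix seconds; -1 means "no end date" (subscription)
}

// pickActive mirrors GetActiveLicenseV3's loop: among licenses that have
// started and not yet expired, keep the one with the latest ValidFrom.
func pickActive(licenses []LicenseV3, now int64) *LicenseV3 {
	var active *LicenseV3
	for i := range licenses {
		l := licenses[i]
		valid := l.ValidFrom != 0 && (l.ValidUntil == -1 || l.ValidUntil > now)
		if !valid {
			continue
		}
		if active == nil || l.ValidFrom > active.ValidFrom {
			active = &licenses[i]
		}
	}
	return active
}

func main() {
	now := time.Now().Unix()
	ls := []LicenseV3{
		{ID: "expired", ValidFrom: now - 1000, ValidUntil: now - 10},
		{ID: "sub", ValidFrom: now - 100, ValidUntil: -1},
	}
	fmt.Println(pickActive(ls, now).ID) // sub
}
```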

View File

@@ -7,7 +7,6 @@ import (
"time" "time"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"sync" "sync"
@@ -46,17 +45,16 @@ type Manager struct {
failedAttempts uint64 failedAttempts uint64
// keep track of active license and features // keep track of active license and features
activeLicense *model.License activeLicense *model.License
activeLicenseV3 *model.LicenseV3 activeFeatures basemodel.FeatureSet
activeFeatures basemodel.FeatureSet
} }
func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...basemodel.Feature) (*Manager, error) { func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
if LM != nil { if LM != nil {
return LM, nil return LM, nil
} }
repo := NewLicenseRepo(db, useLicensesV3) repo := NewLicenseRepo(db)
err := repo.InitDB(dbType) err := repo.InitDB(dbType)
if err != nil { if err != nil {
@@ -67,32 +65,7 @@ func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...ba
repo: &repo, repo: &repo,
} }
if useLicensesV3 { if err := m.start(features...); err != nil {
// get active license from the db
active, err := m.repo.GetActiveLicenseV2(context.Background())
if err != nil {
return m, err
}
// if we have an active license then need to fetch the complete details
if active != nil {
// fetch the new license structure from control plane
licenseV3, apiError := validate.ValidateLicenseV3(active.Key)
if apiError != nil {
return m, apiError
}
// insert the licenseV3 in sqlite db
apiError = m.repo.InsertLicenseV3(context.Background(), licenseV3)
// if the license already exists move ahead.
if apiError != nil && apiError.Typ != model.ErrorConflict {
return m, apiError
}
zap.L().Info("Successfully inserted license from v2 to v3 table")
}
}
if err := m.start(useLicensesV3, features...); err != nil {
return m, err return m, err
} }
LM = m LM = m
@@ -100,14 +73,8 @@ func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...ba
} }
// start loads active license in memory and initiates validator // start loads active license in memory and initiates validator
func (lm *Manager) start(useLicensesV3 bool, features ...basemodel.Feature) error { func (lm *Manager) start(features ...basemodel.Feature) error {
err := lm.LoadActiveLicense(features...)
var err error
if useLicensesV3 {
err = lm.LoadActiveLicenseV3(features...)
} else {
err = lm.LoadActiveLicense(features...)
}
return err return err
} }
@@ -141,31 +108,6 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
go lm.Validator(context.Background()) go lm.Validator(context.Background())
} }
}
func (lm *Manager) SetActiveV3(l *model.LicenseV3, features ...basemodel.Feature) {
lm.mutex.Lock()
defer lm.mutex.Unlock()
if l == nil {
return
}
lm.activeLicenseV3 = l
lm.activeFeatures = append(l.Features, features...)
// set default features
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.L().Panic("Couldn't activate features", zap.Error(err))
}
if !lm.validatorRunning {
// we want to make sure only one validator runs,
// we already have lock() so good to go
lm.validatorRunning = true
go lm.ValidatorV3(context.Background())
}
} }
func setDefaultFeatures(lm *Manager) { func setDefaultFeatures(lm *Manager) {
@@ -195,39 +137,17 @@ func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
return nil return nil
} }
func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error { func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *basemodel.ApiError) {
active, err := lm.repo.GetActiveLicenseV3(context.Background())
if err != nil {
return err
}
if active != nil {
lm.SetActiveV3(active, features...)
} else {
zap.L().Info("No active license found, defaulting to basic plan")
// if no active license is found, we default to basic(free) plan with all default features
lm.activeFeatures = model.BasicPlan
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.L().Error("Couldn't initialize features", zap.Error(err))
return err
}
}
return nil
}
func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
licenses, err := lm.repo.GetLicenses(ctx) licenses, err := lm.repo.GetLicenses(ctx)
if err != nil { if err != nil {
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
for _, l := range licenses { for _, l := range licenses {
l.ParsePlan() l.ParsePlan()
if lm.activeLicense != nil && l.Key == lm.activeLicense.Key { if l.Key == lm.activeLicense.Key {
l.IsCurrent = true l.IsCurrent = true
} }
@@ -243,31 +163,8 @@ func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, a
return return
} }
func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.LicenseV3, apiError *model.ApiError) {
licenses, err := lm.repo.GetLicensesV3(ctx)
if err != nil {
return nil, model.InternalError(err)
}
for _, l := range licenses {
if lm.activeLicenseV3 != nil && l.Key == lm.activeLicenseV3.Key {
l.IsCurrent = true
}
if l.ValidUntil == -1 {
// for subscriptions, there is no end-date as such
// but for showing user some validity we default one year timespan
l.ValidUntil = l.ValidFrom + 31556926
}
response = append(response, l)
}
return response, nil
}
// Validator validates license after an epoch of time // Validator validates license after an epoch of time
func (lm *Manager) Validator(ctx context.Context) { func (lm *Manager) Validator(ctx context.Context) {
zap.L().Info("Validator started!")
defer close(lm.terminated) defer close(lm.terminated)
tick := time.NewTicker(validationFrequency) tick := time.NewTicker(validationFrequency)
defer tick.Stop() defer tick.Stop()
@@ -290,31 +187,6 @@ func (lm *Manager) Validator(ctx context.Context) {
} }
} }
// Validator validates license after an epoch of time
func (lm *Manager) ValidatorV3(ctx context.Context) {
zap.L().Info("ValidatorV3 started!")
defer close(lm.terminated)
tick := time.NewTicker(validationFrequency)
defer tick.Stop()
lm.ValidateV3(ctx)
for {
select {
case <-lm.done:
return
default:
select {
case <-lm.done:
return
case <-tick.C:
lm.ValidateV3(ctx)
}
}
}
}
// Validate validates the current active license // Validate validates the current active license
func (lm *Manager) Validate(ctx context.Context) (reterr error) { func (lm *Manager) Validate(ctx context.Context) (reterr error) {
zap.L().Info("License validation started") zap.L().Info("License validation started")
@@ -340,8 +212,8 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId) response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
if apiError != nil { if apiError != nil {
zap.L().Error("failed to validate license", zap.Error(apiError.Err)) zap.L().Error("failed to validate license", zap.Any("apiError", apiError))
return apiError.Err return apiError
} }
if response.PlanDetails == lm.activeLicense.PlanDetails { if response.PlanDetails == lm.activeLicense.PlanDetails {
@@ -382,55 +254,8 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
return nil return nil
} }
func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {
license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
if apiError != nil {
zap.L().Error("failed to validate license", zap.Error(apiError.Err))
return apiError
}
err := lm.repo.UpdateLicenseV3(ctx, license)
if err != nil {
return model.BadRequest(errors.Wrap(err, "failed to update the new license"))
}
lm.SetActiveV3(license)
return nil
}
func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
zap.L().Info("License validation started")
if lm.activeLicenseV3 == nil {
return nil
}
defer func() {
lm.mutex.Lock()
lm.lastValidated = time.Now().Unix()
if reterr != nil {
zap.L().Error("License validation completed with error", zap.Error(reterr))
atomic.AddUint64(&lm.failedAttempts, 1)
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
map[string]interface{}{"err": reterr.Error()}, "", true, false)
} else {
zap.L().Info("License validation completed with no errors")
}
lm.mutex.Unlock()
}()
err := lm.RefreshLicense(ctx)
if err != nil {
return err
}
return nil
}
// Activate activates a license key with signoz server // Activate activates a license key with signoz server
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) { func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *basemodel.ApiError) {
defer func() { defer func() {
if errResponse != nil { if errResponse != nil {
userEmail, err := auth.GetEmailFromJwt(ctx) userEmail, err := auth.GetEmailFromJwt(ctx)
@@ -443,7 +268,7 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
response, apiError := validate.ActivateLicense(key, "") response, apiError := validate.ActivateLicense(key, "")
if apiError != nil { if apiError != nil {
zap.L().Error("failed to activate license", zap.Error(apiError.Err)) zap.L().Error("failed to activate license", zap.Any("apiError", apiError))
return nil, apiError return nil, apiError
} }
@@ -458,14 +283,14 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
if err != nil { if err != nil {
zap.L().Error("failed to activate license", zap.Error(err)) zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
// store the license before activating it // store the license before activating it
err = lm.repo.InsertLicense(ctx, l) err = lm.repo.InsertLicense(ctx, l)
if err != nil { if err != nil {
zap.L().Error("failed to activate license", zap.Error(err)) zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err) return nil, basemodel.InternalError(err)
} }
// license is valid, activate it // license is valid, activate it
@@ -473,35 +298,6 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
return l, nil return l, nil
} }
func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
defer func() {
if errResponse != nil {
userEmail, err := auth.GetEmailFromJwt(ctx)
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false)
}
}
}()
license, apiError := validate.ValidateLicenseV3(licenseKey)
if apiError != nil {
zap.L().Error("failed to get the license", zap.Error(apiError.Err))
return nil, apiError
}
// insert the new license to the sqlite db
err := lm.repo.InsertLicenseV3(ctx, license)
if err != nil {
zap.L().Error("failed to activate license", zap.Error(err))
return nil, err
}
// license is valid, activate it
lm.SetActiveV3(license)
return license, nil
}
// CheckFeature will be internally used by backend routines // CheckFeature will be internally used by backend routines
// for feature gating // for feature gating
func (lm *Manager) CheckFeature(featureKey string) error { func (lm *Manager) CheckFeature(featureKey string) error {
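
`Validator` and the removed `ValidatorV3` share the same loop shape: validate once up front, then re-validate on each tick until shutdown. A runnable sketch of that shape; the original's nested `select`, which gives the done channel priority over the ticker, is flattened here, and the interval is shortened for the demo:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const validationFrequency = 50 * time.Millisecond // shortened for the demo

// runValidator mirrors the Validator/ValidatorV3 loop: validate immediately,
// then on every tick, until the done channel closes.
func runValidator(ctx context.Context, done <-chan struct{}, validate func(context.Context)) {
	tick := time.NewTicker(validationFrequency)
	defer tick.Stop()

	validate(ctx)
	for {
		select {
		case <-done:
			return
		case <-tick.C:
			validate(ctx)
		}
	}
}

func main() {
	done := make(chan struct{})
	go runValidator(context.Background(), done, func(context.Context) {
		fmt.Println("license validation started")
	})
	time.Sleep(120 * time.Millisecond)
	close(done)
}
```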

View File

@@ -48,16 +48,5 @@ func InitDB(db *sqlx.DB) error {
return fmt.Errorf("error in creating feature_status table: %s", err.Error()) return fmt.Errorf("error in creating feature_status table: %s", err.Error())
} }
table_schema = `CREATE TABLE IF NOT EXISTS licenses_v3 (
id TEXT PRIMARY KEY,
key TEXT NOT NULL UNIQUE,
data TEXT
);`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("error in creating licenses_v3 table: %s", err.Error())
}
return nil return nil
} }

View File

@@ -20,8 +20,6 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
-	prommodel "github.com/prometheus/common/model"
 	zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
 	zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"

@@ -79,10 +77,6 @@ func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
 	return logger
 }
-func init() {
-	prommodel.NameValidationScheme = prommodel.UTF8Validation
-}
 func main() {
 	var promConfigPath, skipTopLvlOpsPath string

@@ -93,11 +87,9 @@ func main() {
 	var ruleRepoURL string
 	var cluster string
-	var useLogsNewSchema bool
-	var useTraceNewSchema bool
-	var useLicensesV3 bool
 	var cacheConfigPath, fluxInterval string
 	var enableQueryServiceLogOTLPExport bool
+	var preferDelta bool
 	var preferSpanMetrics bool
 	var maxIdleConns int

@@ -105,19 +97,17 @@ func main() {
 	var dialTimeout time.Duration
 	var gatewayUrl string
-	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
-	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
-	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
 	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
 	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
 	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
+	flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
 	flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
 	flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
 	flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
 	flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
 	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
 	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
-	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
+	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)")
 	flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
 	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
 	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")

@@ -135,6 +125,7 @@ func main() {
 		HTTPHostPort:      baseconst.HTTPHostPort,
 		PromConfigPath:    promConfigPath,
 		SkipTopLvlOpsPath: skipTopLvlOpsPath,
+		PreferDelta:       preferDelta,
 		PreferSpanMetrics: preferSpanMetrics,
 		PrivateHostPort:   baseconst.PrivateHostPort,
 		DisableRules:      disableRules,

@@ -146,9 +137,6 @@ func main() {
 		FluxInterval:      fluxInterval,
 		Cluster:           cluster,
 		GatewayUrl:        gatewayUrl,
-		UseLogsNewSchema:  useLogsNewSchema,
-		UseTraceNewSchema: useTraceNewSchema,
-		UseLicensesV3:     useLicensesV3,
 	}
 	// Read the jwt secret key
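
As an aside, the flag wiring above follows Go's standard flag package pattern; a minimal, self-contained sketch of the same pattern (the flag names are copied from above for illustration, the rest is not SigNoz code):

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Register flags into plain variables, then parse once at startup.
	useLogsNewSchema := flag.Bool("use-logs-new-schema", false, "use logs_v2 schema for logs")
	maxIdleConns := flag.Int("max-idle-conns", 50, "connections to maintain in the pool")
	flag.Parse()

	// The parsed values are then handed to a config struct, as the
	// ServerOptions literal above does.
	fmt.Printf("logs v2: %v, idle conns: %d\n", *useLogsNewSchema, *maxIdleConns)
}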

View File

@@ -1,114 +1,5 @@
package model
import (
"fmt"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
type ApiError struct {
Typ basemodel.ErrorType
Err error
}
func (a *ApiError) Type() basemodel.ErrorType {
return a.Typ
}
func (a *ApiError) ToError() error {
if a != nil {
return a.Err
}
return a.Err
}
func (a *ApiError) Error() string {
return a.Err.Error()
}
func (a *ApiError) IsNil() bool {
return a == nil || a.Err == nil
}
// NewApiError returns an ApiError object of the given type
func NewApiError(typ basemodel.ErrorType, err error) *ApiError {
return &ApiError{
Typ: typ,
Err: err,
}
}
// BadRequest returns an ApiError object of bad request
func BadRequest(err error) *ApiError {
return &ApiError{
Typ: basemodel.ErrorBadData,
Err: err,
}
}
func Unauthorized(err error) *ApiError {
return &ApiError{
Typ: basemodel.ErrorUnauthorized,
Err: err,
}
}
// BadRequestStr returns an ApiError object of bad request for string input
func BadRequestStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorBadData,
Err: fmt.Errorf(s),
}
}
// InternalError returns an ApiError object of internal type
func InternalError(err error) *ApiError {
return &ApiError{
Typ: basemodel.ErrorInternal,
Err: err,
}
}
// InternalErrorStr returns an ApiError object of internal type for string input
func InternalErrorStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf(s),
}
}
var (
ErrorNone basemodel.ErrorType = ""
ErrorTimeout basemodel.ErrorType = "timeout"
ErrorCanceled basemodel.ErrorType = "canceled"
ErrorExec basemodel.ErrorType = "execution"
ErrorBadData basemodel.ErrorType = "bad_data"
ErrorInternal basemodel.ErrorType = "internal"
ErrorUnavailable basemodel.ErrorType = "unavailable"
ErrorNotFound basemodel.ErrorType = "not_found"
ErrorNotImplemented basemodel.ErrorType = "not_implemented"
ErrorUnauthorized basemodel.ErrorType = "unauthorized"
ErrorForbidden basemodel.ErrorType = "forbidden"
ErrorConflict basemodel.ErrorType = "conflict"
ErrorStreamingNotSupported basemodel.ErrorType = "streaming is not supported"
)
func init() {
ErrorNone = basemodel.ErrorNone
ErrorTimeout = basemodel.ErrorTimeout
ErrorCanceled = basemodel.ErrorCanceled
ErrorExec = basemodel.ErrorExec
ErrorBadData = basemodel.ErrorBadData
ErrorInternal = basemodel.ErrorInternal
ErrorUnavailable = basemodel.ErrorUnavailable
ErrorNotFound = basemodel.ErrorNotFound
ErrorNotImplemented = basemodel.ErrorNotImplemented
ErrorUnauthorized = basemodel.ErrorUnauthorized
ErrorForbidden = basemodel.ErrorForbidden
ErrorConflict = basemodel.ErrorConflict
ErrorStreamingNotSupported = basemodel.ErrorStreamingNotSupported
}
type ErrUnsupportedAuth struct{}
func (errUnsupportedAuth ErrUnsupportedAuth) Error() string {
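
The removed constructors above wrap an error together with a type tag that a handler layer later maps to an HTTP status code; here is a minimal sketch of that pattern using simplified local types (illustrative only, not the SigNoz implementation):

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// ErrorType and ApiError mirror the shape of the removed helpers above;
// this is an illustrative reimplementation, not the real types.
type ErrorType string

const (
	ErrorBadData  ErrorType = "bad_data"
	ErrorInternal ErrorType = "internal"
)

type ApiError struct {
	Typ ErrorType
	Err error
}

func (a *ApiError) Error() string { return a.Err.Error() }

func BadRequest(err error) *ApiError { return &ApiError{Typ: ErrorBadData, Err: err} }

// statusFor maps an error type to an HTTP status code, which is what a
// handler layer typically does with these tagged errors.
func statusFor(t ErrorType) int {
	if t == ErrorBadData {
		return http.StatusBadRequest
	}
	return http.StatusInternalServerError
}

func main() {
	apiErr := BadRequest(errors.New("start time must be before end time"))
	fmt.Println(statusFor(apiErr.Typ), apiErr.Error())
}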

View File

@@ -3,8 +3,6 @@ package model
 import (
 	"encoding/base64"
 	"encoding/json"
-	"fmt"
-	"reflect"
 	"time"
 	"github.com/pkg/errors"
@@ -106,165 +104,3 @@ type SubscriptionServerResp struct {
Status string `json:"status"` Status string `json:"status"`
Data Licenses `json:"data"` Data Licenses `json:"data"`
} }
type Plan struct {
Name string `json:"name"`
}
type LicenseDB struct {
ID string `json:"id"`
Key string `json:"key"`
Data string `json:"data"`
}
type LicenseV3 struct {
ID string
Key string
Data map[string]interface{}
PlanName string
Features basemodel.FeatureSet
Status string
IsCurrent bool
ValidFrom int64
ValidUntil int64
}
func extractKeyFromMapStringInterface[T any](data map[string]interface{}, key string) (T, error) {
var zeroValue T
if val, ok := data[key]; ok {
if value, ok := val.(T); ok {
return value, nil
}
return zeroValue, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zeroValue))
}
return zeroValue, fmt.Errorf("%s key is missing", key)
}
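
A standalone sketch of the same generic map-extraction technique (the helper is renamed to extract for brevity; everything else here is illustrative):

package main

import (
	"fmt"
	"reflect"
)

// Same technique as extractKeyFromMapStringInterface above: the type
// parameter drives the type assertion, so one helper serves strings,
// floats, nested maps, and so on.
func extract[T any](data map[string]interface{}, key string) (T, error) {
	var zero T
	val, ok := data[key]
	if !ok {
		return zero, fmt.Errorf("%s key is missing", key)
	}
	typed, ok := val.(T)
	if !ok {
		return zero, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zero))
	}
	return typed, nil
}

func main() {
	payload := map[string]interface{}{"id": "lic-123", "valid_from": 1730899309.0}

	id, err := extract[string](payload, "id")
	fmt.Println(id, err) // lic-123 <nil>

	// JSON numbers decode as float64, so that is the type to assert on,
	// which is why NewLicenseV3 extracts valid_from as float64 above.
	from, err := extract[float64](payload, "valid_from")
	fmt.Println(int64(from), err) // 1730899309 <nil>
}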
func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
var features basemodel.FeatureSet
// extract id from data
licenseID, err := extractKeyFromMapStringInterface[string](data, "id")
if err != nil {
return nil, err
}
delete(data, "id")
// extract key from data
licenseKey, err := extractKeyFromMapStringInterface[string](data, "key")
if err != nil {
return nil, err
}
delete(data, "key")
// extract status from data
status, err := extractKeyFromMapStringInterface[string](data, "status")
if err != nil {
return nil, err
}
planMap, err := extractKeyFromMapStringInterface[map[string]any](data, "plan")
if err != nil {
return nil, err
}
planName, err := extractKeyFromMapStringInterface[string](planMap, "name")
if err != nil {
return nil, err
}
// if license status is inactive then default it to basic
if status == LicenseStatusInactive {
planName = PlanNameBasic
}
featuresFromZeus := basemodel.FeatureSet{}
if _features, ok := data["features"]; ok {
featuresData, err := json.Marshal(_features)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal features data")
}
if err := json.Unmarshal(featuresData, &featuresFromZeus); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal features data")
}
}
switch planName {
case PlanNameTeams:
features = append(features, ProPlan...)
case PlanNameEnterprise:
features = append(features, EnterprisePlan...)
case PlanNameBasic:
features = append(features, BasicPlan...)
default:
features = append(features, BasicPlan...)
}
if len(featuresFromZeus) > 0 {
for _, feature := range featuresFromZeus {
exists := false
for i, existingFeature := range features {
if existingFeature.Name == feature.Name {
features[i] = feature // Replace existing feature
exists = true
break
}
}
if !exists {
features = append(features, feature) // Append if it doesn't exist
}
}
}
data["features"] = features
_validFrom, err := extractKeyFromMapStringInterface[float64](data, "valid_from")
if err != nil {
_validFrom = 0
}
validFrom := int64(_validFrom)
_validUntil, err := extractKeyFromMapStringInterface[float64](data, "valid_until")
if err != nil {
_validUntil = 0
}
validUntil := int64(_validUntil)
return &LicenseV3{
ID: licenseID,
Key: licenseKey,
Data: data,
PlanName: planName,
Features: features,
ValidFrom: validFrom,
ValidUntil: validUntil,
Status: status,
}, nil
}
func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}) (*LicenseV3, error) {
licenseDataWithIdAndKey := data
licenseDataWithIdAndKey["id"] = id
licenseDataWithIdAndKey["key"] = key
return NewLicenseV3(licenseDataWithIdAndKey)
}
func ConvertLicenseV3ToLicenseV2(l *LicenseV3) *License {
planKeyFromPlanName, ok := MapOldPlanKeyToNewPlanName[l.PlanName]
if !ok {
planKeyFromPlanName = Basic
}
return &License{
Key: l.Key,
ActivationId: "",
PlanDetails: "",
FeatureSet: l.Features,
ValidationMessage: "",
IsCurrent: l.IsCurrent,
LicensePlan: LicensePlan{
PlanKey: planKeyFromPlanName,
ValidFrom: l.ValidFrom,
ValidUntil: l.ValidUntil,
Status: l.Status},
}
}
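
The conversion above hinges on a plan-name-to-plan-key lookup with a basic-plan fallback; a self-contained sketch of just that lookup, with the constant values inlined from the plans file below (illustrative code, not part of this diff):

package main

import "fmt"

// Illustrative copy of the mapping consulted by ConvertLicenseV3ToLicenseV2:
// unknown plan names fall back to the basic plan key.
var mapOldPlanKeyToNewPlanName = map[string]string{
	"BASIC":      "BASIC_PLAN",
	"TEAMS":      "PRO_PLAN",
	"ENTERPRISE": "ENTERPRISE_PLAN",
}

func planKeyFor(planName string) string {
	if key, ok := mapOldPlanKeyToNewPlanName[planName]; ok {
		return key
	}
	return "BASIC_PLAN" // fallback, mirroring the !ok branch above
}

func main() {
	fmt.Println(planKeyFor("TEAMS"))     // PRO_PLAN
	fmt.Println(planKeyFor("UNKNOWN_X")) // BASIC_PLAN
}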

View File

@@ -1,170 +0,0 @@
package model
import (
"encoding/json"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/model"
)
func TestNewLicenseV3(t *testing.T) {
testCases := []struct {
name string
data []byte
pass bool
expected *LicenseV3
error error
}{
{
name: "Error for missing license id",
data: []byte(`{}`),
pass: false,
error: errors.New("id key is missing"),
},
{
name: "Error for license id not being a valid string",
data: []byte(`{"id": 10}`),
pass: false,
error: errors.New("id key is not a valid string"),
},
{
name: "Error for missing license key",
data: []byte(`{"id":"does-not-matter"}`),
pass: false,
error: errors.New("key key is missing"),
},
{
name: "Error for invalid string license key",
data: []byte(`{"id":"does-not-matter","key":10}`),
pass: false,
error: errors.New("key key is not a valid string"),
},
{
name: "Error for missing license status",
data: []byte(`{"id":"does-not-matter", "key": "does-not-matter","category":"FREE"}`),
pass: false,
error: errors.New("status key is missing"),
},
{
name: "Error for invalid string license status",
data: []byte(`{"id":"does-not-matter","key": "does-not-matter", "category":"FREE", "status":10}`),
pass: false,
error: errors.New("status key is not a valid string"),
},
{
name: "Error for missing license plan",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE"}`),
pass: false,
error: errors.New("plan key is missing"),
},
{
name: "Error for invalid json license plan",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":10}`),
pass: false,
error: errors.New("plan key is not a valid map[string]interface {}"),
},
{
name: "Error for invalid license plan",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{}}`),
pass: false,
error: errors.New("name key is missing"),
},
{
name: "Parse the entire license properly",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
Key: "does-not-matter-key",
Data: map[string]interface{}{
"plan": map[string]interface{}{
"name": "TEAMS",
},
"category": "FREE",
"status": "ACTIVE",
"valid_from": float64(1730899309),
"valid_until": float64(-1),
},
PlanName: PlanNameTeams,
ValidFrom: 1730899309,
ValidUntil: -1,
Status: "ACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},
},
{
name: "Fallback to basic plan if license status is inactive",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
Key: "does-not-matter-key",
Data: map[string]interface{}{
"plan": map[string]interface{}{
"name": "TEAMS",
},
"category": "FREE",
"status": "INACTIVE",
"valid_from": float64(1730899309),
"valid_until": float64(-1),
},
PlanName: PlanNameBasic,
ValidFrom: 1730899309,
ValidUntil: -1,
Status: "INACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},
},
{
name: "fallback states for validFrom and validUntil",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
Key: "does-not-matter-key",
Data: map[string]interface{}{
"plan": map[string]interface{}{
"name": "TEAMS",
},
"valid_from": 1234.456,
"valid_until": 5678.567,
"category": "FREE",
"status": "ACTIVE",
},
PlanName: PlanNameTeams,
ValidFrom: 1234,
ValidUntil: 5678,
Status: "ACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},
},
}
for _, tc := range testCases {
var licensePayload map[string]interface{}
err := json.Unmarshal(tc.data, &licensePayload)
require.NoError(t, err)
license, err := NewLicenseV3(licensePayload)
if license != nil {
license.Features = make(model.FeatureSet, 0)
delete(license.Data, "features")
}
if tc.pass {
require.NoError(t, err)
require.NotNil(t, license)
assert.Equal(t, tc.expected, license)
} else {
require.Error(t, err)
assert.EqualError(t, err, tc.error.Error())
require.Nil(t, license)
}
}
}

View File

@@ -1,7 +1,6 @@
 package model
 import (
-	"go.signoz.io/signoz/pkg/query-service/constants"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 )

@@ -9,26 +8,9 @@ const SSO = "SSO"
 const Basic = "BASIC_PLAN"
 const Pro = "PRO_PLAN"
 const Enterprise = "ENTERPRISE_PLAN"
-var (
-	PlanNameEnterprise = "ENTERPRISE"
-	PlanNameTeams      = "TEAMS"
-	PlanNameBasic      = "BASIC"
-)
-var (
-	MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
-)
-var (
-	LicenseStatusInactive = "INACTIVE"
-)
 const DisableUpsell = "DISABLE_UPSELL"
 const Onboarding = "ONBOARDING"
 const ChatSupport = "CHAT_SUPPORT"
-const Gateway = "GATEWAY"
-const PremiumSupport = "PREMIUM_SUPPORT"
 var BasicPlan = basemodel.FeatureSet{
 	basemodel.Feature{
@@ -129,34 +111,6 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       Gateway,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       PremiumSupport,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AnomalyDetection,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.HostsInfraMonitoring,
-		Active:     constants.EnableHostsInfraMonitoring(),
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 }

 var ProPlan = basemodel.FeatureSet{
@@ -251,34 +205,6 @@ var ProPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       Gateway,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       PremiumSupport,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AnomalyDetection,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.HostsInfraMonitoring,
-		Active:     constants.EnableHostsInfraMonitoring(),
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 }

 var EnterprisePlan = basemodel.FeatureSet{
@@ -387,32 +313,4 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       Gateway,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       PremiumSupport,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AnomalyDetection,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.HostsInfraMonitoring,
-		Active:     constants.EnableHostsInfraMonitoring(),
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 }

View File

@@ -1,398 +0,0 @@
package rules
import (
"context"
"encoding/json"
"fmt"
"math"
"strings"
"sync"
"time"
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/anomaly"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/model"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
"go.signoz.io/signoz/pkg/query-service/interfaces"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.signoz.io/signoz/pkg/query-service/utils/times"
"go.signoz.io/signoz/pkg/query-service/utils/timestamp"
"go.signoz.io/signoz/pkg/query-service/formatter"
baserules "go.signoz.io/signoz/pkg/query-service/rules"
yaml "gopkg.in/yaml.v2"
)
const (
RuleTypeAnomaly = "anomaly_rule"
)
type AnomalyRule struct {
*baserules.BaseRule
mtx sync.Mutex
reader interfaces.Reader
// querierV2 is used for alerts created after the introduction of the new metrics query builder
querierV2 interfaces.Querier
provider anomaly.Provider
seasonality anomaly.Seasonality
}
func NewAnomalyRule(
id string,
p *baserules.PostableRule,
featureFlags interfaces.FeatureLookup,
reader interfaces.Reader,
cache cache.Cache,
opts ...baserules.RuleOption,
) (*AnomalyRule, error) {
zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))
if p.RuleCondition.CompareOp == baserules.ValueIsBelow {
target := -1 * *p.RuleCondition.Target
p.RuleCondition.Target = &target
}
baseRule, err := baserules.NewBaseRule(id, p, reader, opts...)
if err != nil {
return nil, err
}
t := AnomalyRule{
BaseRule: baseRule,
}
switch strings.ToLower(p.RuleCondition.Seasonality) {
case "hourly":
t.seasonality = anomaly.SeasonalityHourly
case "daily":
t.seasonality = anomaly.SeasonalityDaily
case "weekly":
t.seasonality = anomaly.SeasonalityWeekly
default:
t.seasonality = anomaly.SeasonalityDaily
}
zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String()))
querierOptsV2 := querierV2.QuerierOptions{
Reader: reader,
Cache: cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
}
t.querierV2 = querierV2.NewQuerier(querierOptsV2)
t.reader = reader
if t.seasonality == anomaly.SeasonalityHourly {
t.provider = anomaly.NewHourlyProvider(
anomaly.WithCache[*anomaly.HourlyProvider](cache),
anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.HourlyProvider](reader),
anomaly.WithFeatureLookup[*anomaly.HourlyProvider](featureFlags),
)
} else if t.seasonality == anomaly.SeasonalityDaily {
t.provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](reader),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](featureFlags),
)
} else if t.seasonality == anomaly.SeasonalityWeekly {
t.provider = anomaly.NewWeeklyProvider(
anomaly.WithCache[*anomaly.WeeklyProvider](cache),
anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.WeeklyProvider](reader),
anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](featureFlags),
)
}
return &t, nil
}
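
The provider construction above uses generic functional options (e.g. anomaly.WithCache[*anomaly.HourlyProvider](cache)); a minimal sketch of that pattern with illustrative names (not the anomaly package's API):

package main

import "fmt"

// cacheSetter is the capability an option needs; the type parameter lets one
// With* helper configure any provider type that satisfies it.
type cacheSetter interface{ setCache(name string) }

type Option[T cacheSetter] func(T)

func WithCache[T cacheSetter](name string) Option[T] {
	return func(p T) { p.setCache(name) }
}

type HourlyProvider struct{ cache string }

func (h *HourlyProvider) setCache(name string) { h.cache = name }

// NewHourlyProvider applies each option to a freshly constructed provider,
// the same shape as the constructors used above.
func NewHourlyProvider(opts ...Option[*HourlyProvider]) *HourlyProvider {
	p := &HourlyProvider{}
	for _, opt := range opts {
		opt(p)
	}
	return p
}

func main() {
	p := NewHourlyProvider(WithCache[*HourlyProvider]("redis"))
	fmt.Println(p.cache) // redis
}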
func (r *AnomalyRule) Type() baserules.RuleType {
return RuleTypeAnomaly
}
func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) {
zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.EvalWindow().Milliseconds()), zap.Int64("evalDelay", r.EvalDelay().Milliseconds()))
start := ts.Add(-time.Duration(r.EvalWindow())).UnixMilli()
end := ts.UnixMilli()
if r.EvalDelay() > 0 {
start = start - int64(r.EvalDelay().Milliseconds())
end = end - int64(r.EvalDelay().Milliseconds())
}
// round to minute otherwise we could potentially miss data
start = start - (start % (60 * 1000))
end = end - (end % (60 * 1000))
compositeQuery := r.Condition().CompositeQuery
if compositeQuery.PanelType != v3.PanelTypeGraph {
compositeQuery.PanelType = v3.PanelTypeGraph
}
// default mode
return &v3.QueryRangeParamsV3{
Start: start,
End: end,
Step: int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60)),
CompositeQuery: compositeQuery,
Variables: make(map[string]interface{}, 0),
NoCache: false,
}, nil
}
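
To make the rounding above concrete, here is a tiny sketch of truncating epoch milliseconds to the minute boundary (illustrative code, not part of this diff):

package main

import "fmt"

// Minute rounding as done in prepareQueryRange above: epoch milliseconds are
// truncated to the start of the minute so partially-filled buckets at the
// window edges are not queried.
func roundDownToMinute(ms int64) int64 {
	return ms - (ms % (60 * 1000))
}

func main() {
	ts := int64(1718371234567) // some epoch-millis timestamp
	fmt.Println(roundDownToMinute(ts)) // 1718371200000, i.e. :00.000 of that minute
}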
func (r *AnomalyRule) GetSelectedQuery() string {
return r.Condition().GetSelectedQueryName()
}
func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, ts time.Time) (baserules.Vector, error) {
params, err := r.prepareQueryRange(ts)
if err != nil {
return nil, err
}
err = r.PopulateTemporality(ctx, params)
if err != nil {
return nil, fmt.Errorf("internal error while setting temporality")
}
anomalies, err := r.provider.GetAnomalies(ctx, &anomaly.GetAnomaliesRequest{
Params: params,
Seasonality: r.seasonality,
})
if err != nil {
return nil, err
}
var queryResult *v3.Result
for _, result := range anomalies.Results {
if result.QueryName == r.GetSelectedQuery() {
queryResult = result
break
}
}
var resultVector baserules.Vector
scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON)))
for _, series := range queryResult.AnomalyScores {
smpl, shouldAlert := r.ShouldAlert(*series)
if shouldAlert {
resultVector = append(resultVector, smpl)
}
}
return resultVector, nil
}
func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {
prevState := r.State()
valueFormatter := formatter.FromUnit(r.Unit())
res, err := r.buildAndRunQuery(ctx, ts)
if err != nil {
return nil, err
}
r.mtx.Lock()
defer r.mtx.Unlock()
resultFPs := map[uint64]struct{}{}
var alerts = make(map[uint64]*baserules.Alert, len(res))
for _, smpl := range res {
l := make(map[string]string, len(smpl.Metric))
for _, lbl := range smpl.Metric {
l[lbl.Name] = lbl.Value
}
value := valueFormatter.Format(smpl.V, r.Unit())
threshold := valueFormatter.Format(r.TargetVal(), r.Unit())
zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))
tmplData := baserules.AlertTemplateData(l, value, threshold)
// Inject some convenience variables that are easier to remember for users
// who are not used to Go's templating system.
defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}"
// utility function to apply go template on labels and annotations
expand := func(text string) string {
tmpl := baserules.NewTemplateExpander(
ctx,
defs+text,
"__alert_"+r.Name(),
tmplData,
times.Time(timestamp.FromTime(ts)),
nil,
)
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err)
zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
}
return result
}
lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel)
resultLabels := labels.NewBuilder(smpl.Metric).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel).Labels()
for name, value := range r.Labels().Map() {
lb.Set(name, expand(value))
}
lb.Set(labels.AlertNameLabel, r.Name())
lb.Set(labels.AlertRuleIdLabel, r.ID())
lb.Set(labels.RuleSourceLabel, r.GeneratorURL())
annotations := make(labels.Labels, 0, len(r.Annotations().Map()))
for name, value := range r.Annotations().Map() {
annotations = append(annotations, labels.Label{Name: name, Value: expand(value)})
}
if smpl.IsMissing {
lb.Set(labels.AlertNameLabel, "[No data] "+r.Name())
}
lbs := lb.Labels()
h := lbs.Hash()
resultFPs[h] = struct{}{}
if _, ok := alerts[h]; ok {
zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
return nil, err
}
alerts[h] = &baserules.Alert{
Labels: lbs,
QueryResultLables: resultLabels,
Annotations: annotations,
ActiveAt: ts,
State: model.StatePending,
Value: smpl.V,
GeneratorURL: r.GeneratorURL(),
Receivers: r.PreferredChannels(),
Missing: smpl.IsMissing,
}
}
zap.L().Info("number of alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))
// alerts[h] is ready, add or update active list now
for h, a := range alerts {
// Check whether we already have alerting state for the identifying label set.
// Update the last value and annotations if so, create a new alert entry otherwise.
if alert, ok := r.Active[h]; ok && alert.State != model.StateInactive {
alert.Value = a.Value
alert.Annotations = a.Annotations
alert.Receivers = r.PreferredChannels()
continue
}
r.Active[h] = a
}
itemsToAdd := []model.RuleStateHistory{}
// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
for fp, a := range r.Active {
labelsJSON, err := json.Marshal(a.QueryResultLables)
if err != nil {
zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
}
if _, ok := resultFPs[fp]; !ok {
// If the alert was previously firing, keep it around for a given
// retention time so it is reported as resolved to the AlertManager.
if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > baserules.ResolvedRetention) {
delete(r.Active, fp)
}
if a.State != model.StateInactive {
a.State = model.StateInactive
a.ResolvedAt = ts
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: model.StateInactive,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})
}
continue
}
if a.State == model.StatePending && ts.Sub(a.ActiveAt) >= r.HoldDuration() {
a.State = model.StateFiring
a.FiredAt = ts
state := model.StateFiring
if a.Missing {
state = model.StateNoData
}
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: state,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})
}
}
currentState := r.State()
overallStateChanged := currentState != prevState
for idx, item := range itemsToAdd {
item.OverallStateChanged = overallStateChanged
item.OverallState = currentState
itemsToAdd[idx] = item
}
r.RecordRuleStateHistory(ctx, prevState, currentState, itemsToAdd)
return len(r.Active), nil
}
func (r *AnomalyRule) String() string {
ar := baserules.PostableRule{
AlertName: r.Name(),
RuleCondition: r.Condition(),
EvalWindow: baserules.Duration(r.EvalWindow()),
Labels: r.Labels().Map(),
Annotations: r.Annotations().Map(),
PreferredChannels: r.PreferredChannels(),
}
byt, err := yaml.Marshal(ar)
if err != nil {
return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
}
return string(byt)
}
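
The expand helper in Eval above prepends variable definitions so alert authors can write {{$value}} instead of {{.Value}}; a self-contained sketch of that trick using Go's text/template (the types here are simplified stand-ins for the rule's template data):

package main

import (
	"fmt"
	"os"
	"text/template"
)

// Data mirrors the shape of the alert template data used above
// (labels, formatted value, formatted threshold).
type Data struct {
	Labels    map[string]string
	Value     string
	Threshold string
}

func main() {
	// The defs prefix binds short $-variables to the struct fields, which is
	// exactly the convenience the rule's expand() helper injects.
	defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}"
	userText := "service {{$labels.service}} is at {{$value}} (threshold {{$threshold}})"

	tmpl := template.Must(template.New("alert").Parse(defs + userText))
	data := Data{
		Labels:    map[string]string{"service": "frontend"},
		Value:     "0.93",
		Threshold: "0.90",
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		fmt.Println("expand failed:", err)
	}
	fmt.Println()
}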

View File

@@ -1,196 +0,0 @@
package rules
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baserules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)
func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {
rules := make([]baserules.Rule, 0)
var task baserules.Task
ruleId := baserules.RuleIdFromTaskName(opts.TaskName)
if opts.Rule.RuleType == baserules.RuleTypeThreshold {
// create a threshold rule
tr, err := baserules.NewThresholdRule(
ruleId,
opts.Rule,
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
)
if err != nil {
return task, err
}
rules = append(rules, tr)
// create ch rule task for evaluation
task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)
} else if opts.Rule.RuleType == baserules.RuleTypeProm {
// create promql rule
pr, err := baserules.NewPromRule(
ruleId,
opts.Rule,
opts.Logger,
opts.Reader,
opts.ManagerOpts.PqlEngine,
)
if err != nil {
return task, err
}
rules = append(rules, pr)
// create promql rule task for evaluation
task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)
} else if opts.Rule.RuleType == baserules.RuleTypeAnomaly {
// create anomaly rule
ar, err := NewAnomalyRule(
ruleId,
opts.Rule,
opts.FF,
opts.Reader,
opts.Cache,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
)
if err != nil {
return task, err
}
rules = append(rules, ar)
// create anomaly rule task for evaluation
task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)
} else {
return nil, fmt.Errorf("unsupported rule type %s. Supported types: %s, %s", opts.Rule.RuleType, baserules.RuleTypeProm, baserules.RuleTypeThreshold)
}
return task, nil
}
// TestNotification prepares a dummy rule for given rule parameters and
// sends a test notification. Returns the alert count and error (if any).
func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.ApiError) {
ctx := context.Background()
if opts.Rule == nil {
return 0, basemodel.BadRequest(fmt.Errorf("rule is required"))
}
parsedRule := opts.Rule
var alertname = parsedRule.AlertName
if alertname == "" {
// alertname is not mandatory for testing, so picking
// a random string here
alertname = uuid.New().String()
}
// append name to indicate this is test alert
parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, baserules.TestAlertPostFix)
var rule baserules.Rule
var err error
if parsedRule.RuleType == baserules.RuleTypeThreshold {
// add special labels for test alerts
parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
parsedRule.Labels[labels.RuleSourceLabel] = ""
parsedRule.Labels[labels.AlertRuleIdLabel] = ""
// create a threshold rule
rule, err = baserules.NewThresholdRule(
alertname,
parsedRule,
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, basemodel.BadRequest(err)
}
} else if parsedRule.RuleType == baserules.RuleTypeProm {
// create promql rule
rule, err = baserules.NewPromRule(
alertname,
parsedRule,
opts.Logger,
opts.Reader,
opts.ManagerOpts.PqlEngine,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, basemodel.BadRequest(err)
}
} else if parsedRule.RuleType == baserules.RuleTypeAnomaly {
// create anomaly rule
rule, err = NewAnomalyRule(
alertname,
parsedRule,
opts.FF,
opts.Reader,
opts.Cache,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, basemodel.BadRequest(err)
}
} else {
return 0, basemodel.BadRequest(fmt.Errorf("failed to derive ruletype with given information"))
}
// set timestamp to current utc time
ts := time.Now().UTC()
count, err := rule.Eval(ctx, ts)
if err != nil {
zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
return 0, basemodel.InternalError(fmt.Errorf("rule evaluation failed"))
}
alertsFound, ok := count.(int)
if !ok {
return 0, basemodel.InternalError(fmt.Errorf("something went wrong"))
}
rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), opts.NotifyFunc)
return alertsFound, nil
}
// newTask returns an appropriate task group for the
// rule type
func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, ruleDB baserules.RuleDB) baserules.Task {
if taskType == baserules.TaskTypeCh {
return baserules.NewRuleTask(name, "", frequency, rules, opts, notify, ruleDB)
}
return baserules.NewPromRuleTask(name, "", frequency, rules, opts, notify, ruleDB)
}
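
PrepareTaskFunc and newTask above dispatch on a rule-type value to pick an evaluation backend; a compact sketch of the same dispatch pattern (types simplified, names illustrative):

package main

import "fmt"

type Task interface{ Run() }

type chTask struct{ name string }

func (t chTask) Run() { fmt.Println("clickhouse task:", t.name) }

type promTask struct{ name string }

func (t promTask) Run() { fmt.Println("promql task:", t.name) }

// newTask mirrors the helper above: one constructor per evaluation backend,
// selected by task type, behind a common Task interface.
func newTask(taskType, name string) Task {
	if taskType == "ch" {
		return chTask{name: name}
	}
	return promTask{name: name}
}

func main() {
	newTask("ch", "threshold-rule").Run()
	newTask("prom", "promql-rule").Run()
}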

View File

@@ -190,7 +190,7 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
 		} else if apiErr != nil {
 			// sleeping for exponential backoff
 			sleepDuration := RetryInterval * time.Duration(i)
-			zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Error(apiErr.Err))
+			zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Any("apiErr", apiErr))
 			time.Sleep(sleepDuration)
 		} else {
 			break
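
Note that the retry above sleeps RetryInterval * i, which grows linearly with the attempt index despite the function's name; for contrast, a minimal sketch of a genuinely exponential backoff loop (illustrative, not the manager's code):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry doubles the wait after each failed attempt: base, 2*base, 4*base, ...
func retry(attempts int, base time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		sleep := base * time.Duration(1<<i) // exponential growth
		fmt.Println("retrying in", sleep)
		time.Sleep(sleep)
	}
	return fmt.Errorf("all %d attempts failed: %w", attempts, err)
}

func main() {
	calls := 0
	err := retry(4, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("upload failed")
		}
		return nil
	})
	fmt.Println(err, "calls:", calls)
}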

View File

@@ -9,7 +9,6 @@ const config: Config.InitialOptions = {
 	modulePathIgnorePatterns: ['dist'],
 	moduleNameMapper: {
 		'\\.(css|less|scss)$': '<rootDir>/__mocks__/cssMock.ts',
-		'\\.md$': '<rootDir>/__mocks__/cssMock.ts',
 	},
 	globals: {
 		extensionsToTreatAsEsm: ['.ts'],

View File

@@ -34,15 +34,15 @@
 		"@dnd-kit/core": "6.1.0",
 		"@dnd-kit/modifiers": "7.0.0",
 		"@dnd-kit/sortable": "8.0.0",
-		"@grafana/data": "^11.2.3",
+		"@grafana/data": "^9.5.2",
 		"@mdx-js/loader": "2.3.0",
 		"@mdx-js/react": "2.3.0",
 		"@monaco-editor/react": "^4.3.1",
 		"@radix-ui/react-tabs": "1.0.4",
 		"@radix-ui/react-tooltip": "1.0.7",
-		"@sentry/react": "8.41.0",
-		"@sentry/webpack-plugin": "2.22.6",
-		"@signozhq/design-tokens": "1.1.4",
+		"@sentry/react": "7.102.1",
+		"@sentry/webpack-plugin": "2.16.0",
+		"@signozhq/design-tokens": "0.0.8",
 		"@uiw/react-md-editor": "3.23.5",
 		"@visx/group": "3.3.0",
 		"@visx/shape": "3.5.0",

@@ -51,7 +51,7 @@
 		"ansi-to-html": "0.7.2",
 		"antd": "5.11.0",
 		"antd-table-saveas-excel": "2.2.1",
-		"axios": "1.7.7",
+		"axios": "1.6.4",
 		"babel-eslint": "^10.1.0",
 		"babel-jest": "^29.6.4",
 		"babel-loader": "9.1.3",

@@ -68,7 +68,7 @@
 		"css-loader": "5.0.0",
 		"css-minimizer-webpack-plugin": "5.0.1",
 		"dayjs": "^1.10.7",
-		"dompurify": "3.1.3",
+		"dompurify": "3.0.0",
 		"dotenv": "8.2.0",
 		"event-source-polyfill": "1.0.31",
 		"eventemitter3": "5.0.1",

@@ -76,7 +76,7 @@
 		"fontfaceobserver": "2.3.0",
 		"history": "4.10.1",
 		"html-webpack-plugin": "5.5.0",
-		"http-proxy-middleware": "3.0.3",
+		"http-proxy-middleware": "2.0.6",
 		"i18next": "^21.6.12",
 		"i18next-browser-languagedetector": "^6.1.3",
 		"i18next-http-backend": "^1.3.2",

@@ -87,10 +87,7 @@
 		"lodash-es": "^4.17.21",
 		"lucide-react": "0.379.0",
 		"mini-css-extract-plugin": "2.4.5",
-		"overlayscrollbars": "^2.8.1",
-		"overlayscrollbars-react": "^0.5.6",
 		"papaparse": "5.4.1",
-		"posthog-js": "1.160.3",
 		"rc-tween-one": "3.0.6",
 		"react": "18.2.0",
 		"react-addons-update": "15.6.3",

@@ -109,7 +106,6 @@
 		"react-query": "3.39.3",
 		"react-redux": "^7.2.2",
 		"react-router-dom": "^5.2.0",
-		"react-router-dom-v5-compat": "6.27.0",
 		"react-syntax-highlighter": "15.5.0",
 		"react-use": "^17.3.2",
 		"react-virtuoso": "4.0.3",

@@ -124,11 +120,11 @@
 		"ts-node": "^10.2.1",
 		"tsconfig-paths-webpack-plugin": "^3.5.1",
 		"typescript": "^4.0.5",
-		"uplot": "1.6.31",
+		"uplot": "1.6.26",
 		"uuid": "^8.3.2",
 		"web-vitals": "^0.2.4",
-		"webpack": "5.94.0",
-		"webpack-dev-server": "^4.15.2",
+		"webpack": "5.88.2",
+		"webpack-dev-server": "^4.15.1",
 		"webpack-retry-chunk-load-plugin": "3.1.1",
 		"xstate": "^4.31.0"
 	},

@@ -208,6 +204,7 @@
 		"eslint-plugin-sonarjs": "^0.12.0",
 		"husky": "^7.0.4",
 		"is-ci": "^3.0.1",
+		"jest-playwright-preset": "^1.7.2",
 		"jest-styled-components": "^7.0.8",
 		"lint-staged": "^12.5.0",
 		"msw": "1.3.2",

@@ -240,8 +237,6 @@
 		"debug": "4.3.4",
 		"semver": "7.5.4",
 		"xml2js": "0.5.0",
-		"phin": "^3.7.1",
-		"body-parser": "1.20.3",
-		"http-proxy-middleware": "3.0.3"
+		"phin": "^3.7.1"
 	}
 }

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="none"><path fill="#A65F3E" d="M8.04 10.331a.41.41 0 0 1-.414-.414.4.4 0 0 1 .121-.292l8.071-8.071a.414.414 0 1 1 .585.585l-8.07 8.071a.4.4 0 0 1-.293.121"/><path fill="#A65F3E" d="M16.11 1.5c.09 0 .178.034.245.101a.35.35 0 0 1 0 .492l-8.07 8.07a.346.346 0 0 1-.49 0 .35.35 0 0 1 0-.49l8.07-8.072a.35.35 0 0 1 .245-.101m0-.133a.48.48 0 0 0-.338.14L7.7 9.578a.47.47 0 0 0-.14.34.475.475 0 0 0 .478.478c.13 0 .25-.05.34-.14l8.07-8.071a.48.48 0 0 0-.339-.818"/><path fill="#FFE082" d="m1.701 12.438 3.89 3.889c.873-.963 1.62-2.057 2.023-3.313.03-.091.034-.24.128-.359.451-.566 1.865-2.008.706-3.167-1.106-1.106-2.438.227-2.994.686-.17.14-.384.228-.606.276-1.493.326-3.034 1.869-3.147 1.988"/><path fill="#FFE082" d="M8.385 8.577a.62.62 0 0 1 .393-.085c.098.018.237.135.38.28.144.143.28.304.32.408s-.005.242-.005.242c-.116.23-.383.69-.6.624-.24-.074-.482-.305-.66-.479a1.5 1.5 0 0 1-.276-.328c-.096-.177.008-.324.129-.447.086-.082.232-.17.319-.215"/><path fill="#F9C248" d="M8.327 8.975c.116.11.21.243.339.338.252.185.455.097.62-.052.049-.044.122-.1.17-.055a.1.1 0 0 1 .025.051.45.45 0 0 1-.045.273 1.3 1.3 0 0 1-.433.529c-.032.022-.07.044-.11.032a.12.12 0 0 1-.056-.045c-.207-.244-.37-.533-.626-.724-.103-.076-.364-.132-.298-.303.1-.262.317-.137.414-.044"/><path fill="#F9C248" d="M7.614 13.014c.028-.091.033-.24.127-.359.515-.645 1.223-1.38 1.145-2.275-.01-.123-.169-.75-.342-.514-.04.052-.024.315-.03.379-.1 1.172-1.02 1.821-1.19 2.024s-.164.393-.31.695a5 5 0 0 1-.61.947c-.379.47-.825.88-1.286 1.27a.8.8 0 0 0-.203.217c-.131.241.153.406.305.558l.369.368c.873-.961 1.62-2.055 2.025-3.31"/><path fill="#E2A610" d="M5.537 15.809c-.1-.157-.242-.3-.317-.458a.24.24 0 0 1-.03-.123c.01-.08.13-.15.187-.198q.129-.108.254-.22c.162-.149.314-.314.419-.509.017-.031.032-.07.016-.102-.035-.065-.238.152-.275.186-.105.092-.208.187-.318.272-.146.113-.422.304-.618.213-.1-.046-.19-.169-.263-.249-.084-.094-.164-.191-.252-.283a17 17 0 0 0-.592-.582c-.05-.046-.06-.066-.003-.122a10 10 0 0 0 .546-.58c.022-.025.044-.067.017-.09-.018-.015-.048-.004-.07.007-.26.138-.467.354-.692.544-.055.046-.214-.13-.249-.158-.092-.073-.154-.102-.046-.21.484-.49.972-.946 1.554-1.323.107-.07.22-.14.28-.253-.01-.03-.054-.026-.085-.015-.807.29-1.89 1.291-1.983 1.38-.162.158-.454-.206-.885-.481-.147-.094 0-.235.038-.279.26-.307.603-.642.603-.642-.127.013-.956.76-1.054.873-.084.097-.17.184-.175.318a.52.52 0 0 0 .107.325c.77 1.05 2.586 2.794 3.23 3.253.384.274.502.224.659.068a.35.35 0 0 0 .105-.3.65.65 0 0 0-.108-.263"/><path fill="#A65F3E" d="M8.835 10.176s-.438-.825-1.017-1.074c0 0-.02-.054.02-.1.052-.057.12-.07.157-.046.427.265.812.619 1.007 1.102.039.093-.131.23-.167.118"/><path fill="#F44336" d="M7.64 12.88c-.528-.818-1.63-1.937-2.46-2.524-.066-.046.204-.204.272-.156a9.7 9.7 0 0 1 2.31 2.398c.045.067-.091.33-.123.282M8.193 12.078c-.506-.71-1.521-1.738-2.238-2.312-.062-.05.182-.22.232-.181.755.602 1.668 1.499 2.18 2.263.037.057-.138.282-.174.23"/></svg>


View File

@@ -1 +0,0 @@
<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_4344_1236)" stroke="#C0C1C3" stroke-width="1.167" stroke-linecap="round" stroke-linejoin="round"><path d="M4.667 1.167H2.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V2.333c0-.644-.522-1.166-1.166-1.166zM8.167 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M11.667 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M5.833 10.5H2.917c-.992 0-1.75-.758-1.75-1.75v-.583"/><path d="M4.083 12.25l1.75-1.75-1.75-1.75M11.667 8.167H9.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V9.333c0-.644-.522-1.166-1.166-1.166z"/></g><defs><clipPath id="prefix__clip0_4344_1236"><path fill="#fff" d="M0 0h14v14H0z"/></clipPath></defs></svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="16" fill="none"><path fill="#616161" fill-rule="evenodd" d="M8.096 2.885H4.372V2.51h3.724zM8.096 4.79H4.372v-.375h3.724z" clip-rule="evenodd"/><path fill="#9E9E9E" d="M7.098 15.539H5.662V.936s.134-.311.719-.311.719.311.719.311v14.603z"/><path fill="#757575" d="M6.73.671V12.47H5.662v1.074c.181.001.345.023.493.055.336.074.576.37.576.714v1.227H7.1V.936c-.002 0-.08-.179-.37-.265"/><path fill="#2196F3" d="M10.58.54a3.03 3.03 0 0 0-3.028 3.038 3.02 3.02 0 0 0 3.027 3.028 3.025 3.025 0 0 0 3.028-3.028A3.035 3.035 0 0 0 10.579.54"/><path fill="#fff" d="M11.902 1.671c-.19-.048-.569-.098-1.321-.098-.753 0-1.132.05-1.322.098-.112.029-.488.185-.488.606v2.598c0 .142.115.258.258.258h.095v.288c0 .084.068.151.152.151h.306a.15.15 0 0 0 .151-.151v-.288h1.693v.288c0 .084.067.151.15.151h.307a.15.15 0 0 0 .151-.151v-.288h.098a.26.26 0 0 0 .259-.258V2.277c0-.404-.377-.579-.49-.606m-2.139.206c0-.064.051-.115.115-.115h1.403c.063 0 .115.051.115.115v.204a.115.115 0 0 1-.115.115H9.878a.115.115 0 0 1-.115-.115zm.024 2.736a.08.08 0 0 1-.078.078h-.308a.264.264 0 0 1-.264-.264v-.139c0-.042.035-.077.077-.077h.31c.144 0 .263.117.263.264zm2.235-.186a.264.264 0 0 1-.264.264h-.309a.08.08 0 0 1-.077-.078v-.138c0-.145.117-.264.264-.264h.308c.043 0 .078.035.078.077zm.07-1.129c0 .168-.363.46-1.513.46s-1.512-.27-1.512-.46v-.767c0-.05.05-.175.175-.175h2.695c.125 0 .155.126.155.175z"/><path fill="#F5F5F5" d="M8.61 12.867H4.15a.285.285 0 0 1-.285-.285v-5.15c0-.158.127-.285.285-.285h4.457c.158 0 .285.127.285.285v5.15a.285.285 0 0 1-.284.285"/><path fill="#82AEC0" d="M8.128 12.015H4.632l-.01-4.07H8.12z" opacity=".8"/><path fill="#F5F5F5" fill-rule="evenodd" d="M6.246 12.07V7.945h.25v4.123z" clip-rule="evenodd"/><path fill="#616161" d="M6.246 7.946H4.622v.34h1.624z"/><path fill="#F5F5F5" fill-rule="evenodd" d="M8.142 11.307H4.618v-.125h3.524zM8.142 10.482H4.618v-.125h3.524zM8.12 9.657H4.621v-.125H8.12zM8.12 8.833H4.617v-.125H8.12z" clip-rule="evenodd"/><path fill="#616161" d="M8.118 9.426H6.495v.34h1.623zM6.253 10.25H4.635v.34h1.618z"/><path fill="#9E9E9E" fill-rule="evenodd" d="M4.15 7.334a.097.097 0 0 0-.097.098v5.15c0 .054.044.097.098.097h4.458a.097.097 0 0 0 .097-.097v-5.15a.097.097 0 0 0-.098-.098zm-.472.098c0-.261.212-.473.473-.473h4.457c.261 0 .473.212.473.473v5.15c0 .26-.211.472-.472.472H4.151a.47.47 0 0 1-.473-.472z" clip-rule="evenodd"/><path fill="#757575" d="M4.17 12.682c-.194.017-.194-.11-.194-.145V7.493c0-.141.115-.256.256-.256H8.56c.118 0 .172.071.148.216 0 0-.015-.092-.128-.092H4.233a.133.133 0 0 0-.132.132v5.045c0 .117.069.144.069.144"/><path fill="#FFCA28" d="M4.9 1.775H.642v3.7h4.26z"/><path fill="#9E9E9E" d="M4.663 1.775c.132 0 .238.106.238.237v3.225a.237.237 0 0 1-.238.237H.88a.237.237 0 0 1-.238-.237V2.012c0-.131.107-.237.238-.237zm0-.25H.88a.49.49 0 0 0-.488.487v3.225c0 .269.22.487.488.487h3.783a.49.49 0 0 0 .488-.487V2.012a.487.487 0 0 0-.488-.487"/><path fill="#FFFDE7" fill-rule="evenodd" d="M4.902 3.11H.642v-.25h4.26zM4.902 4.388H.642v-.25h4.26z" clip-rule="evenodd"/><path fill="#757575" d="M1.975 2.186H.904v.282h1.07zM1.711 4.777H.904v.283h.807zM4.552 4.777h-.807v.283h.807zM4.552 2.186h-.807v.282h.807zM3.795 3.482H3.33v.282h.465zM4.552 3.482h-.465v.282h.465zM2.388 3.482H.904v.282h1.484z"/></svg>


View File

@@ -1 +0,0 @@
<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_4062_7291)" stroke-width="1.167" stroke-linecap="round" stroke-linejoin="round"><path d="M7 12.833A5.833 5.833 0 107 1.167a5.833 5.833 0 000 11.666z" fill="#E5484D" stroke="#E5484D"/><path d="M8.75 5.25l-3.5 3.5M5.25 5.25l3.5 3.5" stroke="#121317"/></g><defs><clipPath id="prefix__clip0_4062_7291"><path fill="#fff" d="M0 0h14v14H0z"/></clipPath></defs></svg>


File diff suppressed because one or more lines are too long


Binary file not shown.


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="2.94" y1="3.74" x2="8.67" y2="3.74" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="b" x1="9.13" y1="3.79" x2="14.85" y2="3.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="c" x1=".01" y1="9.12" x2="5.73" y2="9.12" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="d" x1="6.18" y1="9.08" x2="11.9" y2="9.08" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="e" x1="12.35" y1="9.13" x2="18.08" y2="9.13" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="f" x1="2.87" y1="14.56" x2="8.6" y2="14.56" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="g" x1="9.05" y1="14.6" x2="14.78" y2="14.6" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient></defs><path fill="url(#a)" d="M5.8 1.22l-2.86.53v3.9l2.86.61 2.87-1.15V2.2L5.8 1.22z"/><path d="M5.91 6.2l2.62-1.06A.2.2 0 008.65 5V2.36a.21.21 0 00-.13-.18l-2.65-.9h-.12l-2.6.48a.2.2 0 00-.15.18v3.53a.19.19 0 00.15.19l2.63.55a.32.32 0 00.13-.01z" fill="none"/><path d="M2.94 1.75v3.9l2.89.61v-5zm1.22 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2l.93-.16z" fill="#341a6e"/><path fill="url(#b)" d="M11.99 1.27l-2.86.53v3.9l2.86.61 2.86-1.16v-2.9l-2.86-.98z"/><path d="M9.13 1.8v3.9l2.87.61v-5zm1.21 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2.05l.93-.17z" fill="#341a6e"/><path fill="url(#c)" d="M2.87 6.6l-2.86.53v3.9l2.86.61 2.87-1.15V7.58L2.87 6.6z"/><path d="M0 7.13V11l2.89.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.27.26l-.93-.15V7.38l.93-.16z" fill="#341a6e"/><path fill="url(#d)" d="M9.04 6.56l-2.86.53v3.9l2.86.62 2.86-1.16V7.54l-2.86-.98z"/><path d="M6.18 7.09V11l2.88.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.34l.93-.16z" fill="#341a6e"/><path fill="url(#e)" d="M15.21 6.61l-2.86.53v3.9l2.86.61 2.87-1.15V7.59l-2.87-.98z"/><path d="M12.35 7.14V11l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.39l.93-.16z" fill="#341a6e"/><path fill="url(#f)" d="M5.73 12.04l-2.86.52v3.9l2.86.62 2.87-1.16v-2.9l-2.87-.98z"/><path d="M5.84 17l2.61-1a.18.18 0 00.12-.18v-2.6a.2.2 0 00-.13-.22l-2.64-.9a.17.17 0 00-.12 0l-2.6.47a.19.19 0 00-.16.19v3.54a.19.19 0 00.15.19L5.7 17a.23.23 0 00.14 0z" fill="none"/><path d="M2.87 12.56v3.9l2.89.62V12zm1.22 3.61L3.28 16v-3l.81-.14zm1.26.23l-.93-.15v-3.44l.93-.16z" fill="#341a6e"/><path fill="url(#g)" d="M11.91 12.08l-2.86.53v3.9l2.86.61 2.87-1.15v-2.91l-2.87-.98z"/><path d="M9.05 12.61v3.9l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15v-3.43l.93-.16z" fill="#341a6e"/></svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="b" x1="4.4" y1="11.48" x2="4.37" y2="7.53" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="c" x1="10.13" y1="15.45" x2="10.13" y2="11.9" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="d" x1="14.18" y1="11.15" x2="14.18" y2="7.38" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><radialGradient id="a" cx="13428.81" cy="3518.86" r="56.67" gradientTransform="matrix(.15 0 0 .15 -2005.33 -518.83)" gradientUnits="userSpaceOnUse"><stop offset=".18" stop-color="#5ea0ef"/><stop offset="1" stop-color="#0078d4"/></radialGradient></defs><path d="M14.21 15.72A8.5 8.5 0 013.79 2.28l.09-.06a8.5 8.5 0 0110.33 13.5" fill="url(#a)"/><path d="M6.69 7.23a13 13 0 018.91-3.58 8.47 8.47 0 00-1.49-1.44 14.34 14.34 0 00-4.69 1.1 12.54 12.54 0 00-4.08 2.82 2.76 2.76 0 011.35 1.1zM2.48 10.65a17.86 17.86 0 00-.83 2.62 7.82 7.82 0 00.62.92c.18.23.35.44.55.65a17.94 17.94 0 011.08-3.47 2.76 2.76 0 01-1.42-.72z" fill="#fff" opacity=".6"/><path d="M3.46 6.11a12 12 0 01-.69-2.94 8.15 8.15 0 00-1.1 1.45A12.69 12.69 0 002.24 7a2.69 2.69 0 011.22-.89z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><path d="M8.36 13.67a1.77 1.77 0 01.54-1.27 11.88 11.88 0 01-2.53-1.86 2.74 2.74 0 01-1.49.83 13.1 13.1 0 001.45 1.28 12.12 12.12 0 002.05 1.25 1.79 1.79 0 01-.02-.23zM14.66 13.88a12 12 0 01-2.76-.32.41.41 0 010 .11 1.75 1.75 0 01-.51 1.24 13.69 13.69 0 003.42.24A8.21 8.21 0 0016 13.81a11.5 11.5 0 01-1.34.07z" fill="#f2f2f2" opacity=".55"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/><path d="M12.32 8.93a1.83 1.83 0 01.61-1 25.5 25.5 0 01-4.46-4.14 16.91 16.91 0 01-2-2.92 7.64 7.64 0 00-1.09.42 18.14 18.14 0 002.15 3.18 26.44 26.44 0 004.79 4.46z" fill="#f2f2f2" opacity=".7"/><circle cx="14.18" cy="9.27" r="1.89" fill="url(#d)"/><path d="M17.35 10.54l-.35-.17-.3-.16h-.06l-.26-.21h-.07L16 9.8a1.76 1.76 0 01-.64.92c.12.08.25.15.38.22l.08.05.35.19.86.45a8.63 8.63 0 00.29-1.11z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/></svg>


File diff suppressed because one or more lines are too long


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="b27f1ad0-7d11-4247-9da3-91bce6211f32" x1="8.798" y1="8.703" x2="14.683" y2="8.703" gradientUnits="userSpaceOnUse"><stop offset="0.001" stop-color="#773adc" /><stop offset="1" stop-color="#552f99" /></linearGradient><linearGradient id="b2f92112-4ca9-4b17-a019-c9f26c1a4a8f" x1="5.764" y1="3.777" x2="5.764" y2="13.78" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#a67af4" /><stop offset="0.999" stop-color="#773adc" /></linearGradient></defs><g id="b8a0486a-5501-4d92-b540-a766c4b3b548"><g><g><g><path d="M16.932,11.578a8.448,8.448,0,0,1-7.95,5.59,8.15,8.15,0,0,1-2.33-.33,2.133,2.133,0,0,0,.18-.83c.01,0,.03.01.04.01a7.422,7.422,0,0,0,2.11.3,7.646,7.646,0,0,0,6.85-4.28l.01-.01Z" fill="#32bedd" /><path d="M3.582,14.068a2.025,2.025,0,0,0-.64.56,8.6,8.6,0,0,1-1.67-2.44l1.04.23v.26a.6.6,0,0,0,.47.59l.14.03a6.136,6.136,0,0,0,.62.73Z" fill="#32bedd" /><path d="M12.352.958a2.28,2.28,0,0,0-.27.81c-.02-.01-.05-.02-.07-.03a7.479,7.479,0,0,0-3.03-.63,7.643,7.643,0,0,0-5.9,2.8l-.29.06a.6.6,0,0,0-.48.58v.46l-1.02.19A8.454,8.454,0,0,1,8.982.268,8.6,8.6,0,0,1,12.352.958Z" fill="#32bedd" /><path d="M16.872,5.7l-1.09-.38a6.6,6.6,0,0,0-.72-1.16c-.02-.03-.04-.05-.05-.07a2.083,2.083,0,0,0,.72-.45A7.81,7.81,0,0,1,16.872,5.7Z" fill="#32bedd" /><path d="M10.072,11.908l2.54.56L8.672,14.1c-.02,0-.03.01-.05.01a.154.154,0,0,1-.15-.15V3.448a.154.154,0,0,1,.15-.15.09.09,0,0,1,.05.01l4.46,1.56-3.05.57a.565.565,0,0,0-.44.54v5.4A.537.537,0,0,0,10.072,11.908Z" fill="#fff" /><g><g id="e918f286-5032-4942-ad29-ea17e6f1cc90"><path d="M1.1,5.668l1.21-.23v6.55l-1.23-.27-.99-.22a.111.111,0,0,1-.09-.12v-5.4a.12.12,0,0,1,.09-.12Z" fill="#a67af4" /></g><g><g id="a47a99dd-4d47-4c70-8c42-c5ac274ce496"><g><path d="M10.072,11.908l2.54.56L8.672,14.1c-.02,0-.03.01-.05.01a.154.154,0,0,1-.15-.15V3.448a.154.154,0,0,1,.15-.15.09.09,0,0,1,.05.01l4.46,1.56-3.05.57a.565.565,0,0,0-.44.54v5.4A.537.537,0,0,0,10.072,11.908Z" fill="url(#b27f1ad0-7d11-4247-9da3-91bce6211f32)" /><path d="M8.586,3.3,2.878,4.378a.177.177,0,0,0-.14.175V12.68a.177.177,0,0,0,.137.174L8.581,14.1a.176.176,0,0,0,.21-.174V3.478A.175.175,0,0,0,8.619,3.3Z" fill="url(#b2f92112-4ca9-4b17-a019-c9f26c1a4a8f)" /></g></g><polygon points="5.948 4.921 5.948 12.483 7.934 12.814 7.934 4.564 5.948 4.921" fill="#b796f9" opacity="0.5" /><polygon points="3.509 5.329 3.509 11.954 5.238 12.317 5.238 5.031 3.509 5.329" fill="#b796f9" opacity="0.5" /></g></g></g><path d="M16,2.048a1.755,1.755,0,1,1-1.76-1.76A1.756,1.756,0,0,1,16,2.048Z" fill="#32bedd" /><circle cx="4.65" cy="15.973" r="1.759" fill="#32bedd" /></g><path d="M18,6.689v3.844a.222.222,0,0,1-.133.2l-.766.316-3.07,1.268-.011,0a.126.126,0,0,1-.038,0,.1.1,0,0,1-.1-.1V5.234a.1.1,0,0,1,.054-.088l0,0,.019,0a.031.031,0,0,1,.019,0,.055.055,0,0,1,.034.008l.011,0,.012,0L17.05,6.2l.8.282A.213.213,0,0,1,18,6.689Z" fill="#773adc" /><path d="M13.959,5.14l-3.8.715a.118.118,0,0,0-.093.117v5.409a.118.118,0,0,0,.091.116l3.8.831a.115.115,0,0,0,.137-.09.109.109,0,0,0,0-.026V5.256a.117.117,0,0,0-.115-.118A.082.082,0,0,0,13.959,5.14Z" fill="#a67af4" /></g></g></svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="-175.993" y1="-343.723" x2="-175.993" y2="-359.232" gradientTransform="matrix(1.156 0 0 1.029 212.573 370.548)" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#fea11b"/><stop offset=".284" stop-color="#fea51a"/><stop offset=".547" stop-color="#feb018"/><stop offset=".8" stop-color="#ffc314"/><stop offset="1" stop-color="#ffd70f"/></linearGradient></defs><path d="M5.54 13.105l-.586.588a.267.267 0 01-.377 0L.223 9.353a.533.533 0 010-.755l.588-.59 4.732 4.718a.267.267 0 010 .378z" fill="#50e6ff"/><path d="M4.863 4.305l.59.588a.267.267 0 010 .378L.806 9.932l-.59-.589a.533.533 0 01-.001-.754l4.273-4.285a.267.267 0 01.376 0z" fill="#1490df"/><path d="M17.19 8.012l.588.59a.533.533 0 01-.001.754l-4.354 4.34a.267.267 0 01-.377 0l-.586-.587a.267.267 0 010-.377l4.732-4.718z" fill="#50e6ff"/><path d="M17.782 9.34l-.59.589-4.648-4.662a.267.267 0 010-.377l.59-.588a.267.267 0 01.378 0l4.273 4.286a.533.533 0 010 .753z" fill="#1490df"/><path d="M8.459 9.9H4.87a.193.193 0 01-.2-.181.166.166 0 01.018-.075L8.991 1.13a.206.206 0 01.186-.106h4.245a.193.193 0 01.2.181.165.165 0 01-.035.1L8.534 7.966h4.928a.193.193 0 01.2.181.176.176 0 01-.052.122l-8.189 8.519c-.077.046-.624.5-.356-.189z" fill="url(#a)"/></svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><radialGradient id="b" cx="9.36" cy="10.57" r="7.07" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#f2f2f2"/><stop offset=".58" stop-color="#eee"/><stop offset="1" stop-color="#e6e6e6"/></radialGradient><linearGradient id="a" x1="2.59" y1="10.16" x2="15.41" y2="10.16" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#005ba1"/><stop offset=".07" stop-color="#0060a9"/><stop offset=".36" stop-color="#0071c8"/><stop offset=".52" stop-color="#0078d4"/><stop offset=".64" stop-color="#0074cd"/><stop offset=".82" stop-color="#006abb"/><stop offset="1" stop-color="#005ba1"/></linearGradient></defs><path d="M9 5.14c-3.54 0-6.41-1-6.41-2.32v12.36c0 1.27 2.82 2.3 6.32 2.32H9c3.54 0 6.41-1 6.41-2.32V2.82c0 1.29-2.87 2.32-6.41 2.32z" fill="url(#a)"/><path d="M15.41 2.82c0 1.29-2.87 2.32-6.41 2.32s-6.41-1-6.41-2.32S5.46.5 9 .5s6.41 1 6.41 2.32" fill="#e8e8e8"/><path d="M13.92 2.63c0 .82-2.21 1.48-4.92 1.48s-4.92-.66-4.92-1.48S6.29 1.16 9 1.16s4.92.66 4.92 1.47" fill="#50e6ff"/><path d="M9 3a11.55 11.55 0 00-3.89.57A11.42 11.42 0 009 4.11a11.15 11.15 0 003.89-.58A11.84 11.84 0 009 3z" fill="#198ab3"/><path d="M12.9 11.4V8H12v4.13h2.46v-.73zM5.76 9.73a1.83 1.83 0 01-.51-.31.44.44 0 01-.12-.32.34.34 0 01.15-.3.68.68 0 01.42-.12 1.62 1.62 0 011 .29v-.86a2.58 2.58 0 00-1-.16 1.64 1.64 0 00-1.09.34 1.08 1.08 0 00-.42.89c0 .51.32.91 1 1.21a2.88 2.88 0 01.62.36.42.42 0 01.15.32.38.38 0 01-.16.31.81.81 0 01-.45.11 1.66 1.66 0 01-1.09-.42V12a2.17 2.17 0 001.07.24 1.88 1.88 0 001.18-.33 1.08 1.08 0 00.33-.91 1.05 1.05 0 00-.25-.7 2.42 2.42 0 00-.83-.57zM11 11.32a2.34 2.34 0 00.33-1.26A2.32 2.32 0 0011 9a1.81 1.81 0 00-.7-.75 2 2 0 00-1-.26 2.11 2.11 0 00-1.08.27 1.86 1.86 0 00-.73.74 2.46 2.46 0 00-.26 1.14 2.26 2.26 0 00.24 1 1.76 1.76 0 00.69.74 2.06 2.06 0 001 .3l.86 1h1.21L10 12.08a1.79 1.79 0 001-.76zm-1-.25a.94.94 0 01-.76.35.92.92 0 01-.76-.36 1.52 1.52 0 01-.29-1 1.53 1.53 0 01.29-1 1 1 0 01.78-.37.87.87 0 01.75.37 1.62 1.62 0 01.27 1 1.46 1.46 0 01-.28 1.01z" fill="url(#b)"/></svg>

Before: 2.0 KiB

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="8.88" y1="12.21" x2="8.88" y2=".21" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#0078d4"/><stop offset=".82" stop-color="#5ea0ef"/></linearGradient><linearGradient id="b" x1="8.88" y1="16.84" x2="8.88" y2="12.21" gradientUnits="userSpaceOnUse"><stop offset=".15" stop-color="#ccc"/><stop offset="1" stop-color="#707070"/></linearGradient></defs><rect x="-.12" y=".21" width="18" height="12" rx=".6" fill="url(#a)"/><path fill="#50e6ff" d="M11.88 4.46v3.49l-3 1.76v-3.5l3-1.75z"/><path fill="#c3f1ff" d="M11.88 4.46l-3 1.76-3-1.76 3-1.75 3 1.75z"/><path fill="#9cebff" d="M8.88 6.22v3.49l-3-1.76V4.46l3 1.76z"/><path fill="#c3f1ff" d="M5.88 7.95l3-1.74v3.5l-3-1.76z"/><path fill="#9cebff" d="M11.88 7.95l-3-1.74v3.5l3-1.76z"/><path d="M12.49 15.84c-1.78-.28-1.85-1.56-1.85-3.63H7.11c0 2.07-.06 3.35-1.84 3.63a1 1 0 00-.89 1h9a1 1 0 00-.89-1z" fill="url(#b)"/></svg>

Before: 973 B

View File

@@ -1,7 +1,11 @@
<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100" fill="none"><rect width="100" height="100" fill="url(#a)" rx="20"/><g fill="#fff" fill-rule="evenodd" clip-rule="evenodd" filter="url(#b)"><path d="M11 49.941v-.003l.002-.005.003-.014.007-.035a8.37 8.37 0 0 1 .105-.42c.073-.263.184-.624.348-1.072.328-.896.866-2.135 1.73-3.617 1.732-2.97 4.753-6.883 9.95-10.955 5.223-4.092 10.295-6.293 14.08-7.471a35.328 35.328 0 0 1 4.585-1.114 23.628 23.628 0 0 1 1.687-.223 9.17 9.17 0 0 1 .108-.009l.034-.002h.011l.007-.001s.002 0 .133 2.217c.13 2.218.132 2.218.132 2.218h-.002l-.053.004a19.098 19.098 0 0 0-1.326.178c-.809.136-1.937.37-3.302.763l-.127.043c-2.745.94-6.666 2.775-11.249 6.362-4.572 3.577-7.142 6.95-8.563 9.393-.711 1.222-1.137 2.215-1.383 2.889a9.995 9.995 0 0 0-.29.933c.008.037.022.095.044.173.046.166.123.423.246.76.246.674.672 1.667 1.383 2.89 1.421 2.441 3.991 5.815 8.563 9.392 4.584 3.587 8.504 5.423 11.25 6.362l.126.043c1.365.393 2.493.627 3.302.763a19.098 19.098 0 0 0 1.326.178l.053.004h.002s-.002 0-.133 2.218C43.66 75 43.657 75 43.657 75h-.007l-.011-.001-.034-.002a9.17 9.17 0 0 1-.478-.046 23.628 23.628 0 0 1-1.317-.186 35.328 35.328 0 0 1-4.584-1.114c-3.786-1.178-8.858-3.38-14.081-7.471-5.197-4.072-8.218-7.985-9.95-10.955-.864-1.482-1.402-2.72-1.73-3.617-.164-.448-.275-.81-.348-1.072a8.37 8.37 0 0 1-.105-.42l-.007-.035-.003-.014-.002-.005v-.121Zm78 0v-.003l-.002-.005-.002-.014-.008-.035a8.532 8.532 0 0 0-.105-.42 14.049 14.049 0 0 0-.348-1.072c-.328-.896-.866-2.135-1.73-3.617-1.732-2.97-4.753-6.883-9.95-10.955-5.223-4.092-10.295-6.293-14.08-7.471a35.328 35.328 0 0 0-4.585-1.114 23.628 23.628 0 0 0-1.687-.223 9.17 9.17 0 0 0-.108-.009l-.034-.002h-.011L56.343 25s-.002 0-.133 2.217c-.13 2.218-.132 2.218-.132 2.218h.002l.053.004a19.098 19.098 0 0 1 1.326.178c.809.136 1.937.37 3.302.763l.127.043c2.745.94 6.666 2.775 11.249 6.362 4.572 3.577 7.141 6.95 8.563 9.393.711 1.222 1.137 2.215 1.383 2.889a9.995 9.995 0 0 1 .29.933 9.995 9.995 0 0 1-.29.934c-.246.673-.672 1.666-1.383 2.888-1.422 2.442-3.991 5.816-8.563 9.393-4.584 3.587-8.504 5.423-11.25 6.362l-.126.043a30.108 30.108 0 0 1-3.302.763 19.098 19.098 0 0 1-1.326.178l-.053.004h-.002s.002 0 .133 2.218C56.34 75 56.343 75 56.343 75h.007l.011-.001.034-.002a9.17 9.17 0 0 0 .478-.046c.314-.034.758-.092 1.317-.186a35.328 35.328 0 0 0 4.584-1.114c3.786-1.178 8.858-3.38 14.081-7.471 5.197-4.072 8.218-7.985 9.95-10.955.864-1.482 1.402-2.72 1.73-3.617.164-.448.275-.81.348-1.072a8.532 8.532 0 0 0 .105-.42l.008-.035.002-.014.001-.005.001-.003v-.118Z"/><path d="M68.342 49.998c0 9.846-7.924 17.827-17.7 17.827-9.775 0-17.7-7.981-17.7-17.827 0-9.846 7.925-17.827 17.7-17.827 9.776 0 17.7 7.981 17.7 17.827ZM46.218 39.97s-2.127 2.508-2.766 4.457c-.412 1.257-.553 3.343-.553 3.343h-5.531s0-1.672 1.106-4.457c1.106-2.786 2.212-3.343 2.212-3.343h5.532Zm-2.766 15.6c.639 1.949 2.766 4.457 2.766 4.457h-5.532s-1.106-.557-2.212-3.343c-1.106-2.785-1.106-4.457-1.106-4.457h5.53s.142 2.086.554 3.343Z"/></g><defs><radialGradient id="a" cx="0" cy="0" r="1" gradientTransform="matrix(40.99997 42 -42 40.99997 50 50)" gradientUnits="userSpaceOnUse"><stop offset=".33" stop-color="#F76526"/><stop offset="1" stop-color="#F43030"/></radialGradient><filter id="b" width="90" height="62" x="5" y="23" color-interpolation-filters="sRGB" filterUnits="userSpaceOnUse"><feFlood flood-opacity="0" result="BackgroundImageFix"/><feColorMatrix in="SourceAlpha" result="hardAlpha" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/><feOffset dy="4"/><feGaussianBlur 
stdDeviation="3"/><feComposite in2="hardAlpha" operator="out"/><feColorMatrix values="0 0 0 0 0.368384 0 0 0 0 0.0623777 0 0 0 0 0.0623777 0 0 0 0.25 0"/><feBlend in2="BackgroundImageFix" result="effect1_dropShadow_3909_18731"/><feBlend in="SourceGraphic" in2="effect1_dropShadow_3909_18731" result="shape"/><feColorMatrix in="SourceAlpha" result="hardAlpha" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/><feOffset dy="4"/><feGaussianBlur stdDeviation="3"/><feComposite in2="hardAlpha" k2="-1" k3="1" operator="arithmetic"/><feColorMatrix values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.06 0"/><feBlend in2="shape" result="effect2_innerShadow_3909_18731"/></filter></defs></svg> <svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_2048_2251)">
<path opacity="0.9" d="M8.02226 15.9866C3.56539 15.9866 -6.10352e-05 12.4896 -6.10352e-05 8.11832C-6.10352e-05 3.79075 3.56539 0.25 8.02226 0.25H13.0584C14.7075 0.25 15.9999 1.56139 15.9999 3.13506V8.11832C15.9999 12.4896 12.4345 15.9866 8.02226 15.9866Z" fill="#F25733"/>
<path d="M7.95919 4.71207C4.63025 4.71207 2.75514 7.46868 2.67693 7.58603C2.48413 7.87508 2.48413 8.24888 2.67707 8.53816C2.75514 8.65528 4.63025 11.4119 7.95919 11.4119C11.2881 11.4119 13.1633 8.65528 13.2414 8.53792C13.4342 8.24888 13.4342 7.87508 13.2413 7.58582C13.1632 7.46868 11.2881 4.71207 7.95919 4.71207ZM3.13771 8.23088C3.06925 8.12832 3.06925 7.99571 3.13771 7.89307C3.20059 7.79867 4.53564 5.83764 6.92256 5.36723C5.84092 5.78476 5.07127 6.83485 5.07127 8.062C5.07127 9.28912 5.84092 10.3392 6.92256 10.7567C4.53564 10.2863 3.20059 8.32528 3.13771 8.23088ZM6.62838 8.062C6.62838 8.21488 6.50443 8.3388 6.35151 8.3388C6.19859 8.3388 6.07465 8.21488 6.07465 8.062C6.07465 7.02287 6.92003 6.17748 7.95916 6.17748C8.11207 6.17748 8.23599 6.30141 8.23599 6.45434C8.23599 6.60727 8.11207 6.73119 7.95916 6.73119C7.22535 6.73119 6.62838 7.32815 6.62838 8.062ZM7.95919 8.73504C7.58803 8.73504 7.2861 8.43312 7.2861 8.062C7.2861 7.69085 7.58803 7.3889 7.95919 7.3889C8.33039 7.3889 8.63231 7.69083 8.63231 8.062C8.63231 8.43312 8.33039 8.73504 7.95919 8.73504ZM12.7806 8.23088C12.7178 8.32528 11.3827 10.2863 8.99583 10.7567C10.0775 10.3392 10.8471 9.28912 10.8471 8.062C10.8471 6.83487 10.0775 5.78477 8.99583 5.36724C11.3827 5.83768 12.7178 7.7987 12.7806 7.89307C12.8491 7.99571 12.8491 8.12832 12.7806 8.23088Z" fill="#F9F2F9"/>
</g>
<defs>
<clipPath id="clip0_2048_2251">
<rect width="16" height="16" fill="white"/>
</clipPath>
</defs>
</svg>

Before: 4.1 KiB | After: 1.8 KiB

View File

@@ -1 +0,0 @@
.uplot, .uplot *, .uplot *::before, .uplot *::after {box-sizing: border-box;}.uplot {font-family: system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";line-height: 1.5;width: min-content;}.u-title {text-align: center;font-size: 18px;font-weight: bold;}.u-wrap {position: relative;user-select: none;}.u-over, .u-under {position: absolute;}.u-under {overflow: hidden;}.uplot canvas {display: block;position: relative;width: 100%;height: 100%;}.u-axis {position: absolute;}.u-legend {font-size: 14px;margin: auto;text-align: center;}.u-inline {display: block;}.u-inline * {display: inline-block;}.u-inline tr {margin-right: 16px;}.u-legend th {font-weight: 600;}.u-legend th > * {vertical-align: middle;display: inline-block;}.u-legend .u-marker {width: 1em;height: 1em;margin-right: 4px;background-clip: padding-box !important;}.u-inline.u-live th::after {content: ":";vertical-align: middle;}.u-inline:not(.u-live) .u-value {display: none;}.u-series > * {padding: 4px;}.u-series th {cursor: pointer;}.u-legend .u-off > * {opacity: 0.3;}.u-select {background: rgba(0,0,0,0.07);position: absolute;pointer-events: none;}.u-cursor-x, .u-cursor-y {position: absolute;left: 0;top: 0;pointer-events: none;will-change: transform;}.u-hz .u-cursor-x, .u-vt .u-cursor-y {height: 100%;border-right: 1px dashed #607D8B;}.u-hz .u-cursor-y, .u-vt .u-cursor-x {width: 100%;border-bottom: 1px dashed #607D8B;}.u-cursor-pt {position: absolute;top: 0;left: 0;border-radius: 50%;border: 0 solid;pointer-events: none;will-change: transform;/*this has to be !important since we set inline "background" shorthand */background-clip: padding-box !important;}.u-axis.u-off, .u-select.u-off, .u-cursor-x.u-off, .u-cursor-y.u-off, .u-cursor-pt.u-off {display: none;}

Binary file not shown.

Before: 13 KiB | After: 2.2 KiB

View File

@@ -53,10 +53,8 @@
"option_atleastonce": "at least once", "option_atleastonce": "at least once",
"option_onaverage": "on average", "option_onaverage": "on average",
"option_intotal": "in total", "option_intotal": "in total",
"option_last": "last",
"option_above": "above", "option_above": "above",
"option_below": "below", "option_below": "below",
"option_above_below": "above/below",
"option_equal": "is equal to", "option_equal": "is equal to",
"option_notequal": "not equal to", "option_notequal": "not equal to",
"button_query": "Query", "button_query": "Query",
@@ -111,8 +109,6 @@
"choose_alert_type": "Choose a type for the alert", "choose_alert_type": "Choose a type for the alert",
"metric_based_alert": "Metric based Alert", "metric_based_alert": "Metric based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data.", "metric_based_alert_desc": "Send a notification when a condition occurs in the metric data.",
"anomaly_based_alert": "Anomaly based Alert",
"anomaly_based_alert_desc": "Send a notification when a condition occurs in the metric data.",
"log_based_alert": "Log-based Alert", "log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.", "log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert", "traces_based_alert": "Trace-based Alert",
@@ -121,8 +117,6 @@
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.", "exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.",
"field_unit": "Threshold unit", "field_unit": "Threshold unit",
"text_alert_on_absent": "Send a notification if data is missing for", "text_alert_on_absent": "Send a notification if data is missing for",
"text_require_min_points": "Run alert evaluation only when there are minimum of",
"text_num_points": "data points in each result group",
"text_alert_frequency": "Run alert every", "text_alert_frequency": "Run alert every",
"text_for": "minutes", "text_for": "minutes",
"selected_query_placeholder": "Select query" "selected_query_placeholder": "Select query"

View File

@@ -1,12 +0,0 @@
{
"workspaceSuspended": "Your workspace is locked",
"gotQuestions": "Got Questions?",
"contactUs": "Contact Us",
"actionHeader": "Pay to continue",
"actionDescription": "Pay now to keep enjoying all the great features youve been using.",
"yourDataIsSafe": "Your data is safe with us until",
"actNow": "Act now to avoid any disruptions and continue where you left off.",
"contactAdmin": "Contact your admin to proceed with the upgrade.",
"continueMyJourney": "Settle your bill to continue",
"somethingWentWrong": "Something went wrong"
}

View File

@@ -1,8 +0,0 @@
{
"containers_visualization_message": "The ability to visualise containers is in active development and should be available to you soon.",
"processes_visualization_message": "The ability to visualise processes is in active development and should be available to you soon.",
"working_message": "We're working to extend infrastructure monitoring to take care of a bunch of different cases. Thank you for your patience.",
"waitlist_message": "Join the waitlist for early access.",
"waitlist_success_message": "We have received your request for early access. We will get back to you as soon as we launch the feature.",
"contact_support": "Contact Support"
}

View File

@@ -1,24 +0,0 @@
{
"metricGraphCategory": {
"brokerMetrics": {
"title": "Broker Metrics",
"description": "The Kafka Broker metrics here inform you of data loss/delay through unclean leader elections and network throughputs, as well as request fails through request purgatories and timeouts metrics"
},
"consumerMetrics": {
"title": "Consumer Metrics",
"description": "Kafka Consumer metrics provide insights into lag between message production and consumption, success rates and latency of message delivery, and the volume of data consumed."
},
"producerMetrics": {
"title": "Producer Metrics",
"description": "Kafka Producers send messages to brokers for storage and distribution by topic. These metrics inform you of the volume and rate of data sent, and the success rate of message delivery."
},
"brokerJVMMetrics": {
"title": "Broker JVM Metrics",
"description": "Kafka brokers are Java applications that expose JVM metrics to inform on the broker's system health. Garbage collection metrics like those below provide key insights into free memory, broker performance, and heap size. You need to enable new_gc_metrics for this section to populate."
},
"partitionMetrics": {
"title": "Partition Metrics",
"description": "Kafka partitions are the unit of parallelism in Kafka. These metrics inform you of the number of partitions per topic, the current offset of each partition, the oldest offset, and the number of in-sync replicas."
}
}
}

View File

@@ -1,54 +0,0 @@
{
"breadcrumb": "Messaging Queues",
"header": "Kafka / Overview",
"overview": {
"title": "Start sending data in as little as 20 minutes",
"subtitle": "Connect and Monitor Your Data Streams"
},
"configureConsumer": {
"title": "Configure Consumer",
"description": "Add consumer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"configureProducer": {
"title": "Configure Producer",
"description": "Add producer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"monitorKafka": {
"title": "Monitor kafka",
"description": "Add your Kafka source to gain insights and enhance activity tracking.",
"button": "Get Started"
},
"summarySection": {
"viewDetailsButton": "View Details",
"consumer": {
"title": "Consumer lag view",
"description": "Connect and Monitor Your Data Streams"
},
"producer": {
"title": "Producer latency view",
"description": "Connect and Monitor Your Data Streams"
},
"partition": {
"title": "Partition Latency view",
"description": "Connect and Monitor Your Data Streams"
},
"dropRate": {
"title": "Drop Rate view",
"description": "Connect and Monitor Your Data Streams"
},
"metricPage": {
"title": "Metric View",
"description": "Connect and Monitor Your Data Streams"
}
},
"confirmModal": {
"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
"okText": "Proceed"
},
"overviewSummarySection": {
"title": "Monitor Your Data Streams",
"subtitle": "Monitor key Kafka metrics like consumer lag and latency to ensure efficient data flow and troubleshoot in real time."
}
}

View File

@@ -1,8 +0,0 @@
{
"invite_user": "Invite your teammates",
"invite": "Invite",
"skip": "Skip",
"invite_user_helper_text": "Not the right person to get started? No worries! Invite someone who can.",
"select_use_case": "Select a use-case to get started",
"get_started": "Get Started"
}

View File

@@ -40,10 +40,8 @@
"option_atleastonce": "at least once", "option_atleastonce": "at least once",
"option_onaverage": "on average", "option_onaverage": "on average",
"option_intotal": "in total", "option_intotal": "in total",
"option_last": "last",
"option_above": "above", "option_above": "above",
"option_below": "below", "option_below": "below",
"option_above_below": "above/below",
"option_equal": "is equal to", "option_equal": "is equal to",
"option_notequal": "not equal to", "option_notequal": "not equal to",
"button_query": "Query", "button_query": "Query",

View File

@@ -1,3 +1,3 @@
 {
-"rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support or "
+"rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support."
 }

View File

@@ -37,10 +37,6 @@
"PASSWORD_RESET": "SigNoz | Password Reset", "PASSWORD_RESET": "SigNoz | Password Reset",
"LIST_LICENSES": "SigNoz | List of Licenses", "LIST_LICENSES": "SigNoz | List of Licenses",
"WORKSPACE_LOCKED": "SigNoz | Workspace Locked", "WORKSPACE_LOCKED": "SigNoz | Workspace Locked",
"WORKSPACE_SUSPENDED": "SigNoz | Workspace Suspended",
"SUPPORT": "SigNoz | Support", "SUPPORT": "SigNoz | Support",
"DEFAULT": "Open source Observability Platform | SigNoz", "DEFAULT": "Open source Observability Platform | SigNoz"
"ALERT_HISTORY": "SigNoz | Alert Rule History",
"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview",
"INFRASTRUCTURE_MONITORING_HOSTS": "SigNoz | Infra Monitoring"
} }

View File

@@ -1,22 +0,0 @@
{
"trialPlanExpired": "Trial Plan Expired",
"gotQuestions": "Got Questions?",
"contactUs": "Contact Us",
"upgradeToContinue": "Upgrade to Continue",
"upgradeNow": "Upgrade now to keep enjoying all the great features youve been using.",
"yourDataIsSafe": "Your data is safe with us until",
"actNow": "Act now to avoid any disruptions and continue where you left off.",
"contactAdmin": "Contact your admin to proceed with the upgrade.",
"continueMyJourney": "Continue My Journey",
"needMoreTime": "Need More Time?",
"extendTrial": "Extend Trial",
"extendTrialMsgPart1": "If you have a specific reason why you were not able to finish your PoC in the trial period, please write to us on",
"extendTrialMsgPart2": "with the reason. Sometimes we can extend trial by a few days on a case by case basis",
"whyChooseSignoz": "Why choose Signoz",
"enterpriseGradeObservability": "Enterprise-grade Observability",
"observabilityDescription": "Get access to observability at any scale with advanced security and compliance.",
"continueToUpgrade": "Continue to Upgrade",
"youAreInGoodCompany": "You are in good company",
"faqs": "FAQs",
"somethingWentWrong": "Something went wrong"
}

View File

@@ -13,12 +13,9 @@
"button_no": "No", "button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?", "remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared", "remove_label_success": "Labels cleared",
"alert_form_step1": "Choose a detection method", "alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Define the metric", "alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Define Alert Conditions", "alert_form_step3": "Step 3 - Alert Configuration",
"alert_form_step4": "Alert Configuration",
"threshold_alert_desc": "An alert is triggered whenever a metric deviates from an expected threshold.",
"anomaly_detection_alert_desc": "An alert is triggered whenever a metric deviates from an expected pattern.",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries", "metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes", "confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with", "confirm_save_content_part1": "Your alert built with",
@@ -38,7 +35,6 @@
"button_cancelchanges": "Cancel", "button_cancelchanges": "Cancel",
"button_discard": "Discard", "button_discard": "Discard",
"text_condition1": "Send a notification when", "text_condition1": "Send a notification when",
"text_condition1_anomaly": "Send notification when the observed value for",
"text_condition2": "the threshold", "text_condition2": "the threshold",
"text_condition3": "during the last", "text_condition3": "during the last",
"option_1min": "1 min", "option_1min": "1 min",
@@ -57,10 +53,8 @@
"option_atleastonce": "at least once", "option_atleastonce": "at least once",
"option_onaverage": "on average", "option_onaverage": "on average",
"option_intotal": "in total", "option_intotal": "in total",
"option_last": "last",
"option_above": "above", "option_above": "above",
"option_below": "below", "option_below": "below",
"option_above_below": "above/below",
"option_equal": "is equal to", "option_equal": "is equal to",
"option_notequal": "not equal to", "option_notequal": "not equal to",
"button_query": "Query", "button_query": "Query",
@@ -114,9 +108,7 @@
"user_tooltip_more_help": "More details on how to create alerts", "user_tooltip_more_help": "More details on how to create alerts",
"choose_alert_type": "Choose a type for the alert", "choose_alert_type": "Choose a type for the alert",
"metric_based_alert": "Metric based Alert", "metric_based_alert": "Metric based Alert",
"anomaly_based_alert": "Anomaly based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data.", "metric_based_alert_desc": "Send a notification when a condition occurs in the metric data.",
"anomaly_based_alert_desc": "Send a notification when a condition occurs in the metric data.",
"log_based_alert": "Log-based Alert", "log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.", "log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert", "traces_based_alert": "Trace-based Alert",
@@ -125,8 +117,6 @@
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.", "exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.",
"field_unit": "Threshold unit", "field_unit": "Threshold unit",
"text_alert_on_absent": "Send a notification if data is missing for", "text_alert_on_absent": "Send a notification if data is missing for",
"text_require_min_points": "Run alert evaluation only when there are minimum of",
"text_num_points": "data points in each result group",
"text_alert_frequency": "Run alert every", "text_alert_frequency": "Run alert every",
"text_for": "minutes", "text_for": "minutes",
"selected_query_placeholder": "Select query" "selected_query_placeholder": "Select query"

Some files were not shown because too many files have changed in this diff.