Compare commits
13 commits: `chore/tf/t` ... `tpapi-demo`

| Author | SHA1 | Date |
|---|---|---|
| | a13d1c8954 | |
| | a564e5fd21 | |
| | d85b8217e7 | |
| | c9d2e381a8 | |
| | 57778cafba | |
| | a056fa7a6f | |
| | 1747868d76 | |
| | 5497464548 | |
| | aac2647b67 | |
| | 785a0f2a48 | |
| | b46adb5927 | |
| | ae87499679 | |
| | f0b0889d2e | |
@@ -1,69 +0,0 @@ (file deleted)

```yaml
services:
  clickhouse:
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: clickhouse
    volumes:
      - ${PWD}/fs/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
      - ${PWD}/fs/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
      - ${PWD}/fs/tmp/var/lib/clickhouse/:/var/lib/clickhouse/
      - ${PWD}/fs/tmp/var/lib/clickhouse/user_scripts/:/var/lib/clickhouse/user_scripts/
    ports:
      - '127.0.0.1:8123:8123'
      - '127.0.0.1:9000:9000'
    tty: true
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - 0.0.0.0:8123/ping
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      - zookeeper
  zookeeper:
    image: bitnami/zookeeper:3.7.1
    container_name: zookeeper
    volumes:
      - ${PWD}/fs/tmp/zookeeper:/bitnami/zookeeper
    ports:
      - '127.0.0.1:2181:2181'
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
    healthcheck:
      test:
        - CMD-SHELL
        - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
      interval: 30s
      timeout: 5s
      retries: 3
  schema-migrator-sync:
    image: signoz/signoz-schema-migrator:v0.111.42
    container_name: schema-migrator-sync
    command:
      - sync
      - --cluster-name=cluster
      - --dsn=tcp://clickhouse:9000
      - --replication=true
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
    restart: on-failure
  schema-migrator-async:
    image: signoz/signoz-schema-migrator:v0.111.42
    container_name: schema-migrator-async
    command:
      - async
      - --cluster-name=cluster
      - --dsn=tcp://clickhouse:9000
      - --replication=true
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
      schema-migrator-sync:
        condition: service_completed_successfully
    restart: on-failure
```
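For reference, a compose file like the one above would typically be exercised as follows (a minimal sketch; the deleted file's name isn't shown in the diff, so `docker-compose.yaml` is an assumption):

```bash
# Bring the stack up and watch the health checks settle.
docker compose -f docker-compose.yaml up -d
docker compose -f docker-compose.yaml ps

# Same probe the clickhouse healthcheck runs, from the host side:
wget --spider -q 127.0.0.1:8123/ping && echo "clickhouse is up"
```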
@@ -1,47 +0,0 @@ (file deleted)

```xml
<clickhouse replace="true">
    <logger>
        <level>information</level>
        <formatting>
            <type>json</type>
        </formatting>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>3</count>
    </logger>
    <display_name>cluster</display_name>
    <listen_host>0.0.0.0</listen_host>
    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>
    <user_directories>
        <users_xml>
            <path>users.xml</path>
        </users_xml>
        <local_directory>
            <path>/var/lib/clickhouse/access/</path>
        </local_directory>
    </user_directories>
    <distributed_ddl>
        <path>/clickhouse/task_queue/ddl</path>
    </distributed_ddl>
    <remote_servers>
        <cluster>
            <shard>
                <replica>
                    <host>clickhouse</host>
                    <port>9000</port>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
    <zookeeper>
        <node>
            <host>zookeeper</host>
            <port>2181</port>
        </node>
    </zookeeper>
    <macros>
        <shard>01</shard>
        <replica>01</replica>
    </macros>
</clickhouse>
```
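Once a node boots with this config, the single-shard cluster it declares can be checked from the server itself; a sketch using the container name from the compose file above:

```bash
# List the cluster members ClickHouse sees (expects one shard, one replica).
docker exec clickhouse clickhouse-client \
  --query "SELECT cluster, shard_num, replica_num, host_name, port FROM system.clusters WHERE cluster = 'cluster'"
```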
@@ -1,36 +0,0 @@ (file deleted)

```xml
<?xml version="1.0"?>
<clickhouse replace="true">
    <profiles>
        <default>
            <max_memory_usage>10000000000</max_memory_usage>
            <use_uncompressed_cache>0</use_uncompressed_cache>
            <load_balancing>in_order</load_balancing>
            <log_queries>1</log_queries>
        </default>
    </profiles>
    <users>
        <default>
            <profile>default</profile>
            <networks>
                <ip>::/0</ip>
            </networks>
            <quota>default</quota>
            <access_management>1</access_management>
            <named_collection_control>1</named_collection_control>
            <show_named_collections>1</show_named_collections>
            <show_named_collections_secrets>1</show_named_collections_secrets>
        </default>
    </users>
    <quotas>
        <default>
            <interval>
                <duration>3600</duration>
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>
```
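Note that `max_memory_usage` is measured in bytes, so `10000000000` caps each query at roughly 10 GB. A sketch of confirming the effective value at runtime:

```bash
# Confirm the default profile's per-query memory cap took effect.
docker exec clickhouse clickhouse-client \
  --query "SELECT name, value FROM system.settings WHERE name = 'max_memory_usage'"
```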
@@ -1,27 +0,0 @@ (file deleted)

```yaml
services:

  postgres:
    image: postgres:15
    container_name: postgres
    environment:
      POSTGRES_DB: signoz
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: password
    healthcheck:
      test: ["CMD", "pg_isready", "-d", "signoz", "-U", "postgres"]
      interval: 30s
      timeout: 30s
      retries: 3
    restart: on-failure
    ports:
      - "127.0.0.1:5432:5432/tcp"
    volumes:
      - ${PWD}/fs/tmp/var/lib/postgresql/data/:/var/lib/postgresql/data/
```
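The healthcheck above simply runs `pg_isready` inside the container; a sketch of checking it by hand and connecting, with the credentials taken straight from the compose file:

```bash
# Same check the compose healthcheck performs:
docker exec postgres pg_isready -d signoz -U postgres

# Connect from the host through the published port:
psql "postgres://postgres:password@127.0.0.1:5432/signoz"
```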
@@ -1,7 +1,6 @@

```text
.git
.github
.vscode
.devenv
README.md
deploy
sample-apps
```

.github/CODEOWNERS (vendored) — 9 lines changed
@@ -2,14 +2,9 @@

```text
# Owners are automatically requested for review for PRs that changes code
# that they own.

/frontend/ @SigNoz/frontend @YounixM
/frontend/ @YounixM
/frontend/src/container/MetricsApplication @srikanthccv
/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
/deploy/ @SigNoz/devops
/sample-apps/ @SigNoz/devops
.github @SigNoz/devops
/pkg/config/ @grandwizard28
/pkg/errors/ @grandwizard28
/pkg/factory/ @grandwizard28
/pkg/types/ @grandwizard28
/pkg/sqlmigration/ @vikrantgupta25
.golangci.yml @grandwizard28
```

.github/pull_request_template.md (vendored) — 73 lines changed
@@ -1,72 +1,17 @@

```markdown
## 📄 Summary
### Summary

<!-- Describe the purpose of the PR in a few sentences. What does it fix/add/update? -->
<!-- ✍️ A clear and concise description...-->

---
#### Related Issues / PR's

## ✅ Changes
<!-- ✍️ Add the issues being resolved here and related PR's where applicable -->

- [ ] Feature: Brief description
- [ ] Bug fix: Brief description
#### Screenshots

---
NA

## 🏷️ Required: Add Relevant Labels
<!-- ✍️ Add screenshots of before and after changes where applicable-->

> ⚠️ **Manually add appropriate labels in the PR sidebar**
Please select one or more labels (as applicable):
#### Affected Areas and Manually Tested Areas

ex:

- `frontend`
- `backend`
- `devops`
- `bug`
- `enhancement`
- `ui`
- `test`

---

## 👥 Reviewers

> Tag the relevant teams for review:

- frontend / backend / devops

---

## 🧪 How to Test

<!-- Describe how reviewers can test this PR -->
1. ...
2. ...
3. ...

---

## 🔍 Related Issues

<!-- Reference any related issues (e.g. Fixes #123, Closes #456) -->
Closes #

---

## 📸 Screenshots / Screen Recording (if applicable / mandatory for UI related changes)

<!-- Add screenshots or GIFs to help visualize changes -->

---

## 📋 Checklist

- [ ] Dev Review
- [ ] Test cases added (Unit/ Integration / E2E)
- [ ] Manually tested the changes

---

## 👀 Notes for Reviewers

<!-- Anything reviewers should keep in mind while reviewing -->
<!-- ✍️ Add details of blast radius and dev testing areas where applicable-->
```
.github/workflows/README.md (vendored, new file) — 42 lines

@@ -0,0 +1,42 @@

```markdown
# Github actions

## Testing the UI manually on each PR

First we need to make sure the UI is ready:
* Check the `Start tunnel` step in the `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull-<number>-signoz.loca.lt`
* This job keeps running until the PR is merged or closed, to keep the local tunneling alive
  - GitHub will cancel the job if the PR isn't merged within 6h
  - if the job was cancelled, go to the action and press `Re-run all jobs`

Now you can open your browser at https://pull-<number>-signoz.loca.lt and check the UI.

## Environment Variables

To run the GitHub workflows, a few environment variables need to be added to GitHub secrets:

<table>
  <tr>
    <th> Variables </th>
    <th> Description </th>
    <th> Example </th>
  </tr>
  <tr>
    <td> REPONAME </td>
    <td> Provide the DockerHub user/organisation name of the image. </td>
    <td> signoz </td>
  </tr>
  <tr>
    <td> DOCKERHUB_USERNAME </td>
    <td> Docker hub username </td>
    <td> signoz </td>
  </tr>
  <tr>
    <td> DOCKERHUB_TOKEN </td>
    <td> Docker hub password/token with push permission </td>
    <td> **** </td>
  </tr>
  <tr>
    <td> SONAR_TOKEN </td>
    <td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
    <td> **** </td>
  </tr>
</table>
```
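For reference, the secrets in that table can be registered with the GitHub CLI; a sketch with placeholder values (the real values are not shown here):

```bash
gh secret set REPONAME --body "signoz"
gh secret set DOCKERHUB_USERNAME --body "signoz"
gh secret set DOCKERHUB_TOKEN --body "<dockerhub-token-with-push-permission>"
gh secret set SONAR_TOKEN --body "<sonarcloud-token>"
```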
.github/workflows/build-community.yaml (vendored) — 82 lines

@@ -1,82 +0,0 @@ (file deleted)

```yaml
name: build-community

on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'
      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_VERSION: 1.23
      GO_NAME: signoz-community
      GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./pkg/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=community
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: dockerhub
```
.github/workflows/build-enterprise.yaml (vendored) — 116 lines

@@ -1,116 +0,0 @@ (file deleted)

```yaml
name: build-enterprise

on:
  push:
    tags:
      - v*

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    outputs:
      docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
      - name: set-docker-providers
        id: set-docker-providers
        run: |
          if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
            echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
          else
            echo "providers=gcp" >> $GITHUB_OUTPUT
          fi
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> frontend/.env
          echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: enterprise-dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: enterprise-dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_VERSION: 1.23
      GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
        -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}
```
.github/workflows/build-staging.yaml (vendored) — 127 lines

@@ -1,127 +0,0 @@ (file deleted)

```yaml
name: build-staging

on:
  push:
    branches:
      - main
  pull_request:
    types: [labeled]

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
      deployment: ${{ steps.build-info.outputs.deployment }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT

          staging_label="${{ github.event.label.name }}"
          if [[ "${staging_label}" == "staging:"* ]]; then
            deployment=${staging_label#"staging:"}
          elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
            deployment="staging"
          else
            echo "error: not able to determine deployment - please verify the PR label or the branch"
            exit 1
          fi
          echo "deployment=${deployment}" >> $GITHUB_OUTPUT
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'TUNNEL_URL="${{ secrets.NP_TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.NP_TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'PYLON_APP_ID="${{ secrets.NP_PYLON_APP_ID }}"' >> frontend/.env
          echo 'APPCUES_APP_ID="${{ secrets.NP_APPCUES_APP_ID }}"' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: staging-dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: staging-dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_VERSION: 1.23
      GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/zeus.url=https://api.staging.signoz.cloud
        -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.staging.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: gcp
  staging:
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
    secrets: inherit
    needs: [prepare, go-build]
    with:
      PRIMUS_REF: main
      GITHUB_ENVIRONMENT: staging
      GITHUB_SILENT: true
      GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
      GITHUB_EVENT_NAME: releaser
      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"
```
.github/workflows/build.yaml (vendored, new file) — 48 lines

@@ -0,0 +1,48 @@

```yaml
name: build-pipeline

on:
  pull_request:
    branches:
      - main
      - release/v*

jobs:
  build-frontend:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install dependencies
        run: cd frontend && yarn install
      - name: Build frontend docker image
        shell: bash
        run: |
          make build-frontend-amd64

  build-query-service:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup golang
        uses: actions/setup-go@v4
        with:
          go-version: "1.21"
      - name: Build query-service image
        shell: bash
        run: |
          make build-query-service-amd64

  build-ee-query-service:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup golang
        uses: actions/setup-go@v4
        with:
          go-version: "1.21"
      - name: Build EE query-service image
        shell: bash
        run: |
          make build-ee-query-service-amd64
```
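The three jobs above only invoke repo Makefile targets, so the same builds can be reproduced locally; a sketch, assuming Docker and the toolchains the workflow installs (Node/yarn, Go 1.21) are available:

```bash
# Mirror the CI jobs on a developer machine.
cd frontend && yarn install && cd ..
make build-frontend-amd64
make build-query-service-amd64
make build-ee-query-service-amd64
```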
.github/workflows/commitci.yaml (vendored) — 12 lines changed

@@ -25,3 +25,15 @@ jobs:

```yaml
          else
            echo "No references to 'ee' packages found in 'pkg' directory"
          fi
  lint:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: lint
        uses: wagoid/commitlint-github-action@v5
```
.github/workflows/goci.yaml (vendored) — 39 lines changed

@@ -18,7 +18,6 @@ jobs:

```yaml
    with:
      PRIMUS_REF: main
      GO_TEST_CONTEXT: ./...
      GO_VERSION: 1.23
  fmt:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
```

@@ -27,7 +26,6 @@ jobs:

```yaml
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_VERSION: 1.23
  lint:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
```

@@ -36,40 +34,3 @@ jobs:

```yaml
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_VERSION: 1.23
  deps:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    uses: signoz/primus.workflows/.github/workflows/go-deps.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_VERSION: 1.23
  build:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    runs-on: ubuntu-latest
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - name: go-install
        uses: actions/setup-go@v5
        with:
          go-version: "1.23"
      - name: qemu-install
        uses: docker/setup-qemu-action@v3
      - name: aarch64-install
        run: |
          set -ex
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: docker-community
        shell: bash
        run: |
          make docker-build-community
      - name: docker-enterprise
        shell: bash
        run: |
          make docker-build-enterprise
```
.github/workflows/gor-signoz-community.yaml (vendored) — 155 lines

@@ -1,155 +0,0 @@ (file deleted)

```yaml
name: gor-signoz-community

on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'
      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'

permissions:
  contents: write

jobs:
  prepare:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: build-frontend
        run: make js-build
      - name: upload-frontend-artifact
        uses: actions/upload-artifact@v4
        with:
          name: community-frontend-build-${{ env.sha_short }}
          path: frontend/build
  build:
    needs: prepare
    strategy:
      matrix:
        os:
          - ubuntu-latest
          - macos-latest
    env:
      CONFIG_PATH: pkg/query-service/.goreleaser.yaml
    runs-on: ${{ matrix.os }}
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
        if: matrix.os == 'ubuntu-latest'
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        if: matrix.os == 'ubuntu-latest'
      - name: ghcr-login
        uses: docker/login-action@v3
        if: matrix.os != 'macos-latest'
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.23"
      - name: cross-compilation-tools
        if: matrix.os == 'ubuntu-latest'
        run: |
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: download-frontend-artifact
        uses: actions/download-artifact@v4
        with:
          name: community-frontend-build-${{ env.sha_short }}
          path: frontend/build
      - name: cache-linux
        uses: actions/cache@v4
        if: matrix.os == 'ubuntu-latest'
        with:
          path: dist/linux
          key: signoz-community-linux-${{ env.sha_short }}
      - name: cache-darwin
        uses: actions/cache@v4
        if: matrix.os == 'macos-latest'
        with:
          path: dist/darwin
          key: signoz-community-darwin-${{ env.sha_short }}
      - name: release
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: release --config ${{ env.CONFIG_PATH }} --clean --split
          workdir: .
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
  release:
    runs-on: ubuntu-latest
    needs: build
    env:
      DOCKER_CLI_EXPERIMENTAL: "enabled"
      WORKDIR: pkg/query-service
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
      - name: cosign-installer
        uses: sigstore/cosign-installer@v3.8.1
      - name: download-syft
        uses: anchore/sbom-action/download-syft@v0.18.0
      - name: ghcr-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.23"

      # copy the caches from build
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: cache-linux
        id: cache-linux
        uses: actions/cache@v4
        with:
          path: dist/linux
          key: signoz-community-linux-${{ env.sha_short }}
      - name: cache-darwin
        id: cache-darwin
        uses: actions/cache@v4
        with:
          path: dist/darwin
          key: signoz-community-darwin-${{ env.sha_short }}

      # release
      - uses: goreleaser/goreleaser-action@v6
        if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: continue --merge
          workdir: .
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
```
.github/workflows/gor-signoz.yaml (vendored) — 168 lines

@@ -1,168 +0,0 @@ (file deleted)

```yaml
name: gor-signoz

on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'
      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'

permissions:
  contents: write

jobs:
  prepare:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: dotenv-frontend
        working-directory: frontend
        run: |
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > .env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> .env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> .env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> .env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> .env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> .env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> .env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> .env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
          echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> .env
          echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> .env
      - name: build-frontend
        run: make js-build
      - name: upload-frontend-artifact
        uses: actions/upload-artifact@v4
        with:
          name: frontend-build-${{ env.sha_short }}
          path: frontend/build
  build:
    needs: prepare
    strategy:
      matrix:
        os:
          - ubuntu-latest
          - macos-latest
    env:
      CONFIG_PATH: ee/query-service/.goreleaser.yaml
    runs-on: ${{ matrix.os }}
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
        if: matrix.os == 'ubuntu-latest'
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        if: matrix.os == 'ubuntu-latest'
      - name: ghcr-login
        uses: docker/login-action@v3
        if: matrix.os != 'macos-latest'
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.23"
      - name: cross-compilation-tools
        if: matrix.os == 'ubuntu-latest'
        run: |
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: download-frontend-artifact
        uses: actions/download-artifact@v4
        with:
          name: frontend-build-${{ env.sha_short }}
          path: frontend/build
      - name: cache-linux
        uses: actions/cache@v4
        if: matrix.os == 'ubuntu-latest'
        with:
          path: dist/linux
          key: signoz-linux-${{ env.sha_short }}
      - name: cache-darwin
        uses: actions/cache@v4
        if: matrix.os == 'macos-latest'
        with:
          path: dist/darwin
          key: signoz-darwin-${{ env.sha_short }}
      - name: release
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: release --config ${{ env.CONFIG_PATH }} --clean --split
          workdir: .
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
  release:
    runs-on: ubuntu-latest
    needs: build
    env:
      DOCKER_CLI_EXPERIMENTAL: "enabled"
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
      - name: cosign-installer
        uses: sigstore/cosign-installer@v3.8.1
      - name: download-syft
        uses: anchore/sbom-action/download-syft@v0.18.0
      - name: ghcr-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.23"

      # copy the caches from build
      - name: get-sha
        shell: bash
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: cache-linux
        id: cache-linux
        uses: actions/cache@v4
        with:
          path: dist/linux
          key: signoz-linux-${{ env.sha_short }}
      - name: cache-darwin
        id: cache-darwin
        uses: actions/cache@v4
        with:
          path: dist/darwin
          key: signoz-darwin-${{ env.sha_short }}

      # release
      - uses: goreleaser/goreleaser-action@v6
        if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: continue --merge
          workdir: .
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
```
@@ -1,8 +1,9 @@

```yaml
name: gor-histogramquantile
name: goreleaser

on:
  push:
    tags:
      - v*
      - histogram-quantile/v*

permissions:
```
.github/workflows/integrationci.yaml (vendored) — 53 lines

@@ -1,53 +0,0 @@ (file deleted)

```yaml
name: integrationci

on:
  pull_request:
    types:
      - labeled
  pull_request_target:
    types:
      - labeled

jobs:
  test:
    strategy:
      fail-fast: false
      matrix:
        src:
          - bootstrap
        sqlstore-provider:
          - postgres
          - sqlite
        clickhouse-version:
          - 24.1.2-alpine
          - 24.12-alpine
        schema-migrator-version:
          - v0.111.38
        postgres-version:
          - 15
    if: |
      ((github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))) && contains(github.event.pull_request.labels.*.name, 'safe-to-integrate')
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: python
        uses: actions/setup-python@v5
        with:
          python-version: 3.13
      - name: poetry
        run: |
          python -m pip install poetry==2.1.2
          python -m poetry config virtualenvs.in-project true
          cd tests/integration && poetry install --no-root
      - name: run
        run: |
          cd tests/integration && \
          poetry run pytest \
            --basetemp=./tmp/ \
            src/${{matrix.src}} \
            --sqlstore-provider ${{matrix.sqlstore-provider}} \
            --postgres-version ${{matrix.postgres-version}} \
            --clickhouse-version ${{matrix.clickhouse-version}} \
            --schema-migrator-version ${{matrix.schema-migrator-version}}
```
.github/workflows/prereleaser.yaml (vendored) — 4 lines changed

```diff
@@ -1,9 +1,9 @@
 name: prereleaser
 
 on:
-  # schedule every wednesday 6:30 AM UTC (12:00 PM IST)
+  # schedule every wednesday 9:30 AM UTC (3pm IST)
   schedule:
-    - cron: '30 6 * * 3'
+    - cron: '30 9 * * 3'
 
   # allow manual triggering of the workflow by a maintainer
   workflow_dispatch:
```
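For readers less used to cron syntax, the new schedule decodes as follows (a comment-only sketch):

```bash
# ┌───────── minute        (30)
# │  ┌────── hour, in UTC  (9)
# │  │ ┌──── day of month  (any)
# │  │ │ ┌── month         (any)
# │  │ │ │ ┌ day of week   (3 = Wednesday)
# 30 9 * * 3
```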
.github/workflows/push.yaml (vendored, new file) — 209 lines

@@ -0,0 +1,209 @@

```yaml
name: push

on:
  push:
    branches:
      - main
    tags:
      - v*

jobs:
  image-build-and-push-query-service:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup golang
        uses: actions/setup-go@v4
        with:
          go-version: "1.21"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - uses: benjlevesque/short-sha@v2.2
        id: short-sha
      - name: Get branch name
        id: branch-name
        uses: tj-actions/branch-names@v7.0.7
      - name: Set docker tag environment
        run: |
          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
            tag="${{ steps.branch-name.outputs.tag }}"
            tag="${tag:1}"
            echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
            echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
          else
            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
          fi
      - name: Install cross-compilation tools
        run: |
          set -ex
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: Build and push docker image
        run: make build-push-query-service

  image-build-and-push-ee-query-service:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Create .env file
        run: |
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: Setup golang
        uses: actions/setup-go@v4
        with:
          go-version: "1.21"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - uses: benjlevesque/short-sha@v2.2
        id: short-sha
      - name: Get branch name
        id: branch-name
        uses: tj-actions/branch-names@v7.0.7
      - name: Set docker tag environment
        run: |
          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
            tag="${{ steps.branch-name.outputs.tag }}"
            tag="${tag:1}"
            echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
          else
            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
          fi
      - name: Install cross-compilation tools
        run: |
          set -ex
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: Build and push docker image
        run: make build-push-ee-query-service

  image-build-and-push-frontend:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install dependencies
        working-directory: frontend
        run: yarn install
      - name: Run Prettier
        working-directory: frontend
        run: npm run prettify
        continue-on-error: true
      - name: Run ESLint
        working-directory: frontend
        run: npm run lint
        continue-on-error: true
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - uses: benjlevesque/short-sha@v2.2
        id: short-sha
      - name: Get branch name
        id: branch-name
        uses: tj-actions/branch-names@v7.0.7
      - name: Set docker tag environment
        run: |
          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
            tag="${{ steps.branch-name.outputs.tag }}"
            tag="${tag:1}"
            echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
          else
            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
          fi
      - name: Build and push docker image
        run: make build-push-frontend

  image-build-and-push-frontend-ee:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Create .env file
        run: |
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
      - name: Install dependencies
        working-directory: frontend
        run: yarn install
      - name: Run Prettier
        working-directory: frontend
        run: npm run prettify
        continue-on-error: true
      - name: Run ESLint
        working-directory: frontend
        run: npm run lint
        continue-on-error: true
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - uses: benjlevesque/short-sha@v2.2
        id: short-sha
      - name: Get branch name
        id: branch-name
        uses: tj-actions/branch-names@v7.0.7
      - name: Set docker tag environment
        run: |
          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
            tag="${{ steps.branch-name.outputs.tag }}"
            tag="${tag:1}"
            echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
            echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
          else
            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
          fi
      - name: Build and push docker image
        run: make build-push-frontend
```
.github/workflows/remove-label.yaml (vendored, new file) — 16 lines

@@ -0,0 +1,16 @@

```yaml
name: remove-label

on:
  pull_request_target:
    types: [synchronize]

jobs:
  remove:
    runs-on: ubuntu-latest
    steps:
      - name: Remove label testing-deploy from PR
        uses: buildsville/add-remove-label@v2.0.0
        with:
          label: testing-deploy
          type: remove
          token: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/staging-deployment.yaml (vendored, new file) — 56 lines

@@ -0,0 +1,56 @@

```yaml
name: staging-deployment
# Trigger deployment only on push to main branch
on:
  push:
    branches:
      - main
jobs:
  deploy:
    name: Deploy latest main branch to staging
    runs-on: ubuntu-latest
    environment: staging
    permissions:
      contents: 'read'
      id-token: 'write'
    steps:
      - id: 'auth'
        uses: 'google-github-actions/auth@v2'
        with:
          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}

      - name: 'sdk'
        uses: 'google-github-actions/setup-gcloud@v2'

      - name: 'ssh'
        shell: bash
        env:
          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
          GITHUB_SHA: ${{ github.sha }}
          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
          GCP_ZONE: ${{ secrets.GCP_ZONE }}
          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
        run: |
          read -r -d '' COMMAND <<EOF || true
          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
          echo "GITHUB_SHA: ${GITHUB_SHA}"
          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
          export OTELCOL_TAG="main"
          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
          export KAFKA_SPAN_EVAL="true"
          docker system prune --force
          docker pull signoz/signoz-otel-collector:main
          docker pull signoz/signoz-schema-migrator:main
          cd ~/signoz
          git status
          git add .
          git stash push -m "stashed on $(date --iso-8601=seconds)"
          git fetch origin
          git checkout ${GITHUB_BRANCH}
          git pull
          make build-ee-query-service-amd64
          make build-frontend-amd64
          make run-testing
          EOF
          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
```
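The `read -r -d '' COMMAND <<EOF || true` idiom in the `ssh` step deserves a note: `read -d ''` keeps reading until a NUL byte, so against a heredoc it always reaches end-of-input and returns non-zero, and the `|| true` keeps that expected "failure" from failing the step. A minimal sketch of the same pattern in isolation:

```bash
# Slurp a multi-line script into a variable; read returns 1 at EOF, hence || true.
read -r -d '' COMMAND <<EOF || true
echo "first line"
echo "second line"
EOF
printf '%s\n' "$COMMAND"
```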
.github/workflows/testing-deployment.yaml (vendored, new file) — 56 lines

@@ -0,0 +1,56 @@

```yaml
name: testing-deployment
# Trigger deployment only on testing-deploy label on pull request
on:
  pull_request:
    types: [labeled]
jobs:
  deploy:
    name: Deploy PR branch to testing
    runs-on: ubuntu-latest
    environment: testing
    if: ${{ github.event.label.name == 'testing-deploy' }}
    permissions:
      contents: 'read'
      id-token: 'write'
    steps:
      - id: 'auth'
        uses: 'google-github-actions/auth@v2'
        with:
          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}

      - name: 'sdk'
        uses: 'google-github-actions/setup-gcloud@v2'

      - name: 'ssh'
        shell: bash
        env:
          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
          GITHUB_SHA: ${{ github.sha }}
          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
          GCP_ZONE: ${{ secrets.GCP_ZONE }}
          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
        run: |
          read -r -d '' COMMAND <<EOF || true
          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
          echo "GITHUB_SHA: ${GITHUB_SHA}"
          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
          export DEV_BUILD="1"
          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
          docker system prune --force
          cd ~/signoz
          git status
          git add .
          git stash push -m "stashed on $(date --iso-8601=seconds)"
          git fetch origin
          git checkout main
          git pull
          # This is added to handle the scenario when a new commit in the PR is force-pushed
          git branch -D ${GITHUB_BRANCH}
          git checkout --track origin/${GITHUB_BRANCH}
          make build-ee-query-service-amd64
          make build-frontend-amd64
          make run-testing
          EOF
          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
```
.gitignore (vendored) — 157 lines changed

@@ -54,19 +54,19 @@ ee/query-service/tests/test-deploy/data/

```text
bin/
.local/
*/query-service/queries.active
ee/query-service/db

# e2e

e2e/node_modules/
e2e/test-results/
e2e/playwright-report/
e2e/blob-report/
e2e/playwright/.cache/
e2e/.auth

# go
vendor/
**/main/**
__debug_bin**

# git-town
.git-branches.toml
```

@@ -77,155 +77,4 @@ dist/

```text
# ignore user_scripts that is fetched by init-clickhouse
deploy/common/clickhouse/user_scripts/

queries.active

# tmp
**/tmp/**

# .devenv tmp files
.devenv/**/tmp/**
.qodo

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# LSP config files
pyrightconfig.json

# End of https://www.toptal.com/developers/gitignore/api/python
queries.active
```
@@ -16,8 +16,10 @@ tasks:
      yarn dev

ports:
  - port: 8080
  - port: 3301
    onOpen: open-browser
  - port: 8080
    onOpen: ignore
  - port: 9000
    onOpen: ignore
  - port: 8123
@@ -1,33 +0,0 @@
linters:
  default: standard
  enable:
    - bodyclose
    - misspell
    - nilnil
    - sloglint
    - depguard
    - iface

linters-settings:
  sloglint:
    no-mixed-args: true
    kv-only: true
    no-global: all
    context: all
    static-msg: true
    msg-style: lowercased
    key-naming-case: snake
  depguard:
    rules:
      nozap:
        deny:
          - pkg: "go.uber.org/zap"
            desc: "Do not use zap logger. Use slog instead."
  iface:
    enable:
      - identical
issues:
  exclude-dirs:
    - "pkg/query-service"
    - "ee/query-service"
    - "scripts/"
@@ -1,17 +0,0 @@
#### Auto generated by make docker-version-alpine. DO NOT EDIT! ####
amd64=029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85
unknown=5fea95373b9ec85974843f31446fa6a9df4492dddae4e1cb056193c34a20a5be
arm=b4aef1a899e0271f06d948c9a8fa626ecdb2202d3a178bc14775dd559e23df8e
unknown=a4d1e27e63a9d6353046eb25a2f0ec02945012b217f4364cd83a73fe6dfb0b15
arm=4fdafe217d0922f3c3e2b4f64cf043f8403a4636685cd9c51fea2cbd1f419740
unknown=7f21ac2018d95b2c51a5779c1d5ca6c327504adc3b0fdc747a6725d30b3f13c2
arm64=ea3c5a9671f7b3f7eb47eab06f73bc6591df978b0d5955689a9e6f943aa368c0
unknown=a8ba68c1a9e6eea8041b4b8f996c235163440808b9654a865976fdcbede0f433
386=dea9f02e103e837849f984d5679305c758aba7fea1b95b7766218597f61a05ab
unknown=3c6629bec05c8273a927d46b77428bf4a378dad911a0ae284887becdc149b734
ppc64le=0880443bffa028dfbbc4094a32dd6b7ac25684e4c0a3d50da9e0acae355c5eaf
unknown=bb48308f976b266e3ab39bbf9af84521959bd9c295d3c763690cf41f8df2a626
riscv64=d76e6fbe348ff20c2931bb7f101e49379648e026de95dd37f96e00ce1909dcf7
unknown=dd807544365f6dc187cbe6de0806adce2ea9de3e7124717d1d8e8b7a18b77b64
s390x=b815fadf80495594eb6296a6af0bc647ae5f193e0044e07acec7e5b378c9ce2d
unknown=74681be74a280a88abb53ff1e048eb1fb624b30d0066730df6d8afd02ba82e01
CONTRIBUTING.md (419 changes)
@@ -1,80 +1,389 @@
# Contributing Guidelines

Thank you for your interest in contributing to our project! We greatly value feedback and contributions from our community. This document will guide you through the contribution process.
## Welcome to SigNoz Contributing section 🎉

## How can I contribute?
Hi there! We're thrilled that you'd like to contribute to this project; thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.

### Finding Issues to Work On
- Check our [existing open issues](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue)
- Look for [good first issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with
- Review [recently closed issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) to avoid duplicates
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.

### Types of Contributions
- We accept contributions made to the [SigNoz `develop` branch]()
- Find all SigNoz Docker Hub images here
  - [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
  - [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
  - [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)

1. **Report Bugs**: Use our [Bug Report template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
2. **Request Features**: Submit using the [Feature Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
3. **Improve Documentation**: Create an issue with the `documentation` label
4. **Report Performance Issues**: Use our [Performance Issue template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=)
5. **Request Dashboards**: Submit using the [Dashboard Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+)
6. **Report Security Issues**: Follow our [Security Policy](https://github.com/SigNoz/signoz/security/policy)
7. **Join Discussions**: Participate in [project discussions](https://github.com/SigNoz/signoz/discussions)
## Finding contributions to work on 💬

### Creating Helpful Issues
Looking at the existing issues is a great way to find something to contribute to.
Also, have a look at these [good first issues label](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with.

When creating issues, include:

- **For Feature Requests**:
  - Clear use case and requirements
  - Proposed solution or improvement
  - Any open questions or considerations
## Sections:
- [General Instructions](#1-general-instructions-)
  - [For Creating Issue(s)](#11-for-creating-issues)
  - [For Pull Request(s)](#12-for-pull-requests)
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
- [Develop Frontend](#3-develop-frontend-)
  - [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
  - [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
  - [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
  - [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
- [Contribute to Dashboards](#6-contribute-to-dashboards-)
- [Other Ways to Contribute](#other-ways-to-contribute)

- **For Bug Reports**:
  - Step-by-step reproduction steps
  - Version information
  - Relevant environment details
  - Any modifications you've made
  - Expected vs actual behavior
# 1. General Instructions 📝

### Submitting Pull Requests
## 1.1 For Creating Issue(s)
Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue) or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.

1. **Development**:
   - Set up your [development environment](docs/contributing/development.md)
   - Work against the latest `main` branch
   - Focus on specific changes
   - Ensure all tests pass locally
   - Follow our [commit convention](#commit-convention)
**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)

2. **Submit PR**:
   - Ensure your branch can be auto-merged
   - Address any CI failures
   - Respond to review comments promptly
#### Details like these are incredibly useful:

For substantial changes, please split your contribution into multiple PRs:
- **Requirement** - what kind of use case are you trying to solve?
- **Proposal** - what do you suggest to solve the problem or improve the existing situation?
- Any open questions to address❓

1. First PR: Overall structure (README, configurations, interfaces)
2. Second PR: Core implementation (split further if needed)
3. Final PR: Documentation updates and end-to-end tests
#### If you are reporting a bug, details like these are incredibly useful:

### Commit Convention
- A reproducible test case or series of steps.
- The version of our code being used.
- Any modifications you've made relevant to the bug🐞.
- Anything unusual about your environment or deployment.

We follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/). All commits and PRs should include type specifiers (e.g., `feat:`, `fix:`, `docs:`, etc.).
Discussing your proposed changes ahead of time will make the contribution process smooth for everyone 🙌.

## How can I contribute to other repositories?
**[`^top^`](#contributing-guidelines)**

<hr>

You can find other repositories in the [SigNoz](https://github.com/SigNoz) organization to contribute to. Here is a list of **highlighted** repositories:
## 1.2 For Pull Request(s)

- [charts](https://github.com/SigNoz/charts)
- [dashboards](https://github.com/SigNoz/dashboards)
Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a pull request.
Before sending us a pull request, please ensure that:

Each repository has its own contributing guidelines. Please refer to the guidelines of the repository you want to contribute to.
- Fork the SigNoz repo on GitHub, clone it on your machine.
- Create a branch with your changes.
- You are working against the latest source on the `develop` branch.
- Modify the source; please focus only on the specific change you are contributing.
- Ensure local tests pass.
- Commit to your fork using clear commit messages.
- Send us a pull request, answering any default questions in the pull request interface.
- Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, on your computer, merge main into your branch, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push up those changes. A minimal command sketch follows this list.
- Once the change has been approved and merged, we will inform you in a comment.
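
A minimal sketch of keeping a branch mergeable, assuming your fork's remote is named `origin` and the upstream SigNoz repo has been added as `upstream` (both remote names and the branch name here are assumptions):
```bash
# pull the latest upstream changes
git fetch upstream
# merge the latest develop into your feature branch
git checkout my-feature
git merge upstream/develop
# resolve conflicts if any, re-run the tests, then update your PR
git push origin my-feature
```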

## How can I get help?

Need assistance? Join our Slack community:
- [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M)
- [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B)
GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).

## Where do I go from here?
**Note:** Unless your change is small, **please** consider submitting different Pull Request(s):

- Set up your [development environment](docs/contributing/development.md)
- Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo-docs.md)
* 1️⃣ The first PR should include the overall structure of the new component:
  * Readme, configuration, interfaces or base classes, etc...
  * This PR is usually trivial to review, so the size limit does not apply to it.
* 2️⃣ The second PR should include the concrete implementation of the component. If the size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into multiple PRs.
  * If there are multiple sub-components, then ideally each one should be implemented as a **separate** pull request.
* The last PR should include changes to **any user-facing documentation**, and should include end-to-end tests if applicable. The component must be enabled only after sufficient testing, when there is enough confidence in its stability and quality.

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and the [slack community](https://signoz.io/slack).

### Pointers:
- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).

<hr>

### Conventions to follow when submitting Commits and Pull Request(s).

We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/); more specifically, the commits and PRs **should have type specifiers** prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.

e.g. If you are submitting a fix for an issue in frontend, the PR name should be prefixed with **`fix(FE):`**
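
A few illustrative commit/PR names following this convention (the names themselves are hypothetical):
```
feat(FE): add sparkline to the services overview page
fix(QS): handle empty ClickHouse DSN gracefully
docs: clarify local development setup
```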
- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.

- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

**[`^top^`](#contributing-guidelines)**

<hr>

# 2. How to Contribute 🙋🏻‍♂️

#### There are primarily 3 areas in which you can contribute to SigNoz

- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz)

Depending upon your area of expertise & interest, you can choose one or more to contribute to. Below are detailed instructions to contribute in each area.

**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it.

**[`^top^`](#contributing-guidelines)**

<hr>

# 3. Develop Frontend 🌚

**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**

Also, have a look at the [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) sections for more info on how to set up the SigNoz frontend locally (with and without Docker).

## 3.1 Contribute to Frontend with Docker installation of SigNoz

- Clone the SigNoz repository and cd into the signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out the `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)

- run `cd deploy` to move to the deploy directory,
- Install signoz locally **without** the frontend,
  - Add / Uncomment the below configuration in the query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
```
ports:
  - "8080:8080"
```
<img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">

- Next run,
```
cd deploy/docker
sudo docker compose up -d
```
- `cd ../frontend` and change the baseURL in [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2). To do that, create a `.env` file in the `frontend` directory with the `FRONTEND_API_ENDPOINT` environment variable matching your configuration.

If you have the backend API exposed via the frontend nginx:
```
FRONTEND_API_ENDPOINT=http://localhost:3301
```
If not:
```
FRONTEND_API_ENDPOINT=http://localhost:8080
```

- Next,
```
yarn install
yarn dev
```

## 3.2 Contribute to Frontend without installing SigNoz backend

If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.

- Clone the SigNoz repository and cd into the signoz/frontend directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
```
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
- Next,
```
yarn install
yarn dev
```

Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `<test environment URL>`.

**Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)

**[`^top^`](#contributing-guidelines)**

<hr>

# 4. Contribute to Backend (Query-Service) 🌑

**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**

## 4.1 Prerequisites

### 4.1.1 Install SQLite3

- Run the `sqlite3` command to check if you already have SQLite3 installed on your machine (a quick version check follows this list).

- If it is not installed already, install it using the commands below
  - on Linux
    - on Debian / Ubuntu
```
sudo apt install sqlite3
```
    - on CentOS / Fedora / RedHat
```
sudo yum install sqlite3
```
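
A quick way to verify the installation (the version string shown is just an example; yours will differ):
```bash
sqlite3 --version
# 3.45.1 2024-01-30 ... (example output)
```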
## 4.2 To run ClickHouse setup (recommended for local development)

- Clone the SigNoz repository and cd into the signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out the `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">

- Comment out the `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">

- add the below configuration to the `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
```
ports:
  - 9001:9000
```
<img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">

- run `cd pkg/query-service/` to move to the `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
```
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)

- Now, install SigNoz locally **without** the `frontend` and `query-service`,
  - If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
  - If you are on `arm64` processors (Apple M1 Macs) run `sudo make run-arm`

#### Run locally,
```
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```
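
If query-service fails to start, it can help to first confirm that ClickHouse is reachable on the remapped port. A minimal check, assuming `clickhouse-client` is installed on the host:
```bash
# should print the server version if port 9001 is mapped correctly
clickhouse-client --port 9001 --query "SELECT version()"
```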

#### Build and Run locally
```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```

#### Docker Images
The docker images of query-service are available at https://hub.docker.com/r/signoz/query-service

```
docker pull signoz/query-service
```

```
docker pull signoz/query-service:latest
```

```
docker pull signoz/query-service:develop
```

### Important Note:

**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)

If you want to see how the frontend plays with query service, you can also run the frontend in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts), as the `query-service` is now running at port `8080`.

<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.

Click the button below. A workspace with all required environments will be created.

[](https://gitpod.io/#https://github.com/SigNoz/signoz)

> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->

**[`^top^`](#contributing-guidelines)**

<hr>

# 5. Contribute to SigNoz Helm Chart 📊

**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**

## 5.1 To run helm chart for local development

- Clone the SigNoz repository and cd into the charts directory,
```
git clone https://github.com/SigNoz/charts.git && cd charts
```
- It is recommended to use a lightweight kubernetes (k8s) cluster for local development:
  - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
  - [k3d](https://k3d.io/#installation)
  - [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster (see the kind example after this list),
- run `make dev-install` to install the SigNoz chart with the `my-release` release name in the `platform` namespace,
- next run,
```
kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
```
to make SigNoz UI available at [localhost:3301](http://localhost:3301)
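
For example, with kind (the cluster name is arbitrary):
```bash
# create a local cluster and confirm kubectl targets it
kind create cluster --name signoz
kubectl config current-context   # should print kind-signoz
```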

**5.1.1 To install the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
  | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```

**5.1.2 To load data with the HotROD sample app:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
  'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
```

**5.1.3 To stop the load generation:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl \
  http://locust-master:8089/stop
```

**5.1.4 To delete the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
  | HOTROD_NAMESPACE=sample-application bash
```

**[`^top^`](#contributing-guidelines)**

---

# 6. Contribute to Dashboards 📈

**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)**

To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief:

1. Create a dashboard JSON file.
2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed.
3. Include screenshots of the dashboard in the `assets/` directory.
4. Submit a pull request for review.

## Other Ways to Contribute

There are many other ways to get involved with the community and to participate in this project:

- Use the product, submitting GitHub issues when a problem is found.
- Help code review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and the [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.

Again, feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

Thank You!
Makefile (354 changes)
@@ -1,200 +1,196 @@
##############################################################
# variables
##############################################################
SHELL := /bin/bash
SRC ?= $(shell pwd)
NAME ?= signoz
OS ?= $(shell uname -s | tr '[A-Z]' '[a-z]')
ARCH ?= $(shell uname -m | sed 's/x86_64/amd64/g' | sed 's/aarch64/arm64/g')
COMMIT_SHORT_SHA ?= $(shell git rev-parse --short HEAD)
BRANCH_NAME ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
VERSION ?= $(BRANCH_NAME)-$(COMMIT_SHORT_SHA)
TIMESTAMP ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
ARCHS ?= amd64 arm64
TARGET_DIR ?= $(shell pwd)/target
#
# Reference Guide - https://www.gnu.org/software/make/manual/make.html
#

ZEUS_URL ?= https://api.signoz.cloud
GO_BUILD_LDFLAG_ZEUS_URL = -X github.com/SigNoz/signoz/ee/zeus.url=$(ZEUS_URL)
LICENSE_URL ?= https://license.signoz.io
GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=$(LICENSE_URL)
# Build variables
BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
ZEUS_URL ?= https://api.signoz.cloud
DEV_BUILD ?= "" # set to any non-empty value to enable dev build

GO_BUILD_VERSION_LDFLAGS = -X github.com/SigNoz/signoz/pkg/version.version=$(VERSION) -X github.com/SigNoz/signoz/pkg/version.hash=$(COMMIT_SHORT_SHA) -X github.com/SigNoz/signoz/pkg/version.time=$(TIMESTAMP) -X github.com/SigNoz/signoz/pkg/version.branch=$(BRANCH_NAME)
GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS))
GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/pkg/query-service
GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community
GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS))
GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS))
GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/ee/query-service
GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO)
# Internal variables or constants.
FRONTEND_DIRECTORY ?= frontend
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker
SWARM_DIRECTORY ?= deploy/docker-swarm
CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile

DOCKER_BUILD_ARCHS_COMMUNITY = $(addprefix docker-build-community-,$(ARCHS))
DOCKERFILE_COMMUNITY = $(SRC)/pkg/query-service/Dockerfile
DOCKER_REGISTRY_COMMUNITY ?= docker.io/signoz/signoz-community
DOCKER_BUILD_ARCHS_ENTERPRISE = $(addprefix docker-build-enterprise-,$(ARCHS))
DOCKERFILE_ENTERPRISE = $(SRC)/ee/query-service/Dockerfile
DOCKER_REGISTRY_ENTERPRISE ?= docker.io/signoz/signoz
JS_BUILD_CONTEXT = $(SRC)/frontend
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
GOPATH ?= $(shell go env GOPATH)

##############################################################
# directories
##############################################################
$(TARGET_DIR):
    mkdir -p $(TARGET_DIR)
REPONAME ?= signoz
DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service

##############################################################
# common commands
##############################################################
.PHONY: help
help: ## Displays help.
    @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n\nTargets:\n"} /^[a-z0-9A-Z_-]+:.*?##/ { printf " \033[36m%-40s\033[0m %s\n", $$1, $$2 }' $(MAKEFILE_LIST)
# Build-time Go variables
PACKAGE?=go.signoz.io/signoz
buildVersion=${PACKAGE}/pkg/query-service/version.buildVersion
buildHash=${PACKAGE}/pkg/query-service/version.buildHash
buildTime=${PACKAGE}/pkg/query-service/version.buildTime
gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL

##############################################################
# devenv commands
##############################################################
.PHONY: devenv-clickhouse
devenv-clickhouse: ## Run clickhouse in devenv
    @cd .devenv/docker/clickhouse; \
    docker compose -f compose.yaml up -d
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}

.PHONY: devenv-postgres
devenv-postgres: ## Run postgres in devenv
    @cd .devenv/docker/postgres; \
    docker compose -f compose.yaml up -d
all: build-push-frontend build-push-query-service

##############################################################
# go commands
##############################################################
.PHONY: go-run-enterprise
go-run-enterprise: ## Runs the enterprise go backend server
    @SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
    SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
    SIGNOZ_WEB_ENABLED=false \
    SIGNOZ_JWT_SECRET=secret \
    SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
    SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
    SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
    go run -race \
    $(GO_BUILD_CONTEXT_ENTERPRISE)/main.go \
    --config ./conf/prometheus.yml \
    --cluster cluster
# Steps to build static files of frontend
build-frontend-static:
    @echo "------------------"
    @echo "--> Building frontend static files"
    @echo "------------------"
    @cd $(FRONTEND_DIRECTORY) && \
    rm -rf build && \
    CI=1 yarn install && \
    yarn build && \
    ls -l build

.PHONY: go-test
go-test: ## Runs go unit tests
    @go test -race ./...
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
build-frontend-amd64: build-frontend-static
    @echo "------------------"
    @echo "--> Building frontend docker image for amd64"
    @echo "------------------"
    @cd $(FRONTEND_DIRECTORY) && \
    docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
    --build-arg TARGETPLATFORM="linux/amd64" .

.PHONY: go-run-community
go-run-community: ## Runs the community go backend server
    @SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
    SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
    SIGNOZ_WEB_ENABLED=false \
    SIGNOZ_JWT_SECRET=secret \
    SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
    SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
    SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
    go run -race \
    $(GO_BUILD_CONTEXT_COMMUNITY)/main.go \
    --config ./conf/prometheus.yml \
    --cluster cluster
# Step to build and push docker image of frontend (used in push pipeline)
build-push-frontend: build-frontend-static
    @echo "------------------"
    @echo "--> Building and pushing frontend docker image"
    @echo "------------------"
    @cd $(FRONTEND_DIRECTORY) && \
    docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
    --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .

.PHONY: go-build-community $(GO_BUILD_ARCHS_COMMUNITY)
go-build-community: ## Builds the go backend server for community
go-build-community: $(GO_BUILD_ARCHS_COMMUNITY)
$(GO_BUILD_ARCHS_COMMUNITY): go-build-community-%: $(TARGET_DIR)
    @mkdir -p $(TARGET_DIR)/$(OS)-$*
    @echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)-community"
    @if [ $* = "arm64" ]; then \
    CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
# Steps to build static binary of query service
.PHONY: build-query-service-static
build-query-service-static:
    @echo "------------------"
    @echo "--> Building query-service static binary"
    @echo "------------------"
    @if [ $(DEV_BUILD) != "" ]; then \
    cd $(QUERY_SERVICE_DIRECTORY) && \
    CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
    -ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
    else \
    CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
    cd $(QUERY_SERVICE_DIRECTORY) && \
    CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
    -ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
    fi

.PHONY: build-query-service-static-amd64
build-query-service-static-amd64:
    make GOARCH=amd64 build-query-service-static

.PHONY: go-build-enterprise $(GO_BUILD_ARCHS_ENTERPRISE)
go-build-enterprise: ## Builds the go backend server for enterprise
go-build-enterprise: $(GO_BUILD_ARCHS_ENTERPRISE)
$(GO_BUILD_ARCHS_ENTERPRISE): go-build-enterprise-%: $(TARGET_DIR)
    @mkdir -p $(TARGET_DIR)/$(OS)-$*
    @echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
    @if [ $* = "arm64" ]; then \
    CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
.PHONY: build-query-service-static-arm64
build-query-service-static-arm64:
    make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static

# Steps to build static binary of query service for all platforms
.PHONY: build-query-service-static-all
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static

# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
    @echo "------------------"
    @echo "--> Building query-service docker image for amd64"
    @echo "------------------"
    @docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
    --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
    --build-arg TARGETPLATFORM="linux/amd64" .

# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service: build-query-service-static-all
    @echo "------------------"
    @echo "--> Building and pushing query-service docker image"
    @echo "------------------"
    @docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
    --push --platform linux/arm64,linux/amd64 \
    --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .

# Step to build EE docker image of query service in amd64 (used in build pipeline)
build-ee-query-service-amd64:
    @echo "------------------"
    @echo "--> Building query-service docker image for amd64"
    @echo "------------------"
    make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64

# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
build-push-ee-query-service:
    @echo "------------------"
    @echo "--> Building and pushing query-service docker image"
    @echo "------------------"
    make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service

dev-setup:
    mkdir -p /var/lib/signoz
    sqlite3 /var/lib/signoz/signoz.db "VACUUM";
    mkdir -p pkg/query-service/config/dashboards
    @echo "------------------"
    @echo "--> Local Setup completed"
    @echo "------------------"

pull-signoz:
    @docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull

run-signoz:
    @docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d

run-testing:
    @docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.testing.yaml up --build -d

down-signoz:
    @docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v

clear-standalone-data:
    @docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
    sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"

clear-swarm-data:
    @docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
    sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"

clear-standalone-ch:
    @docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
    sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"

clear-swarm-ch:
    @docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
    sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"

check-no-ee-references:
    @echo "Checking for 'ee' package references in 'pkg' directory..."
    @if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
    echo "Error: Found references to 'ee' packages in 'pkg' directory"; \
    exit 1; \
    else \
    CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
    echo "No references to 'ee' packages found in 'pkg' directory"; \
    fi

.PHONY: go-build-enterprise-race $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
go-build-enterprise-race: ## Builds the go backend server for enterprise with race
go-build-enterprise-race: $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
$(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
    @mkdir -p $(TARGET_DIR)/$(OS)-$*
    @echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
    @if [ $* = "arm64" ]; then \
    CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
test:
    go test ./pkg/...

goreleaser-snapshot:
    @if [[ ${GORELEASER_WORKDIR} ]]; then \
    cd ${GORELEASER_WORKDIR} && \
    goreleaser release --clean --snapshot; \
    cd -; \
    else \
    CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
    goreleaser release --clean --snapshot; \
    fi

##############################################################
# js commands
##############################################################
.PHONY: js-build
js-build: ## Builds the js frontend
    @echo ">> building js frontend"
    @cd $(JS_BUILD_CONTEXT) && CI=1 yarn install && yarn build

##############################################################
# docker commands
##############################################################
.PHONY: docker-build-community $(DOCKER_BUILD_ARCHS_COMMUNITY)
docker-build-community: ## Builds the docker image for community
docker-build-community: $(DOCKER_BUILD_ARCHS_COMMUNITY)
$(DOCKER_BUILD_ARCHS_COMMUNITY): docker-build-community-%: go-build-community-% js-build
    @echo ">> building docker image for $(NAME)-community"
    @docker build -t "$(DOCKER_REGISTRY_COMMUNITY):$(VERSION)-$*" \
    --build-arg TARGETARCH="$*" \
    -f $(DOCKERFILE_COMMUNITY) $(SRC)

.PHONY: docker-buildx-community
docker-buildx-community: ## Builds the docker image for community using buildx
docker-buildx-community: go-build-community js-build
    @echo ">> building docker image for $(NAME)-community"
    @docker buildx build --file $(DOCKERFILE_COMMUNITY) \
    --progress plain \
    --platform linux/arm64,linux/amd64 \
    --push \
    --tag $(DOCKER_REGISTRY_COMMUNITY):$(VERSION) $(SRC)

.PHONY: docker-build-enterprise $(DOCKER_BUILD_ARCHS_ENTERPRISE)
docker-build-enterprise: ## Builds the docker image for enterprise
docker-build-enterprise: $(DOCKER_BUILD_ARCHS_ENTERPRISE)
$(DOCKER_BUILD_ARCHS_ENTERPRISE): docker-build-enterprise-%: go-build-enterprise-% js-build
    @echo ">> building docker image for $(NAME)"
    @docker build -t "$(DOCKER_REGISTRY_ENTERPRISE):$(VERSION)-$*" \
    --build-arg TARGETARCH="$*" \
    -f $(DOCKERFILE_ENTERPRISE) $(SRC)

.PHONY: docker-buildx-enterprise
docker-buildx-enterprise: ## Builds the docker image for enterprise using buildx
docker-buildx-enterprise: go-build-enterprise js-build
    @echo ">> building docker image for $(NAME)"
    @docker buildx build --file $(DOCKERFILE_ENTERPRISE) \
    --progress plain \
    --platform linux/arm64,linux/amd64 \
    --push \
    --tag $(DOCKER_REGISTRY_ENTERPRISE):$(VERSION) $(SRC)

##############################################################
# python commands
##############################################################
.PHONY: py-fmt
py-fmt: ## Run black for integration tests
    @cd tests/integration && poetry run black .

.PHONY: py-lint
py-lint: ## Run lint for integration tests
    @cd tests/integration && poetry run isort .
    @cd tests/integration && poetry run autoflake .
    @cd tests/integration && poetry run pylint .

.PHONY: py-test
py-test: ## Runs integration tests
    @cd tests/integration && poetry run pytest --basetemp=./tmp/ -vv --capture=no src/
goreleaser-snapshot-histogram-quantile:
    make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot
@@ -3,12 +3,6 @@
# Do not modify this file
#

##################### Version #####################
version:
  banner:
    # Whether to enable the version banner on startup.
    enabled: true

##################### Instrumentation #####################
instrumentation:
  logs:
@@ -50,7 +44,7 @@ cache:
    # Time-to-live for cache entries in memory. Specify the duration in ns
    ttl: 60000000000
    # The interval at which the cache will be cleaned up
    cleanup_interval: 1m
    cleanupInterval: 1m
  # redis: Uses Redis as the caching backend.
  redis:
    # The hostname or IP address of the Redis server.
@@ -72,6 +66,7 @@ sqlstore:
    # The path to the SQLite database file.
    path: /var/lib/signoz/signoz.db


##################### APIServer #####################
apiserver:
  timeout:
@@ -87,39 +82,21 @@ apiserver:
    # List of routes to exclude from request/response logging.
    excluded_routes:
      - /api/v1/health
      - /api/v1/version
      - /


##################### TelemetryStore #####################
telemetrystore:
  # Specifies the telemetrystore provider to use.
  provider: clickhouse
  # Maximum number of idle connections in the connection pool.
  max_idle_conns: 50
  # Maximum number of open connections to the database.
  max_open_conns: 100
  # Maximum time to wait for a connection to be established.
  dial_timeout: 5s
  # Specifies the telemetrystore provider to use.
  provider: clickhouse
  clickhouse:
    # The DSN to use for clickhouse.
    dsn: tcp://localhost:9000
    # The query settings for clickhouse.
    settings:
      max_execution_time: 0
      max_execution_time_leaf: 0
      timeout_before_checking_execution_speed: 0
      max_bytes_to_read: 0
      max_result_rows_for_ch_query: 0

##################### Prometheus #####################
prometheus:
  active_query_tracker:
    # Whether to enable the active query tracker.
    enabled: true
    # The path to use for the active query tracker.
    path: ""
    # The maximum number of concurrent queries.
    max_concurrent: 20
  # The DSN to use for ClickHouse.
  dsn: http://localhost:9000

##################### Alertmanager #####################
alertmanager:
@@ -132,7 +109,7 @@ alertmanager:
  # The poll interval for periodically syncing the alertmanager with the config in the store.
  poll_interval: 1m
  # The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself.
  external_url: http://localhost:8080
  external_url: http://localhost:9093
  # The global configuration for the alertmanager. All the exhaustive fields can be found in the upstream: https://github.com/prometheus/alertmanager/blob/efa05feffd644ba4accb526e98a8c6545d26a783/config/config.go#L833
  global:
    # ResolveTimeout is the time after which an alert is declared resolved if it has not been updated.
@@ -164,54 +141,3 @@ alertmanager:
    maintenance_interval: 15m
    # Retention of the notification logs.
    retention: 120h


##################### Analytics #####################
analytics:
  # Whether to enable analytics.
  enabled: false

##################### Emailing #####################
emailing:
  # Whether to enable emailing.
  enabled: false
  templates:
    # The directory containing the email templates. This directory should contain a list of files defined at pkg/types/emailtypes/template.go.
    directory: /opt/signoz/conf/templates/email
  smtp:
    # The SMTP server address.
    address: localhost:25
    # The email address to use for the SMTP server.
    from:
    # The hello message to use for the SMTP server.
    hello:
    # The static headers to send with the email.
    headers: {}
    auth:
      # The username to use for the SMTP server.
      username:
      # The password to use for the SMTP server.
      password:
      # The secret to use for the SMTP server.
      secret:
      # The identity to use for the SMTP server.
      identity:
    tls:
      # Whether to enable TLS. It should be false in most cases since the authentication mechanism should use the STARTTLS extension instead.
      enabled: false
      # Whether to skip TLS verification.
      insecure_skip_verify: false
      # The path to the CA file.
      ca_file_path:
      # The path to the key file.
      key_file_path:
      # The path to the certificate file.
      cert_file_path:

##################### Sharder (experimental) #####################
sharder:
  # Specifies the sharder provider to use.
  provider: noop
  single:
    # The org id to which this instance belongs.
    org_id: org_id
@@ -26,7 +26,7 @@ cd deploy/docker
docker compose up -d
```

Open http://localhost:8080 in your favourite browser.
Open http://localhost:3301 in your favourite browser.

To start collecting logs and metrics from your infrastructure, run the following command:
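
Before opening the UI, it can help to confirm the containers are up (a quick sketch; the listed service names come from the compose file):
```bash
docker compose ps
```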
@@ -55,7 +55,7 @@ cd deploy/docker-swarm
docker stack deploy -c docker-compose.yaml signoz
```

Open http://localhost:8080 in your favourite browser.
Open http://localhost:3301 in your favourite browser.

To start collecting logs and metrics from your infrastructure, run the following command:
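
To confirm the stack came up before browsing (a sketch; `signoz` is the stack name used above, and the full service name is hypothetical):
```bash
docker stack services signoz
docker service logs signoz_query-service --tail 50
```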
deploy/common/signoz/nginx-config.conf (new file, 64 changes)
@@ -0,0 +1,64 @@
map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}

server {
    listen 3301;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    # to handle uri issue 414 from nginx
    client_max_body_size 24M;
    large_client_header_buffers 8 128k;

    location / {
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }

    location ~ ^/api/(v1|v3)/logs/(tail|livetail) {
        proxy_pass http://query-service:8080;
        proxy_http_version 1.1;

        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;

        # don't buffer the data, send it directly to the client
        proxy_buffering off;
        proxy_cache off;
    }

    location /api {
        proxy_pass http://query-service:8080/api;
        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;
    }

    location /ws {
        proxy_pass http://query-service:8080/ws;
        proxy_http_version 1.1;
        proxy_set_header Upgrade "websocket";
        proxy_set_header Connection "upgrade";
        proxy_read_timeout 86400;
    }

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
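
Once the frontend container is up, a quick check that both the static UI and the `/api` proxy above respond on port 3301 (paths taken from the config; a healthy setup should return `200`):
```bash
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:3301/
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:3301/api/v1/health
```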
@@ -1 +1 @@
server_endpoint: ws://signoz:4320/v1/opamp
server_endpoint: ws://query-service:4320/v1/opamp
@@ -172,28 +172,39 @@ services:
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:0.23.7
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - ./clickhouse-setup/data/alertmanager:/data
    depends_on:
      - query-service
  query-service:
    !!merge <<: *db-depend
    image: signoz/signoz:v0.85.3
    image: signoz/query-service:0.75.0
    command:
      - --config=/root/config/prometheus.yml
    ports:
      - "8080:8080" # signoz port
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "8080:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - ./clickhouse-setup/data/signoz/:/var/lib/signoz/
    environment:
      - SIGNOZ_ALERTMANAGER_PROVIDER=signoz
      - SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
      - SIGNOZ_JWT_SECRET=secret
    healthcheck:
      test:
        - CMD
@@ -204,9 +215,19 @@ services:
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:0.75.0
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:v0.111.42
    image: signoz/signoz-otel-collector:0.111.29
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
@@ -227,10 +248,10 @@ services:
    depends_on:
      - clickhouse
      - schema-migrator
      - signoz
      - query-service
  schema-migrator:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:v0.111.42
    image: signoz/signoz-schema-migrator:0.111.29
    deploy:
      restart_policy:
        condition: on-failure
@@ -245,6 +266,8 @@ networks:
  signoz-net:
    name: signoz-net
volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  clickhouse-2:
@@ -108,21 +108,33 @@ services:
      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:0.23.7
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      - query-service
  query-service:
    !!merge <<: *db-depend
    image: signoz/signoz:v0.85.3
    image: signoz/query-service:0.75.0
    command:
      - --config=/root/config/prometheus.yml
    ports:
      - "8080:8080" # signoz port
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "8080:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - SIGNOZ_ALERTMANAGER_PROVIDER=signoz
      - SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
@@ -139,9 +151,19 @@ services:
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:0.75.0
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:v0.111.42
    image: signoz/signoz-otel-collector:0.111.29
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
@@ -162,10 +184,10 @@ services:
    depends_on:
      - clickhouse
      - schema-migrator
      - signoz
      - query-service
  schema-migrator:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:v0.111.42
    image: signoz/signoz-schema-migrator:0.111.29
    deploy:
      restart_policy:
        condition: on-failure
@@ -180,6 +202,8 @@ networks:
  signoz-net:
    name: signoz-net
volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  sqlite:

@@ -42,7 +42,7 @@ receivers:
      # please remove names from below if you want to collect logs from them
      - type: filter
        id: signoz_logs_filter
        expr: 'attributes.container_name matches "^(signoz_(logspout|signoz|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
        expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
processors:
  batch:
    send_batch_size: 10000

@@ -26,7 +26,7 @@ processors:
    detectors: [env, system]
    timeout: 2s
  signozspanmetrics/delta:
    metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
    metrics_exporter: clickhousemetricswrite
    metrics_flush_interval: 60s
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
    dimensions_cache_size: 100000
@@ -64,10 +64,8 @@ exporters:
    endpoint: tcp://clickhouse:9000/signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
    disable_v2: true
  clickhousemetricswrite/prometheus:
    endpoint: tcp://clickhouse:9000/signoz_metrics
    disable_v2: true
  signozclickhousemetrics:
    dsn: tcp://clickhouse:9000/signoz_metrics
  clickhouselogsexporter:
@@ -175,22 +175,36 @@ services:
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse-3:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
  query-service:
    !!merge <<: *db-depend
    image: signoz/signoz:${VERSION:-v0.85.3}
    container_name: signoz
    image: signoz/query-service:${DOCKER_TAG:-0.75.0}
    container_name: signoz-query-service
    command:
      - --config=/root/config/prometheus.yml
    ports:
      - "8080:8080" # signoz port
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "3301:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - SIGNOZ_ALERTMANAGER_PROVIDER=signoz
      - SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
@@ -207,10 +221,21 @@ services:
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:${DOCKER_TAG:-0.75.0}
    container_name: signoz-frontend
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.42}
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.29}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
@@ -232,11 +257,11 @@ services:
        condition: service_healthy
      schema-migrator-sync:
        condition: service_completed_successfully
      signoz:
      query-service:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.42}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
    container_name: schema-migrator-sync
    command:
      - sync
@@ -247,7 +272,7 @@ services:
        condition: service_healthy
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.42}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
    container_name: schema-migrator-async
    command:
      - async
@@ -258,6 +283,8 @@ networks:
  signoz-net:
    name: signoz-net
volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  clickhouse-2:

deploy/docker/docker-compose.testing.yaml (new file, 224 lines)
@@ -0,0 +1,224 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  restart: unless-stopped
  logging:
    options:
      max-size: 50m
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9363"
    signoz.io/path: "/metrics"
  depends_on:
    init-clickhouse:
      condition: service_completed_successfully
    zookeeper-1:
      condition: service_healthy
  healthcheck:
    test:
      - CMD
      - wget
      - --spider
      - -q
      - 0.0.0.0:8123/ping
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: bitnami/zookeeper:3.7.1
  user: root
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9141"
    signoz.io/path: "/metrics"
  healthcheck:
    test:
      - CMD-SHELL
      - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
    interval: 30s
    timeout: 5s
    retries: 3
x-db-depend: &db-depend
  !!merge <<: *common
  depends_on:
    clickhouse:
      condition: service_healthy
    schema-migrator-sync:
      condition: service_completed_successfully
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-init-clickhouse
    command:
      - bash
      - -c
      - |
        version="v0.0.1"
        node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
        node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
        echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
        cd /tmp
        wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
        tar -xvzf histogram-quantile.tar.gz
        mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
    restart: on-failure
    volumes:
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
  zookeeper-1:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-1
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  clickhouse:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
  query-service:
    !!merge <<: *db-depend
    image: signoz/query-service:${DOCKER_TAG:-0.75.0}
    container_name: signoz-query-service
    command:
      - --config=/root/config/prometheus.yml
      - --gateway-url=https://api.staging.signoz.cloud
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "8080:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
      - KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - localhost:8080/api/v1/health
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:${DOCKER_TAG:-0.75.0}
    container_name: signoz-frontend
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.29}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
      - --copy-path=/var/tmp/collector-config.yaml
      - --feature-gates=-pkg.translator.prometheus.NormalizeName
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
    depends_on:
      query-service:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
    container_name: schema-migrator-sync
    command:
      - sync
      - --dsn=tcp://clickhouse:9000
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
    restart: on-failure
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
    container_name: schema-migrator-async
    command:
      - async
      - --dsn=tcp://clickhouse:9000
      - --up=
    restart: on-failure
networks:
  signoz-net:
    name: signoz-net
volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  sqlite:
    name: signoz-sqlite
  zookeeper-1:
    name: signoz-zookeeper-1
@@ -108,22 +108,36 @@ services:
      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
  query-service:
    !!merge <<: *db-depend
    image: signoz/signoz:${VERSION:-v0.85.3}
    container_name: signoz
    image: signoz/query-service:${DOCKER_TAG:-0.75.0}
    container_name: signoz-query-service
    command:
      - --config=/root/config/prometheus.yml
    ports:
      - "8080:8080" # signoz port
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "3301:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - SIGNOZ_ALERTMANAGER_PROVIDER=signoz
      - SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
@@ -140,9 +154,20 @@ services:
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:${DOCKER_TAG:-0.75.0}
    container_name: signoz-frontend
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.42}
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.29}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
@@ -160,11 +185,11 @@ services:
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
    depends_on:
      signoz:
      query-service:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.42}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
    container_name: schema-migrator-sync
    command:
      - sync
@@ -176,7 +201,7 @@ services:
    restart: on-failure
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.42}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
    container_name: schema-migrator-async
    command:
      - async
@@ -187,6 +212,8 @@ networks:
  signoz-net:
    name: signoz-net
volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  sqlite:
@@ -79,7 +79,7 @@ receivers:
      # please remove names from below if you want to collect logs from them
      - type: filter
        id: signoz_logs_filter
        expr: 'attributes.container_name matches "^signoz|(signoz-(|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
        expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
processors:
  batch:
    send_batch_size: 10000

@@ -26,7 +26,7 @@ processors:
    detectors: [env, system]
    timeout: 2s
  signozspanmetrics/delta:
    metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
    metrics_exporter: clickhousemetricswrite
    metrics_flush_interval: 60s
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
    dimensions_cache_size: 100000
@@ -62,12 +62,10 @@ exporters:
    use_new_schema: true
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/signoz_metrics
    disable_v2: true
    resource_to_telemetry_conversion:
      enabled: true
  clickhousemetricswrite/prometheus:
    endpoint: tcp://clickhouse:9000/signoz_metrics
    disable_v2: true
  signozclickhousemetrics:
    dsn: tcp://clickhouse:9000/signoz_metrics
  clickhouselogsexporter:
@@ -93,7 +93,7 @@ check_os() {
        ;;
    Red\ Hat*)
        desired_os=1
        os="rhel"
        os="red hat"
        package_manager="yum"
        ;;
    CentOS*)
@@ -127,7 +127,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
    local port_check_output
    local ports_pattern="8080|4317"
    local ports_pattern="3301|4317"

    if is_mac; then
        port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -144,7 +144,7 @@ check_ports_occupied() {
        send_event "port_not_available"

        echo "+++++++++++ ERROR ++++++++++++++++++++++"
        echo "SigNoz requires ports 8080 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
        echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
        echo "You can run SigNoz on another port following this guide https://signoz.io/docs/install/troubleshooting/"
        echo "++++++++++++++++++++++++++++++++++++++++"
        echo ""
@@ -248,7 +248,7 @@ wait_for_containers_start() {

    # The while loop is important because for-loops don't work for dynamic values
    while [[ $timeout -gt 0 ]]; do
        status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:8080/api/v1/health?live=1" || true)"
        status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
        if [[ status_code -eq 200 ]]; then
            break
        else
@@ -484,7 +484,7 @@ pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1

# check for open ports, if signoz is not installed
if is_command_present docker-compose; then
    if $sudo_cmd $docker_compose_cmd ps | grep "signoz" | grep -q "healthy" > /dev/null 2>&1; then
    if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
        echo "SigNoz already installed, skipping the occupied ports check"
    else
        check_ports_occupied
@@ -533,7 +533,7 @@ else
    echo ""
    echo "🟢 Your installation is complete!"
    echo ""
    echo -e "🟢 SigNoz is running on http://localhost:8080"
    echo -e "🟢 Your frontend is running on http://localhost:3301"
    echo ""
    echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
    echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
@@ -1,95 +0,0 @@
# Development Guide

Welcome! This guide will help you set up your local development environment for SigNoz. Let's get you started! 🚀

## What do I need?

Before diving in, make sure you have these tools installed:

- **Git** - Our version control system
  - Download from [git-scm.com](https://git-scm.com/)

- **Go** - Powers our backend
  - Download from [go.dev/dl](https://go.dev/dl/)
  - Check [go.mod](../../go.mod#L3) for the minimum version

- **GCC** - Required for CGO dependencies
  - Download from [gcc.gnu.org](https://gcc.gnu.org/)

- **Node** - Powers our frontend
  - Download from [nodejs.org](https://nodejs.org)
  - Check [.nvmrc](../../frontend/.nvmrc) for the version

- **Yarn** - Our frontend package manager
  - Follow the [installation guide](https://yarnpkg.com/getting-started/install)

- **Docker** - For running Clickhouse and Postgres locally
  - Get it from [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)

> 💡 **Tip**: Run `make help` to see all available commands with descriptions

## How do I get the code?

1. Open your terminal
2. Clone the repository:
   ```bash
   git clone https://github.com/SigNoz/signoz.git
   ```
3. Navigate to the project:
   ```bash
   cd signoz
   ```

## How do I run it locally?

SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.

### 1. Setting up Clickhouse

First, we need to get Clickhouse running:

```bash
make devenv-clickhouse
```

This command:
- Starts Clickhouse in a single-shard, single-replica cluster
- Sets up Zookeeper
- Runs the latest schema migrations

### 2. Starting the Backend

1. Run the backend server:
   ```bash
   make go-run-community
   ```

2. Verify it's working:
   ```bash
   curl http://localhost:8080/api/v1/health
   ```

   You should see: `{"status":"ok"}`

> 💡 **Tip**: The API server runs at `http://localhost:8080/` by default

### 3. Setting up the Frontend

1. Install dependencies:
   ```bash
   yarn install
   ```

2. Create a `.env` file in the `frontend` directory:
   ```env
   FRONTEND_API_ENDPOINT=http://localhost:8080
   ```

3. Start the development server:
   ```bash
   yarn dev
   ```

> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code

Now you're all set to start developing! Happy coding! 🎉
@@ -1,103 +0,0 @@
# Errors

SigNoz includes its own structured [errors](/pkg/errors/errors.go) package. It's built on top of Go's `error` interface, extending it to add additional context that helps provide more meaningful error messages throughout the application.

## How to use it?

To use the SigNoz structured errors package, use these functions instead of the standard library alternatives:

```go
// Instead of errors.New()
errors.New(typ, code, message)

// Instead of fmt.Errorf()
errors.Newf(typ, code, message, args...)
```

### Typ
The Typ (read as Type, defined as `typ`) is used to categorize errors across the codebase and is loosely coupled with HTTP/GRPC status codes. All predefined types can be found in [pkg/errors/type.go](/pkg/errors/type.go). For example:

- `TypeInvalidInput` - Indicates invalid input was provided
- `TypeNotFound` - Indicates a resource was not found

By design, `typ` is unexported and cannot be declared outside of the [errors](/pkg/errors/errors.go) package. This ensures that it is consistent across the codebase and is used in a way that is meaningful.

### Code
Codes are used to provide more granular categorization within types. For instance, a type of `TypeInvalidInput` might have codes like `CodeInvalidEmail` or `CodeInvalidPassword`.

To create new error codes, use the `errors.MustNewCode` function:

```go
var (
	CodeThingAlreadyExists = errors.MustNewCode("thing_already_exists")
	CodeThingNotFound      = errors.MustNewCode("thing_not_found")
)
```

> 💡 **Note**: Error codes must match the regex `^[a-z_]+$`, otherwise the code will panic.

## Show me some examples

### Using the error
A basic example of using the error:

```go
var (
	CodeThingAlreadyExists = errors.MustNewCode("thing_already_exists")
)

func CreateThing(id string) error {
	_, err := thing.GetFromStore(id)
	if err != nil {
		if errors.Ast(err, errors.TypeNotFound) {
			// thing was not found, create it
			return thing.Create(id)
		}

		// something else went wrong, wrap the error with more context
		return errors.Wrapf(err, errors.TypeInternal, errors.CodeUnknown, "failed to get thing from store")
	}

	return errors.Newf(errors.TypeAlreadyExists, CodeThingAlreadyExists, "thing with id %s already exists", id)
}
```

### Changing the error
Sometimes you may want to change the error while preserving the message:

```go
func GetUserSecurely(id string) (*User, error) {
	user, err := repository.GetUser(id)
	if err != nil {
		if errors.Ast(err, errors.TypeNotFound) {
			// Convert NotFound to Forbidden for security reasons
			return nil, errors.New(errors.TypeForbidden, errors.CodeAccessDenied, "access denied to requested resource")
		}
		return nil, err
	}
	return user, nil
}
```

## Why do we need this?

In a large codebase like SigNoz, error handling is critical for maintaining reliability, debuggability, and a good user experience. We believe that it is the **responsibility of a function** to return **well-defined** errors that **accurately describe what went wrong**. With our structured error system:

- Functions can create precise errors with appropriate additional context
- Callers can make informed decisions based on the additional context
- Error context is preserved and enhanced as it moves up the call stack

The caller (which can be another function, an HTTP/gRPC handler, or something else entirely) can then choose to use this error to take appropriate actions such as:

- A function can branch into different paths based on the context
- An HTTP/gRPC handler can derive the correct status code and message from the error and send it to the client
- Logging systems can capture structured error information for better diagnostics

Although there might be cases where this seems too verbose, it makes the code more maintainable and consistent. A little verbose code is better than clever code that doesn't provide enough context.

## What should I remember?

- Think about error handling as you write your code, not as an afterthought.
- Always use the [errors](/pkg/errors/errors.go) package instead of the standard library's `errors.New()` or `fmt.Errorf()`.
- Always assign appropriate codes to errors when creating them instead of using the "catch all" error codes defined in [pkg/errors/code.go](/pkg/errors/code.go).
- Use `errors.Wrapf()` to add context to errors while preserving the original when appropriate.
@@ -1,11 +0,0 @@
# Go

This document provides an overview of contributing to the SigNoz backend written in Go. The SigNoz backend is built with Go, focusing on performance, maintainability, and developer experience. We strive for clean, idiomatic code that follows established Go practices while addressing the unique needs of an observability platform.

We adhere to three primary style guides as our foundation:

- [Effective Go](https://go.dev/doc/effective_go) - For writing idiomatic Go code
- [Code Review Comments](https://go.dev/wiki/CodeReviewComments) - For understanding common comments in code reviews
- [Google Style Guide](https://google.github.io/styleguide/go/) - Additional practices from Google

We **recommend** (almost enforce) reviewing these guides before contributing to the codebase. They provide valuable insights into writing idiomatic Go code and will help you understand our approach to backend development. In addition, we have a few rules that make certain areas stricter than the above; these can be found in area-specific files in this package.
@@ -1,94 +0,0 @@
# SQL
SigNoz utilizes a relational database to store metadata including organization information, user data and other settings.

## How to use it?

The database interface is defined in [SQLStore](/pkg/sqlstore/sqlstore.go). SigNoz leverages the Bun ORM to interact with the underlying database. To access the database instance, use the `BunDBCtx` function. For operations that require transactions across multiple database operations, use the `RunInTxCtx` function. This function embeds a transaction in the context, which propagates through various functions in the callback.

```go
type Thing struct {
	bun.BaseModel

	ID            types.Identifiable  `bun:",embed"`
	SomeColumn    string              `bun:"some_column"`
	TimeAuditable types.TimeAuditable `bun:",embed"`
	OrgID         string              `bun:"org_id"`
}

func GetThing(ctx context.Context, id string) (*Thing, error) {
	thing := new(Thing)
	err := sqlstore.
		BunDBCtx(ctx).
		NewSelect().
		Model(thing).
		Where("id = ?", id).
		Scan(ctx)

	return thing, err
}

func CreateThing(ctx context.Context, thing *Thing) error {
	_, err := sqlstore.
		BunDBCtx(ctx).
		NewInsert().
		Model(thing).
		Exec(ctx)

	return err
}
```
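
The example above only exercises `BunDBCtx`. For multi-statement work, a minimal sketch of how `RunInTxCtx` is typically used, assuming a `RunInTxCtx(ctx, opts, func(ctx context.Context) error) error` shape (the `nil` options argument and the helper names here are illustrative, not copied from the interface):

```go
func RenameThing(ctx context.Context, id string, name string) error {
	// RunInTxCtx embeds the transaction in the context, so BunDBCtx
	// inside the callback resolves to that same transaction.
	return sqlstore.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
		thing, err := GetThing(ctx, id)
		if err != nil {
			return err
		}

		thing.SomeColumn = name
		_, err = sqlstore.
			BunDBCtx(ctx).
			NewUpdate().
			Model(thing).
			WherePK().
			Exec(ctx)
		return err
	})
}
```

If the callback returns an error, both statements roll back together.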

> 💡 **Note**: Always use line breaks while working with SQL queries to enhance code readability.

> 💡 **Note**: Always use the `new` function to create new instances of structs.

## What are hooks?

Hooks are user-defined functions that execute before and/or after specific database operations. These hooks are particularly useful for generating telemetry data such as logs, traces, and metrics, providing visibility into database interactions. Hooks are defined in the [SQLStoreHook](/pkg/sqlstore/sqlstore.go) interface.
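
As a concrete illustration, here is a hedged sketch of a logging hook built directly on Bun's `QueryHook` interface (the `loggingHook` type and its log fields are our own illustrative additions; SigNoz's real hooks sit behind the `SQLStoreHook` interface):

```go
import (
	"context"
	"log/slog"
	"time"

	"github.com/uptrace/bun"
)

// loggingHook logs every query with its duration. Illustrative only.
type loggingHook struct{}

// BeforeQuery runs before each query; we pass the context through unchanged.
func (loggingHook) BeforeQuery(ctx context.Context, event *bun.QueryEvent) context.Context {
	return ctx
}

// AfterQuery runs after each query and emits a structured log line.
func (loggingHook) AfterQuery(ctx context.Context, event *bun.QueryEvent) {
	slog.InfoContext(ctx, "sql query",
		"query", event.Query,
		"duration", time.Since(event.StartTime),
	)
}
```

Such a hook would be registered on the underlying `*bun.DB` with `db.AddQueryHook(loggingHook{})`.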

## How is the schema designed?

SigNoz implements a star schema design with the organizations table as the central entity. All other tables link to the organizations table via foreign key constraints on the `org_id` column. This design ensures that every entity within the system is either directly or indirectly associated with an organization.

```mermaid
erDiagram
    ORGANIZATIONS {
        string id PK
        timestamp created_at
        timestamp updated_at
    }
    ENTITY_A {
        string id PK
        timestamp created_at
        timestamp updated_at
        string org_id FK
    }
    ENTITY_B {
        string id PK
        timestamp created_at
        timestamp updated_at
        string org_id FK
    }

    ORGANIZATIONS ||--o{ ENTITY_A : contains
    ORGANIZATIONS ||--o{ ENTITY_B : contains
```

> 💡 **Note**: There are rare exceptions to the above star schema design. Consult with the maintainers before deviating from the above design.

All tables follow a consistent primary key pattern using an `id` column (referenced by the `types.Identifiable` struct) and include `created_at` and `updated_at` columns (referenced by the `types.TimeAuditable` struct) for audit purposes.
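
For orientation, those embedded helper structs presumably look roughly like this (a sketch only; the field names and bun tags are illustrative, not copied from the `types` package):

```go
// Illustrative shapes; see pkg/types for the real definitions.
type Identifiable struct {
	ID string `bun:"id,pk,type:text"`
}

type TimeAuditable struct {
	CreatedAt time.Time `bun:"created_at"`
	UpdatedAt time.Time `bun:"updated_at"`
}
```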

## How to write migrations?

For schema migrations, use the [SQLMigration](/pkg/sqlmigration/sqlmigration.go) interface and write the migration in the same package. When creating migrations, adhere to these guidelines (a sketch of a minimal migration follows the list):

- Do not implement **`ON CASCADE` foreign key constraints**. Deletion operations should be handled explicitly in application logic rather than delegated to the database.
- Do not **import types from the types package** in the `sqlmigration` package. Instead, define the required types within the migration package itself. This practice ensures migration stability as the core types evolve over time.
- Do not implement **`Down` migrations**. As the codebase matures, we may introduce this capability, but for now, the `Down` function should remain empty.
- Always write **idempotent** migrations. This means that if the migration is run multiple times, it should not cause an error.
- A migration which is **dependent on the underlying dialect** (sqlite, postgres, etc) should be written as part of the [SQLDialect](/pkg/sqlstore/sqlstore.go) interface. The implementation needs to go in the dialect specific package of the respective database.
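
A minimal sketch that follows these guidelines (the struct, table, and column names are hypothetical, and the method set shown is abbreviated from the real interface):

```go
type addThingStatus struct{}

// Up is written idempotently: IF NOT EXISTS makes it safe to run more
// than once. Not every dialect supports this clause; per the guideline
// above, dialect-dependent variants belong behind SQLDialect.
func (m *addThingStatus) Up(ctx context.Context, db *bun.DB) error {
	_, err := db.ExecContext(ctx,
		`ALTER TABLE thing ADD COLUMN IF NOT EXISTS status TEXT NOT NULL DEFAULT 'active'`)
	return err
}

// Down remains empty, per the guidelines above.
func (m *addThingStatus) Down(ctx context.Context, db *bun.DB) error {
	return nil
}
```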

## What should I remember?

- Use `BunDBCtx` and `RunInTxCtx` to access the database instance and execute transactions respectively.
- While designing new tables, ensure the consistency of `id`, `created_at`, `updated_at` and an `org_id` column with a foreign key constraint to the `organizations` table (unless the table serves as a transitive entity not directly associated with an organization but indirectly associated with one).
- Implement deletion logic in the application rather than relying on cascading deletes in the database.
- While writing migrations, adhere to the guidelines mentioned above.
Binary file not shown. (Before: 143 KiB)
Binary file not shown. (Before: 157 KiB)
Binary file not shown. (Before: 76 KiB)
Binary file not shown. (Before: 306 KiB)
@@ -1,246 +0,0 @@
# Configuring OpenTelemetry Demo App with SigNoz

[The OpenTelemetry Astronomy Shop](https://github.com/open-telemetry/opentelemetry-demo) is an e-commerce web application, with **15 core microservices** in a **distributed system** which communicate over gRPC. Designed as a **polyglot** environment, it leverages a diverse set of programming languages, including Go, Python, .NET, Java, and others, showcasing cross-language instrumentation with OpenTelemetry. The intention is to provide a quickstart application that sends data, so you can experience SigNoz firsthand.

This guide provides a step-by-step walkthrough for setting up the **OpenTelemetry Demo App** with **SigNoz** as the backend for observability. It outlines steps to export telemetry data to **SigNoz self-hosted with Docker**, **SigNoz self-hosted with Kubernetes** and **SigNoz cloud**.
<br/>

__Table of Contents__
- [Send data to SigNoz Self-hosted with Docker](#send-data-to-signoz-self-hosted-with-docker)
  - [Prerequisites](#prerequisites)
  - [Clone the OpenTelemetry Demo App Repository](#clone-the-opentelemetry-demo-app-repository)
  - [Modify OpenTelemetry Collector Config](#modify-opentelemetry-collector-config)
  - [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app)
  - [Monitor with SigNoz (Docker)](#monitor-with-signoz-docker)
- [Send data to SigNoz Self-hosted with Kubernetes](#send-data-to-signoz-self-hosted-with-kubernetes)
  - [Prerequisites](#prerequisites-1)
  - [Install Helm Repo and Charts](#install-helm-repo-and-charts)
  - [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app-1)
  - [Monitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
- [What's next](#whats-next)


# Send data to SigNoz Self-hosted with Docker

In this guide you will install the OTel demo application using Docker and send telemetry data to SigNoz hosted with Docker, referred to as SigNoz [Docker] from now on.


## Prerequisites
- Docker and Docker Compose installed
- 6 GB of RAM for the application [as per OpenTelemetry documentation]
- Docker Desktop (nice to have, for easy monitoring)


## Clone the OpenTelemetry Demo App Repository
Clone the OTel demo app to any folder of your choice.
```sh
# Clone the OpenTelemetry Demo repository
git clone https://github.com/open-telemetry/opentelemetry-demo.git
cd opentelemetry-demo
```

## Modify OpenTelemetry Collector Config

By default, the collector in the demo application will merge the configuration from two files:

1. otelcol-config.yml [we don't touch this]
2. otelcol-config-extras.yml [we modify this]

To add SigNoz [Docker] as the backend, open the file `src/otel-collector/otelcol-config-extras.yml` and add the following,
```yaml
exporters:
  otlp:
    endpoint: "http://host.docker.internal:4317"
    tls:
      insecure: true
  debug:
    verbosity: detailed

service:
  pipelines:
    metrics:
      exporters: [otlp]
    traces:
      exporters: [spanmetrics, otlp]
    logs:
      exporters: [otlp]
```

The SigNoz OTel collector [SigNoz's otel-collector service] listens on port 4317 on localhost. When the OTel demo app is running within a Docker container and needs to transmit telemetry data to SigNoz, it cannot directly reference 'localhost', as this would refer to the container's own internal network. Instead, Docker provides a special DNS name, `host.docker.internal`, which resolves to the host machine's IP address from within containers. By configuring the OpenTelemetry Demo application to send data to `host.docker.internal:4317`, we establish a network path that allows the containerized application to transmit telemetry data across the container boundary to the SigNoz OTel collector running on the host machine's port 4317.

> Note: When merging extra configuration values with the existing collector config (`src/otel-collector/otelcol-config.yml`), objects are merged and arrays are replaced, resulting in previous pipeline configurations getting overridden.
> The spanmetrics exporter must be included in the array of exporters for the traces pipeline if overridden. Not including this exporter will result in an error.

<br>
<u>To send data to SigNoz Cloud</u>

If you want to send data to cloud instead, open the file `src/otel-collector/otelcol-config-extras.yml` and add the following,
```yaml
exporters:
  otlp:
    endpoint: "https://ingest.{your-region}.signoz.cloud:443"
    tls:
      insecure: false
    headers:
      signoz-access-token: <SIGNOZ-KEY>
  debug:
    verbosity: detailed

service:
  pipelines:
    metrics:
      exporters: [otlp]
    traces:
      exporters: [spanmetrics, otlp]
    logs:
      exporters: [otlp]
```
Remember to replace the region and ingestion key with the proper values as obtained from your account.


## Start the OpenTelemetry Demo App

Both SigNoz and the OTel demo app [the frontend-proxy service, to be accurate] default to port 8080. To prevent a port allocation conflict, modify the OTel demo application config to use port 8081 as the `ENVOY_PORT` value as shown below, and run the docker compose command.

```sh
ENVOY_PORT=8081 docker compose up -d
```
This spins up multiple microservices with OpenTelemetry instrumentation enabled. You can verify this by running,
```sh
docker compose ps -a
```
The result should look similar to this,



Navigate to `http://localhost:8081/` where you can access the OTel demo app UI. Generate some traffic to send to SigNoz [Docker].

## Monitor with SigNoz [Docker]
SigNoz exposes its UI at `http://localhost:8080/`. You should be able to see multiple services listed, as shown in the snapshot below.



This verifies that your OTel demo app is successfully sending telemetry data to SigNoz [Docker] as expected.


# Send data to SigNoz Self-hosted with Kubernetes

In this guide you will install the OTel demo application using Helm and send telemetry data to SigNoz hosted with Kubernetes, referred to as SigNoz [Kubernetes] from now on.

## Prerequisites

- Helm installed
- 6 GB of free RAM for the application [as per OpenTelemetry documentation]
- A Kubernetes cluster (EKS, GKE, Minikube)
- kubectl [CLI for Kubernetes]

> Note: We will be installing the OTel demo app using Helm charts, since that is recommended by OpenTelemetry. If you wish to install using kubectl, follow [this](https://opentelemetry.io/docs/demo/kubernetes-deployment/#install-using-kubectl).


## Install Helm Repo and Charts
You'll need to **install the Helm repository** to start sending data to SigNoz.

```sh
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
```
The OpenTelemetry Collector's configuration is exposed in the Helm chart. All additions made will be merged into the default configuration. We use this capability to add SigNoz as an exporter, and make pipelines as desired.

For this we have to create a `values.yaml` which will override the existing configuration that comes with the Helm chart.

```yaml
default:
  env:
    - name: OTEL_SERVICE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: "metadata.labels['app.kubernetes.io/component']"
    - name: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      value: cumulative
    - name: OTEL_RESOURCE_ATTRIBUTES
      value: 'service.name=$(OTEL_SERVICE_NAME),service.namespace=opentelemetry-demo'
    - name: OTEL_COLLECTOR_NAME
      value: signoz-otel-collector.<namespace>.svc.cluster.local
```
Replace `<namespace>` with your appropriate namespace. This file will replace the chart's existing settings with our new ones, ensuring telemetry data is sent to SigNoz [Kubernetes].

> Note: When merging YAML values with Helm, objects are merged and arrays are replaced. The spanmetrics exporter must be included in the array of exporters for the traces pipeline if overridden. Not including this exporter will result in an error.

<br>
<u>To send data to SigNoz cloud</u>

If you wish to send data to the cloud instance of SigNoz instead, create a `values.yaml` which overrides the existing configuration that comes with the Helm chart.

```yaml
opentelemetry-collector:
  config:
    exporters:
      otlp:
        endpoint: "https://ingest.{your-region}.signoz.cloud:443"
        tls:
          insecure: false
        headers:
          signoz-access-token: <SIGNOZ-KEY>
      debug:
        verbosity: detailed
    service:
      pipelines:
        traces:
          exporters: [spanmetrics, otlp]
        metrics:
          exporters: [otlp]
        logs:
          exporters: [otlp]
```
Make sure to replace the region and key with the values obtained from your account.

Now **install the Helm chart** with a release name and namespace of your choice. Let's take *my-otel-demo* as the release name and *otel-demo* as the namespace for the context of the code snippet below,

```sh
# Create a new Kubernetes namespace called "otel-demo"
kubectl create namespace otel-demo
# Install the OpenTelemetry Demo Helm chart with the release name "my-otel-demo"
helm install my-otel-demo open-telemetry/opentelemetry-demo --namespace otel-demo -f values.yaml
```
You should see a similar output on your terminal,


To verify if all the pods are running,
```sh
kubectl get pods -n otel-demo
```
The output should look similar to this,



## Start the OpenTelemetry Demo App

To expose the OTel demo app UI [frontend-proxy service] use the following command (replace my-otel-demo with your Helm chart release name):

```sh
kubectl port-forward svc/my-otel-demo-frontend-proxy 8081:8080
```
Navigate to `http://localhost:8081/` where you can access the OTel demo app UI. Generate some traffic to send to SigNoz [Kubernetes].


## Monitor with SigNoz [Kubernetes]
SigNoz exposes its UI at `http://localhost:8080/`. You should be able to see multiple services listed, as shown in the snapshot below.



This verifies that your OTel demo app is successfully sending telemetry data to SigNoz [Kubernetes] as expected.



# What's next?

Don't forget to check out our OpenTelemetry [track](https://signoz.io/resource-center/opentelemetry/), guaranteed to take you from a newbie to sensei in no time!

Also, from a fellow OTel fan to another, we at [SigNoz](https://signoz.io/) are building an open-source, OTel-native observability platform (one of its kind). So, show us love - star us on [GitHub](https://github.com/SigNoz/signoz), nitpick our [docs](https://signoz.io/docs/introduction/), or just tell your app we’re the ones who’ll catch its crashes mid-flight and finally shush all the 3am panic calls!
ee/http/middleware/pat.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package middleware

import (
	"net/http"

	"go.signoz.io/signoz/pkg/types/authtypes"
)

// Pat extracts personal access tokens from the configured request
// headers and stores them on the request context.
type Pat struct {
	uuid    *authtypes.UUID
	headers []string
}

func NewPat(headers []string) *Pat {
	return &Pat{uuid: authtypes.NewUUID(), headers: headers}
}

func (p *Pat) Wrap(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var values []string
		for _, header := range p.headers {
			values = append(values, r.Header.Get(header))
		}

		// If no token could be resolved, continue unauthenticated.
		ctx, err := p.uuid.ContextFromRequest(r.Context(), values...)
		if err != nil {
			next.ServeHTTP(w, r)
			return
		}

		r = r.WithContext(ctx)

		next.ServeHTTP(w, r)
	})
}
@@ -1,26 +0,0 @@
package licensing

import (
	"fmt"
	"sync"
	"time"

	"github.com/SigNoz/signoz/pkg/licensing"
)

var (
	config licensing.Config
	once   sync.Once
)

// initializes the licensing configuration
func Config(pollInterval time.Duration, failureThreshold int) licensing.Config {
	once.Do(func() {
		config = licensing.Config{PollInterval: pollInterval, FailureThreshold: failureThreshold}
		if err := config.Validate(); err != nil {
			panic(fmt.Errorf("invalid licensing config: %w", err))
		}
	})

	return config
}
@@ -1,168 +0,0 @@
|
||||
package httplicensing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/http/render"
|
||||
"github.com/SigNoz/signoz/pkg/licensing"
|
||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/licensetypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
)
|
||||
|
||||
type licensingAPI struct {
|
||||
licensing licensing.Licensing
|
||||
}
|
||||
|
||||
func NewLicensingAPI(licensing licensing.Licensing) licensing.API {
|
||||
return &licensingAPI{licensing: licensing}
|
||||
}
|
||||
|
||||
func (api *licensingAPI) Activate(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(ctx)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
orgID, err := valuer.NewUUID(claims.OrgID)
|
||||
if err != nil {
|
||||
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
|
||||
return
|
||||
}
|
||||
|
||||
req := new(licensetypes.PostableLicense)
|
||||
err = json.NewDecoder(r.Body).Decode(&req)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
err = api.licensing.Activate(r.Context(), orgID, req.Key)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(rw, http.StatusAccepted, nil)
|
||||
}
|
||||
|
||||
func (api *licensingAPI) GetActive(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(ctx)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
orgID, err := valuer.NewUUID(claims.OrgID)
|
||||
if err != nil {
|
||||
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
|
||||
return
|
||||
}
|
||||
|
||||
license, err := api.licensing.GetActive(r.Context(), orgID)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
gettableLicense := licensetypes.NewGettableLicense(license.Data, license.Key)
|
||||
render.Success(rw, http.StatusOK, gettableLicense)
|
||||
}
|
||||
|
||||
func (api *licensingAPI) Refresh(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(ctx)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
orgID, err := valuer.NewUUID(claims.OrgID)
|
||||
if err != nil {
|
||||
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
|
||||
return
|
||||
}
|
||||
|
||||
err = api.licensing.Refresh(r.Context(), orgID)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(rw, http.StatusNoContent, nil)
|
||||
}
|
||||
|
||||
func (api *licensingAPI) Checkout(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(ctx)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
orgID, err := valuer.NewUUID(claims.OrgID)
|
||||
if err != nil {
|
||||
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
|
||||
return
|
||||
}
|
||||
|
||||
req := new(licensetypes.PostableSubscription)
|
||||
if err := json.NewDecoder(r.Body).Decode(req); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
gettableSubscription, err := api.licensing.Checkout(ctx, orgID, req)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(rw, http.StatusCreated, gettableSubscription)
|
||||
}
|
||||
|
||||
func (api *licensingAPI) Portal(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(ctx)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
orgID, err := valuer.NewUUID(claims.OrgID)
|
||||
if err != nil {
|
||||
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
|
||||
return
|
||||
}
|
||||
|
||||
req := new(licensetypes.PostableSubscription)
|
||||
if err := json.NewDecoder(r.Body).Decode(req); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
gettableSubscription, err := api.licensing.Portal(ctx, orgID, req)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(rw, http.StatusCreated, gettableSubscription)
|
||||
}
|
||||
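For orientation, a minimal sketch (not part of this changeset) of how these handlers could be mounted on a gorilla/mux router. The paths mirror the v3 license routes registered later in this comparison; the auth middleware is omitted, and the function name is illustrative.

package main

import (
    "net/http"

    "github.com/SigNoz/signoz/pkg/licensing"
    "github.com/gorilla/mux"
)

// mountLicensingRoutes is a hypothetical helper: it wires the licensing API
// handlers onto a router without any access-control wrappers.
func mountLicensingRoutes(router *mux.Router, api licensing.API) {
    router.HandleFunc("/api/v3/licenses", api.Activate).Methods(http.MethodPost)
    router.HandleFunc("/api/v3/licenses", api.Refresh).Methods(http.MethodPut)
    router.HandleFunc("/api/v3/licenses/active", api.GetActive).Methods(http.MethodGet)
}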
@@ -1,297 +0,0 @@
package httplicensing

import (
    "context"
    "encoding/json"
    "time"

    "github.com/SigNoz/signoz/ee/licensing/licensingstore/sqllicensingstore"
    "github.com/SigNoz/signoz/ee/query-service/constants"
    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/factory"
    "github.com/SigNoz/signoz/pkg/licensing"
    "github.com/SigNoz/signoz/pkg/modules/organization"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types/featuretypes"
    "github.com/SigNoz/signoz/pkg/types/licensetypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/SigNoz/signoz/pkg/zeus"
    "github.com/tidwall/gjson"
)

type provider struct {
    store     licensetypes.Store
    zeus      zeus.Zeus
    config    licensing.Config
    settings  factory.ScopedProviderSettings
    orgGetter organization.Getter
    stopChan  chan struct{}
}

func NewProviderFactory(store sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
    return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config licensing.Config) (licensing.Licensing, error) {
        return New(ctx, providerSettings, config, store, zeus, orgGetter)
    })
}

func New(ctx context.Context, ps factory.ProviderSettings, config licensing.Config, sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter) (licensing.Licensing, error) {
    settings := factory.NewScopedProviderSettings(ps, "github.com/SigNoz/signoz/ee/licensing/httplicensing")
    licensestore := sqllicensingstore.New(sqlstore)
    return &provider{
        store:     licensestore,
        zeus:      zeus,
        config:    config,
        settings:  settings,
        orgGetter: orgGetter,
        stopChan:  make(chan struct{}),
    }, nil
}

func (provider *provider) Start(ctx context.Context) error {
    tick := time.NewTicker(provider.config.PollInterval)
    defer tick.Stop()

    err := provider.Validate(ctx)
    if err != nil {
        provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
    }

    for {
        select {
        case <-provider.stopChan:
            return nil
        case <-tick.C:
            err := provider.Validate(ctx)
            if err != nil {
                provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
            }
        }
    }
}

func (provider *provider) Stop(ctx context.Context) error {
    provider.settings.Logger().DebugContext(ctx, "license validation stopped")
    close(provider.stopChan)
    return nil
}

func (provider *provider) Validate(ctx context.Context) error {
    organizations, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
    if err != nil {
        return err
    }

    for _, organization := range organizations {
        err := provider.Refresh(ctx, organization.ID)
        if err != nil {
            return err
        }
    }

    if len(organizations) == 0 {
        err = provider.InitFeatures(ctx, licensetypes.BasicPlan)
        if err != nil {
            return err
        }
    }

    return nil
}

func (provider *provider) Activate(ctx context.Context, organizationID valuer.UUID, key string) error {
    data, err := provider.zeus.GetLicense(ctx, key)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to fetch license data from upstream server")
    }

    license, err := licensetypes.NewLicense(data, organizationID)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to create license entity")
    }

    storableLicense := licensetypes.NewStorableLicenseFromLicense(license)
    err = provider.store.Create(ctx, storableLicense)
    if err != nil {
        return err
    }

    err = provider.InitFeatures(ctx, license.Features)
    if err != nil {
        return err
    }

    return nil
}

func (provider *provider) GetActive(ctx context.Context, organizationID valuer.UUID) (*licensetypes.License, error) {
    storableLicenses, err := provider.store.GetAll(ctx, organizationID)
    if err != nil {
        return nil, err
    }

    activeLicense, err := licensetypes.GetActiveLicenseFromStorableLicenses(storableLicenses, organizationID)
    if err != nil {
        return nil, err
    }

    return activeLicense, nil
}

func (provider *provider) Refresh(ctx context.Context, organizationID valuer.UUID) error {
    activeLicense, err := provider.GetActive(ctx, organizationID)
    if err != nil && !errors.Ast(err, errors.TypeNotFound) {
        provider.settings.Logger().ErrorContext(ctx, "license validation failed", "org_id", organizationID.StringValue())
        return err
    }

    if err != nil && errors.Ast(err, errors.TypeNotFound) {
        provider.settings.Logger().DebugContext(ctx, "no active license found, defaulting to basic plan", "org_id", organizationID.StringValue())
        err = provider.InitFeatures(ctx, licensetypes.BasicPlan)
        if err != nil {
            return err
        }
        return nil
    }

    data, err := provider.zeus.GetLicense(ctx, activeLicense.Key)
    if err != nil {
        if time.Since(activeLicense.LastValidatedAt) > time.Duration(provider.config.FailureThreshold)*provider.config.PollInterval {
            provider.settings.Logger().ErrorContext(ctx, "license validation failed for consecutive poll intervals, defaulting to basic plan", "failure_threshold", provider.config.FailureThreshold, "license_id", activeLicense.ID.StringValue(), "org_id", organizationID.StringValue())
            err = provider.InitFeatures(ctx, licensetypes.BasicPlan)
            if err != nil {
                return err
            }
            return nil
        }
        return err
    }

    err = activeLicense.Update(data)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to update license entity from license data")
    }

    updatedStorableLicense := licensetypes.NewStorableLicenseFromLicense(activeLicense)
    err = provider.store.Update(ctx, organizationID, updatedStorableLicense)
    if err != nil {
        return err
    }

    return nil
}

func (provider *provider) Checkout(ctx context.Context, organizationID valuer.UUID, postableSubscription *licensetypes.PostableSubscription) (*licensetypes.GettableSubscription, error) {
    activeLicense, err := provider.GetActive(ctx, organizationID)
    if err != nil {
        return nil, err
    }

    body, err := json.Marshal(postableSubscription)
    if err != nil {
        return nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to marshal checkout payload")
    }

    response, err := provider.zeus.GetCheckoutURL(ctx, activeLicense.Key, body)
    if err != nil {
        return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to generate checkout session")
    }

    return &licensetypes.GettableSubscription{RedirectURL: gjson.GetBytes(response, "url").String()}, nil
}

func (provider *provider) Portal(ctx context.Context, organizationID valuer.UUID, postableSubscription *licensetypes.PostableSubscription) (*licensetypes.GettableSubscription, error) {
    activeLicense, err := provider.GetActive(ctx, organizationID)
    if err != nil {
        return nil, err
    }

    body, err := json.Marshal(postableSubscription)
    if err != nil {
        return nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to marshal portal payload")
    }

    response, err := provider.zeus.GetPortalURL(ctx, activeLicense.Key, body)
    if err != nil {
        return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to generate portal session")
    }

    return &licensetypes.GettableSubscription{RedirectURL: gjson.GetBytes(response, "url").String()}, nil
}

// Feature flag methods: these satisfy the feature-lookup side of the
// licensing interface.

// CheckFeature returns nil if the feature flag with the given key is active.
func (provider *provider) CheckFeature(ctx context.Context, key string) error {
    feature, err := provider.store.GetFeature(ctx, key)
    if err != nil {
        return err
    }
    if feature.Active {
        return nil
    }
    return errors.Newf(errors.TypeUnsupported, licensing.ErrCodeFeatureUnavailable, "feature unavailable: %s", key)
}

func (provider *provider) GetFeatureFlag(ctx context.Context, key string) (*featuretypes.GettableFeature, error) {
    featureStatus, err := provider.store.GetFeature(ctx, key)
    if err != nil {
        return nil, err
    }
    return &featuretypes.GettableFeature{
        Name:       featureStatus.Name,
        Active:     featureStatus.Active,
        Usage:      int64(featureStatus.Usage),
        UsageLimit: int64(featureStatus.UsageLimit),
        Route:      featureStatus.Route,
    }, nil
}

func (provider *provider) GetFeatureFlags(ctx context.Context) ([]*featuretypes.GettableFeature, error) {
    storableFeatures, err := provider.store.GetAllFeatures(ctx)
    if err != nil {
        return nil, err
    }

    gettableFeatures := make([]*featuretypes.GettableFeature, len(storableFeatures))
    for idx, storableFeature := range storableFeatures {
        gettableFeatures[idx] = &featuretypes.GettableFeature{
            Name:       storableFeature.Name,
            Active:     storableFeature.Active,
            Usage:      int64(storableFeature.Usage),
            UsageLimit: int64(storableFeature.UsageLimit),
            Route:      storableFeature.Route,
        }
    }

    if constants.IsDotMetricsEnabled {
        gettableFeatures = append(gettableFeatures, &featuretypes.GettableFeature{
            Name:   featuretypes.DotMetricsEnabled,
            Active: true,
        })
    }

    return gettableFeatures, nil
}

func (provider *provider) InitFeatures(ctx context.Context, features []*featuretypes.GettableFeature) error {
    featureStatus := make([]*featuretypes.StorableFeature, len(features))
    for i, f := range features {
        featureStatus[i] = &featuretypes.StorableFeature{
            Name:       f.Name,
            Active:     f.Active,
            Usage:      int(f.Usage),
            UsageLimit: int(f.UsageLimit),
            Route:      f.Route,
        }
    }

    return provider.store.InitFeatures(ctx, featureStatus)
}

func (provider *provider) UpdateFeatureFlag(ctx context.Context, feature *featuretypes.GettableFeature) error {
    return provider.store.UpdateFeature(ctx, &featuretypes.StorableFeature{
        Name:       feature.Name,
        Active:     feature.Active,
        Usage:      int(feature.Usage),
        UsageLimit: int(feature.UsageLimit),
        Route:      feature.Route,
    })
}
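As a usage sketch (not part of this changeset), CheckFeature composes naturally as an HTTP guard. The helper below is illustrative; only the CheckFeature(ctx, key) signature is taken from the code above.

package main

import (
    "net/http"

    "github.com/SigNoz/signoz/pkg/licensing"
)

// requireFeature is a hypothetical wrapper: it rejects the request with 403
// unless the named feature flag is active for the deployment.
func requireFeature(lic licensing.Licensing, key string, next http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if err := lic.CheckFeature(r.Context(), key); err != nil {
            http.Error(w, "feature unavailable: "+key, http.StatusForbidden)
            return
        }
        next(w, r)
    }
}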
@@ -1,160 +0,0 @@
package sqllicensingstore

import (
    "context"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types/featuretypes"
    "github.com/SigNoz/signoz/pkg/types/licensetypes"
    "github.com/SigNoz/signoz/pkg/valuer"
)

type store struct {
    sqlstore sqlstore.SQLStore
}

func New(sqlstore sqlstore.SQLStore) licensetypes.Store {
    return &store{sqlstore}
}

func (store *store) Create(ctx context.Context, storableLicense *licensetypes.StorableLicense) error {
    _, err := store.
        sqlstore.
        BunDB().
        NewInsert().
        Model(storableLicense).
        Exec(ctx)
    if err != nil {
        return store.sqlstore.WrapAlreadyExistsErrf(err, errors.CodeAlreadyExists, "license with ID: %s already exists", storableLicense.ID)
    }

    return nil
}

func (store *store) Get(ctx context.Context, organizationID valuer.UUID, licenseID valuer.UUID) (*licensetypes.StorableLicense, error) {
    storableLicense := new(licensetypes.StorableLicense)
    err := store.
        sqlstore.
        BunDB().
        NewSelect().
        Model(storableLicense).
        Where("org_id = ?", organizationID).
        Where("id = ?", licenseID).
        Scan(ctx)
    if err != nil {
        return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "license with ID: %s does not exist", licenseID)
    }

    return storableLicense, nil
}

func (store *store) GetAll(ctx context.Context, organizationID valuer.UUID) ([]*licensetypes.StorableLicense, error) {
    storableLicenses := make([]*licensetypes.StorableLicense, 0)
    err := store.
        sqlstore.
        BunDB().
        NewSelect().
        Model(&storableLicenses).
        Where("org_id = ?", organizationID).
        Scan(ctx)
    if err != nil {
        return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "licenses for organizationID: %s do not exist", organizationID)
    }

    return storableLicenses, nil
}

func (store *store) Update(ctx context.Context, organizationID valuer.UUID, storableLicense *licensetypes.StorableLicense) error {
    _, err := store.
        sqlstore.
        BunDB().
        NewUpdate().
        Model(storableLicense).
        WherePK().
        Where("org_id = ?", organizationID).
        Exec(ctx)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to update license with ID: %s", storableLicense.ID)
    }

    return nil
}

func (store *store) CreateFeature(ctx context.Context, storableFeature *featuretypes.StorableFeature) error {
    _, err := store.
        sqlstore.
        BunDB().
        NewInsert().
        Model(storableFeature).
        Exec(ctx)
    if err != nil {
        return store.sqlstore.WrapAlreadyExistsErrf(err, errors.CodeAlreadyExists, "feature with name: %s already exists", storableFeature.Name)
    }

    return nil
}

func (store *store) GetFeature(ctx context.Context, key string) (*featuretypes.StorableFeature, error) {
    storableFeature := new(featuretypes.StorableFeature)
    err := store.
        sqlstore.
        BunDB().
        NewSelect().
        Model(storableFeature).
        Where("name = ?", key).
        Scan(ctx)
    if err != nil {
        return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "feature with name: %s does not exist", key)
    }

    return storableFeature, nil
}

func (store *store) GetAllFeatures(ctx context.Context) ([]*featuretypes.StorableFeature, error) {
    storableFeatures := make([]*featuretypes.StorableFeature, 0)
    err := store.
        sqlstore.
        BunDB().
        NewSelect().
        Model(&storableFeatures).
        Scan(ctx)
    if err != nil {
        return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "features do not exist")
    }

    return storableFeatures, nil
}

func (store *store) InitFeatures(ctx context.Context, storableFeatures []*featuretypes.StorableFeature) error {
    _, err := store.
        sqlstore.
        BunDB().
        NewInsert().
        Model(&storableFeatures).
        On("CONFLICT (name) DO UPDATE").
        Set("active = EXCLUDED.active").
        Set("usage = EXCLUDED.usage").
        Set("usage_limit = EXCLUDED.usage_limit").
        Set("route = EXCLUDED.route").
        Exec(ctx)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to initialise features")
    }

    return nil
}

func (store *store) UpdateFeature(ctx context.Context, storableFeature *featuretypes.StorableFeature) error {
    _, err := store.
        sqlstore.
        BunDB().
        NewUpdate().
        Model(storableFeature).
        WherePK().
        Exec(ctx)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to update feature with key: %s", storableFeature.Name)
    }

    return nil
}
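Because InitFeatures upserts on the feature name (ON CONFLICT (name) DO UPDATE), it is safe to call repeatedly; re-running it with a changed plan flips flags in place. A minimal usage sketch, assuming a constructed store; the flag shown is a placeholder, not one shipped by SigNoz.

package main

import (
    "context"

    "github.com/SigNoz/signoz/pkg/types/featuretypes"
    "github.com/SigNoz/signoz/pkg/types/licensetypes"
)

// seedFeatures is illustrative: calling it at every startup converges the
// feature table to the given plan thanks to the upsert semantics above.
func seedFeatures(ctx context.Context, store licensetypes.Store) error {
    return store.InitFeatures(ctx, []*featuretypes.StorableFeature{
        // hypothetical flag used only for this example
        {Name: "EXAMPLE_FLAG", Active: true, Usage: 0, UsageLimit: -1, Route: ""},
    })
}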
@@ -1,70 +0,0 @@
# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
version: 2

project_name: signoz

before:
  hooks:
    - go mod tidy

builds:
  - id: signoz
    binary: bin/signoz
    main: ee/query-service/main.go
    env:
      - CGO_ENABLED=1
      - >-
        {{- if eq .Os "linux" }}
        {{- if eq .Arch "arm64" }}CC=aarch64-linux-gnu-gcc{{- end }}
        {{- end }}
    goos:
      - linux
      - darwin
    goarch:
      - amd64
      - arm64
    goamd64:
      - v1
    goarm64:
      - v8.0
    ldflags:
      - -s -w
      - -X github.com/SigNoz/signoz/pkg/version.version=v{{ .Version }}
      - -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
      - -X github.com/SigNoz/signoz/pkg/version.hash={{ .ShortCommit }}
      - -X github.com/SigNoz/signoz/pkg/version.time={{ .CommitTimestamp }}
      - -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
      - -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
      - -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
      - -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
      - -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
      - >-
        {{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
    mod_timestamp: "{{ .CommitTimestamp }}"
    tags:
      - timetzdata

archives:
  - formats:
      - tar.gz
    name_template: >-
      {{ .ProjectName }}_{{- .Os }}_{{- .Arch }}
    wrap_in_directory: true
    strip_binary_directory: false
    files:
      - src: README.md
        dst: README.md
      - src: LICENSE
        dst: LICENSE
      - src: frontend/build
        dst: web
      - src: conf
        dst: conf
      - src: templates
        dst: templates

release:
  name_template: "v{{ .Version }}"
  draft: false
  prerelease: auto
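The -X flags above overwrite package-level string variables at link time. A minimal sketch of what the consuming side of pkg/version might look like; the variable names match the symbols targeted above, but the default values are assumptions.

// Hypothetical shape of pkg/version: each variable defaults to a dev value
// and is overwritten at link time, e.g.
// -ldflags "-X github.com/SigNoz/signoz/pkg/version.version=v1.2.3".
package version

var (
    version = "dev"
    variant = "community"
    hash    = "unknown"
    time    = "unknown"
    branch  = "unknown"
)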
@@ -1,21 +1,34 @@
# use a minimal alpine image
FROM alpine:3.20.3

# Add Maintainer Info
LABEL maintainer="signoz"

# define arguments that can be passed during build time
ARG TARGETOS TARGETARCH

# add ca-certificates in case you need them
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*

# set working directory
WORKDIR /root

ARG OS="linux"
ARG TARGETARCH
# copy the query-service binary
COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
COPY pkg/query-service/templates /root/templates

# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service

COPY ./target/${OS}-${TARGETARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
# Copy frontend
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz
# run the binary
ENTRYPOINT ["./query-service"]

ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]
CMD ["-config", "/root/config/prometheus.yml"]

EXPOSE 8080
@@ -1,36 +0,0 @@
FROM golang:1.23-bullseye

ARG OS="linux"
ARG TARGETARCH
ARG ZEUSURL

# This path is important for stacktraces
WORKDIR $GOPATH/src/github.com/signoz/signoz
WORKDIR /root

RUN set -eux; \
    apt-get update; \
    apt-get install -y --no-install-recommends \
        g++ \
        gcc \
        libc6-dev \
        make \
        pkg-config \
    ; \
    rm -rf /var/lib/apt/lists/*

COPY go.mod go.sum ./

RUN go mod download

COPY ./ee/ ./ee/
COPY ./pkg/ ./pkg/
COPY ./templates/email /root/templates

COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["/root/signoz"]
@@ -1,22 +0,0 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]
@@ -3,9 +3,8 @@ package anomaly
import (
    "context"

    querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
    "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
    "github.com/SigNoz/signoz/pkg/valuer"
    querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
    "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type DailyProvider struct {
@@ -29,16 +28,17 @@ func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvi
}

    dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
        Reader:       dp.reader,
        Cache:        dp.cache,
        KeyGenerator: queryBuilder.NewKeyGenerator(),
        FluxInterval: dp.fluxInterval,
        Reader:        dp.reader,
        Cache:         dp.cache,
        KeyGenerator:  queryBuilder.NewKeyGenerator(),
        FluxInterval:  dp.fluxInterval,
        FeatureLookup: dp.ff,
    })

    return dp
}

func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    req.Seasonality = SeasonalityDaily
    return p.getAnomalies(ctx, orgID, req)
    return p.getAnomalies(ctx, req)
}
@@ -3,9 +3,8 @@ package anomaly
import (
    "context"

    querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
    "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
    "github.com/SigNoz/signoz/pkg/valuer"
    querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
    "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type HourlyProvider struct {
@@ -29,16 +28,17 @@ func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyPr
}

    hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
        Reader:       hp.reader,
        Cache:        hp.cache,
        KeyGenerator: queryBuilder.NewKeyGenerator(),
        FluxInterval: hp.fluxInterval,
        Reader:        hp.reader,
        Cache:         hp.cache,
        KeyGenerator:  queryBuilder.NewKeyGenerator(),
        FluxInterval:  hp.fluxInterval,
        FeatureLookup: hp.ff,
    })

    return hp
}

func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    req.Seasonality = SeasonalityHourly
    return p.getAnomalies(ctx, orgID, req)
    return p.getAnomalies(ctx, req)
}
@@ -4,8 +4,8 @@ import (
    "math"
    "time"

    "github.com/SigNoz/signoz/pkg/query-service/common"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "go.signoz.io/signoz/pkg/query-service/common"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

type Seasonality string
@@ -2,10 +2,8 @@ package anomaly
import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type Provider interface {
    GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
    GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
}
@@ -5,12 +5,11 @@ import (
    "math"
    "time"

    "github.com/SigNoz/signoz/pkg/cache"
    "github.com/SigNoz/signoz/pkg/query-service/interfaces"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
    "github.com/SigNoz/signoz/pkg/query-service/utils/labels"
    "github.com/SigNoz/signoz/pkg/valuer"
    "go.signoz.io/signoz/pkg/query-service/cache"
    "go.signoz.io/signoz/pkg/query-service/interfaces"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    "go.signoz.io/signoz/pkg/query-service/postprocess"
    "go.signoz.io/signoz/pkg/query-service/utils/labels"
    "go.uber.org/zap"
)

@@ -39,6 +38,12 @@ func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericPr
    }
}

func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().ff = ff
    }
}

func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().reader = reader
@@ -51,6 +56,7 @@ type BaseSeasonalProvider struct {
    fluxInterval time.Duration
    cache        cache.Cache
    keyGenerator cache.KeyGenerator
    ff           interfaces.FeatureLookup
}

func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
@@ -60,9 +66,9 @@ func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomaly
    return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}

func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
    zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
    currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentPeriodQuery)
    currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
    if err != nil {
        return nil, err
    }
@@ -73,7 +79,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID
    }

    zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
    pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastPeriodQuery)
    pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
    if err != nil {
        return nil, err
    }
@@ -84,7 +90,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID
    }

    zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
    currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentSeasonQuery)
    currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
    if err != nil {
        return nil, err
    }
@@ -95,7 +101,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID
    }

    zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
    pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastSeasonQuery)
    pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
    if err != nil {
        return nil, err
    }
@@ -106,7 +112,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID
    }

    zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
    past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past2SeasonQuery)
    past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
    if err != nil {
        return nil, err
    }
@@ -117,7 +123,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID
    }

    zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
    past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past3SeasonQuery)
    past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
    if err != nil {
        return nil, err
    }
@@ -307,9 +313,6 @@ func (p *BaseSeasonalProvider) getScore(
    series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
) float64 {
    expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
    if expectedValue < 0 {
        expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
    }
    return (value - expectedValue) / p.getStdDev(weekSeries)
}

@@ -336,9 +339,9 @@ func (p *BaseSeasonalProvider) getAnomalyScores(
    return anomalyScoreSeries
}

func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    anomalyParams := p.getQueryParams(req)
    anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
    anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
    if err != nil {
        return nil, err
    }

@@ -3,9 +3,8 @@ package anomaly
import (
    "context"

    querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
    "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
    "github.com/SigNoz/signoz/pkg/valuer"
    querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
    "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type WeeklyProvider struct {
@@ -28,16 +27,17 @@ func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyPr
}

    wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
        Reader:       wp.reader,
        Cache:        wp.cache,
        KeyGenerator: queryBuilder.NewKeyGenerator(),
        FluxInterval: wp.fluxInterval,
        Reader:        wp.reader,
        Cache:         wp.cache,
        KeyGenerator:  queryBuilder.NewKeyGenerator(),
        FluxInterval:  wp.fluxInterval,
        FeatureLookup: wp.ff,
    })

    return wp
}

func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    req.Seasonality = SeasonalityWeekly
    return p.getAnomalies(ctx, orgID, req)
    return p.getAnomalies(ctx, req)
}
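The providers above share one construction idiom: variadic functional options that mutate the embedded BaseSeasonalProvider. A sketch of a call site, using only the option constructors visible in this diff; import paths follow the github.com/SigNoz side, and note that on the other side of the diff GetAnomalies also threads an orgID.

package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
    "github.com/SigNoz/signoz/pkg/query-service/interfaces"
)

// buildWeekly is an illustrative call site for the functional options above;
// reader and ff are assumed to be constructed by the caller.
func buildWeekly(ctx context.Context, reader interfaces.Reader, ff interfaces.FeatureLookup, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    wp := NewWeeklyProvider(
        WithReader[*WeeklyProvider](reader),
        WithKeyGenerator[*WeeklyProvider](queryBuilder.NewKeyGenerator()),
        WithFeatureLookup[*WeeklyProvider](ff),
    )
    return wp.GetAnomalies(ctx, req)
}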
@@ -1,38 +1,43 @@
package api

import (
    "context"
    "net/http"
    "net/http/httputil"
    "time"

    "github.com/SigNoz/signoz/ee/licensing/httplicensing"
    "github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
    "github.com/SigNoz/signoz/ee/query-service/interfaces"
    "github.com/SigNoz/signoz/ee/query-service/usage"
    "github.com/SigNoz/signoz/pkg/alertmanager"
    "github.com/SigNoz/signoz/pkg/apis/fields"
    "github.com/SigNoz/signoz/pkg/http/middleware"
    baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
    "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
    "github.com/SigNoz/signoz/pkg/query-service/app/integrations"
    "github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
    basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
    rules "github.com/SigNoz/signoz/pkg/query-service/rules"
    "github.com/SigNoz/signoz/pkg/signoz"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/version"
    "github.com/gorilla/mux"
    "go.signoz.io/signoz/ee/query-service/dao"
    "go.signoz.io/signoz/ee/query-service/integrations/gateway"
    "go.signoz.io/signoz/ee/query-service/interfaces"
    "go.signoz.io/signoz/ee/query-service/license"
    "go.signoz.io/signoz/ee/query-service/usage"
    "go.signoz.io/signoz/pkg/alertmanager"
    baseapp "go.signoz.io/signoz/pkg/query-service/app"
    "go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
    "go.signoz.io/signoz/pkg/query-service/app/integrations"
    "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
    "go.signoz.io/signoz/pkg/query-service/cache"
    baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
    rules "go.signoz.io/signoz/pkg/query-service/rules"
    "go.signoz.io/signoz/pkg/query-service/version"
    "go.signoz.io/signoz/pkg/signoz"
    "go.signoz.io/signoz/pkg/types/authtypes"
)

type APIHandlerOptions struct {
    DataConnector                 interfaces.DataConnector
    SkipConfig                    *basemodel.SkipConfig
    PreferSpanMetrics             bool
    AppDao                        dao.ModelDao
    RulesManager                  *rules.Manager
    UsageManager                  *usage.Manager
    FeatureFlags                  baseint.FeatureLookup
    LicenseManager                *license.Manager
    IntegrationsController        *integrations.Controller
    CloudIntegrationsController   *cloudintegrations.Controller
    LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
    Cache                         cache.Cache
    Gateway                       *httputil.ReverseProxy
    GatewayUrl                    string
    // Querier flux interval
@@ -49,17 +54,22 @@ type APIHandler struct {

// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
    baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
        Reader:                        opts.DataConnector,
        SkipConfig:                    opts.SkipConfig,
        PreferSpanMetrics:             opts.PreferSpanMetrics,
        AppDao:                        opts.AppDao,
        RuleManager:                   opts.RulesManager,
        FeatureFlags:                  opts.FeatureFlags,
        IntegrationsController:        opts.IntegrationsController,
        CloudIntegrationsController:   opts.CloudIntegrationsController,
        LogsParsingPipelineController: opts.LogsParsingPipelineController,
        Cache:                         opts.Cache,
        FluxInterval:                  opts.FluxInterval,
        UseLogsNewSchema:              opts.UseLogsNewSchema,
        UseTraceNewSchema:             opts.UseTraceNewSchema,
        AlertmanagerAPI:               alertmanager.NewAPI(signoz.Alertmanager),
        LicensingAPI:                  httplicensing.NewLicensingAPI(signoz.Licensing),
        FieldsAPI:                     fields.NewAPI(signoz.TelemetryStore, signoz.Instrumentation.Logger()),
        Signoz:                        signoz,
    })

@@ -74,53 +84,99 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
    return ah, nil
}

func (ah *APIHandler) FF() baseint.FeatureLookup {
    return ah.opts.FeatureFlags
}

func (ah *APIHandler) RM() *rules.Manager {
    return ah.opts.RulesManager
}

func (ah *APIHandler) LM() *license.Manager {
    return ah.opts.LicenseManager
}

func (ah *APIHandler) UM() *usage.Manager {
    return ah.opts.UsageManager
}

func (ah *APIHandler) AppDao() dao.ModelDao {
    return ah.opts.AppDao
}

func (ah *APIHandler) Gateway() *httputil.ReverseProxy {
    return ah.opts.Gateway
}

func (ah *APIHandler) CheckFeature(ctx context.Context, key string) bool {
    err := ah.Signoz.Licensing.CheckFeature(ctx, key)
func (ah *APIHandler) CheckFeature(f string) bool {
    err := ah.FF().CheckFeature(f)
    return err == nil
}

// RegisterRoutes registers routes for this handler on the given router
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
    // note: add ee override methods first

    // routes available only in ee version

    router.HandleFunc("/api/v1/featureFlags", am.OpenAccess(ah.getFeatureFlags)).Methods(http.MethodGet)
    router.HandleFunc("/api/v1/loginPrecheck", am.OpenAccess(ah.Signoz.Handlers.User.LoginPrecheck)).Methods(http.MethodGet)
    router.HandleFunc("/api/v1/featureFlags",
        am.OpenAccess(ah.getFeatureFlags)).
        Methods(http.MethodGet)

    // invite
    router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.Signoz.Handlers.User.GetInvite)).Methods(http.MethodGet)
    router.HandleFunc("/api/v1/invite/accept", am.OpenAccess(ah.Signoz.Handlers.User.AcceptInvite)).Methods(http.MethodPost)
    router.HandleFunc("/api/v1/loginPrecheck",
        am.OpenAccess(ah.precheckLogin)).
        Methods(http.MethodGet)

    // paid plans specific routes
    router.HandleFunc("/api/v1/complete/saml", am.OpenAccess(ah.receiveSAML)).Methods(http.MethodPost)
    router.HandleFunc("/api/v1/complete/saml",
        am.OpenAccess(ah.receiveSAML)).
        Methods(http.MethodPost)

    router.HandleFunc("/api/v1/complete/google",
        am.OpenAccess(ah.receiveGoogleAuth)).
        Methods(http.MethodGet)

    router.HandleFunc("/api/v1/orgs/{orgId}/domains",
        am.AdminAccess(ah.listDomainsByOrg)).
        Methods(http.MethodGet)

    router.HandleFunc("/api/v1/domains",
        am.AdminAccess(ah.postDomain)).
        Methods(http.MethodPost)

    router.HandleFunc("/api/v1/domains/{id}",
        am.AdminAccess(ah.putDomain)).
        Methods(http.MethodPut)

    router.HandleFunc("/api/v1/domains/{id}",
        am.AdminAccess(ah.deleteDomain)).
        Methods(http.MethodDelete)

    // base overrides
    router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
    router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
    router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
    router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
    router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)

    router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.LicensingAPI.Checkout)).Methods(http.MethodPost)
    // PAT APIs
    router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.createPAT)).Methods(http.MethodPost)
    router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.getPATs)).Methods(http.MethodGet)
    router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.updatePAT)).Methods(http.MethodPut)
    router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.revokePAT)).Methods(http.MethodDelete)

    router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.checkout)).Methods(http.MethodPost)
    router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
    router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.LicensingAPI.Portal)).Methods(http.MethodPost)
    router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.portalSession)).Methods(http.MethodPost)

    router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
    router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)

    // v3
    router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Activate)).Methods(http.MethodPost)
    router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Refresh)).Methods(http.MethodPut)
    router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.LicensingAPI.GetActive)).Methods(http.MethodGet)
    router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet)
    router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost)
    router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.refreshLicensesV3)).Methods(http.MethodPut)
    router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.getActiveLicenseV3)).Methods(http.MethodGet)

    // v4
    router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
@@ -132,7 +188,7 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {

}

func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
    ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)

@@ -144,8 +200,9 @@ func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *mi
}

func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
    version := version.GetVersion()
    versionResponse := basemodel.GetVersionResponse{
        Version:        version.Info.Version(),
        Version:        version,
        EE:             "Y",
        SetupCompleted: ah.SetupCompleted,
    }
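Both middleware types referenced above (middleware.AuthZ on one side of this diff, baseapp.AuthMiddleware on the other) follow the same wrapper shape: a method takes a handler and returns one that enforces the required role first. A hedged sketch of that shape; the signature is assumed, not taken from this changeset.

package main

import "net/http"

// accessWrapper is the assumed shape of am.OpenAccess / am.AdminAccess and
// friends: handler in, role-checking handler out.
type accessWrapper func(next http.HandlerFunc) http.HandlerFunc

// adminAccess is illustrative only; isAdmin stands in for whatever claim or
// session lookup the real middleware performs.
func adminAccess(isAdmin func(r *http.Request) bool) accessWrapper {
    return func(next http.HandlerFunc) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            if !isAdmin(r) {
                http.Error(w, "forbidden", http.StatusForbidden)
                return
            }
            next(w, r)
        }
    }
}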
@@ -3,16 +3,192 @@ package api
import (
    "context"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"

    "github.com/gorilla/mux"
    "go.uber.org/zap"

    "github.com/SigNoz/signoz/pkg/query-service/constants"
    "github.com/SigNoz/signoz/pkg/valuer"
    "go.signoz.io/signoz/ee/query-service/constants"
    "go.signoz.io/signoz/ee/query-service/model"
    baseauth "go.signoz.io/signoz/pkg/query-service/auth"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func parseRequest(r *http.Request, req interface{}) error {
    defer r.Body.Close()
    requestBody, err := io.ReadAll(r.Body)
    if err != nil {
        return err
    }

    err = json.Unmarshal(requestBody, &req)
    return err
}

// loginUser overrides the base handler to account for the SSO case.
func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
    req := basemodel.LoginRequest{}
    err := parseRequest(r, &req)
    if err != nil {
        RespondError(w, model.BadRequest(err), nil)
        return
    }

    ctx := context.Background()

    if req.Email != "" && ah.CheckFeature(model.SSO) {
        var apierr basemodel.BaseApiError
        _, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
        if apierr != nil && !apierr.IsNil() {
            RespondError(w, apierr, nil)
            return
        }
    }

    // if all looks good, call auth
    resp, err := baseauth.Login(ctx, &req, ah.opts.JWT)
    if ah.HandleError(w, err, http.StatusUnauthorized) {
        return
    }

    ah.WriteJSON(w, r, resp)
}

// registerUser registers a user and responds with a precheck
// so the front-end can decide the login method
func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
    if !ah.CheckFeature(model.SSO) {
        ah.APIHandler.Register(w, r)
        return
    }

    ctx := context.Background()
    var req *baseauth.RegisterRequest

    defer r.Body.Close()
    requestBody, err := io.ReadAll(r.Body)
    if err != nil {
        zap.L().Error("received no input in api", zap.Error(err))
        RespondError(w, model.BadRequest(err), nil)
        return
    }

    err = json.Unmarshal(requestBody, &req)
    if err != nil {
        zap.L().Error("received invalid user registration request", zap.Error(err))
        RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
        return
    }

    // get invite object
    invite, err := baseauth.ValidateInvite(ctx, req)
    if err != nil {
        zap.L().Error("failed to validate invite token", zap.Error(err))
        RespondError(w, model.BadRequest(err), nil)
        return
    }

    if invite == nil {
        zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
        RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
        return
    }

    // get auth domain from email domain
    domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
    if apierr != nil {
        zap.L().Error("failed to get domain from email", zap.Error(apierr))
        RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
        return
    }

    precheckResp := &basemodel.PrecheckResponse{
        SSO:    false,
        IsUser: false,
    }

    if domain != nil && domain.SsoEnabled {
        // sso is enabled, create user and respond precheck data
        user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
        if apierr != nil {
            RespondError(w, apierr, nil)
            return
        }

        var precheckError basemodel.BaseApiError

        precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
        if precheckError != nil {
            RespondError(w, precheckError, precheckResp)
            return
        }
    } else {
        // no-sso, validate password
        if err := baseauth.ValidatePassword(req.Password); err != nil {
            RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
            return
        }

        _, registerError := baseauth.Register(ctx, req, ah.Signoz.Alertmanager)
        if !registerError.IsNil() {
            RespondError(w, registerError, nil)
            return
        }

        precheckResp.IsUser = true
    }

    ah.Respond(w, precheckResp)
}

// getInvite returns the invite object details for the given invite token. We do not need to
// protect this API because the invite token itself is meant to be private.
func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
    token := mux.Vars(r)["token"]
    sourceUrl := r.URL.Query().Get("ref")
    ctx := context.Background()

    inviteObject, err := baseauth.GetInvite(ctx, token)
    if err != nil {
        RespondError(w, model.BadRequest(err), nil)
        return
    }

    resp := model.GettableInvitation{
        InvitationResponseObject: inviteObject,
    }

    precheck, apierr := ah.AppDao().PrecheckLogin(ctx, inviteObject.Email, sourceUrl)
    resp.Precheck = precheck
    if apierr != nil {
        RespondError(w, apierr, resp)
        return
    }

    ah.WriteJSON(w, r, resp)
}

// precheckLogin enables the browser login page to display the appropriate
// login methods
func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
    ctx := context.Background()

    email := r.URL.Query().Get("email")
    sourceUrl := r.URL.Query().Get("ref")

    resp, apierr := ah.AppDao().PrecheckLogin(ctx, email, sourceUrl)
    if apierr != nil {
        RespondError(w, apierr, resp)
        return
    }

    ah.Respond(w, resp)
}

func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
    ssoError := []byte("Login failed. Please contact your system administrator")
    dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
@@ -21,12 +197,84 @@ func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string)
    http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
}

// receiveGoogleAuth completes the Google OAuth response and forwards a request
// to the front-end to sign the user in
func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
    redirectUri := constants.GetDefaultSiteURL()
    ctx := context.Background()

    if !ah.CheckFeature(model.SSO) {
        zap.L().Error("[receiveGoogleAuth] sso requested but feature unavailable in org domain")
        http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
        return
    }

    q := r.URL.Query()
    if errType := q.Get("error"); errType != "" {
        zap.L().Error("[receiveGoogleAuth] failed to login with google auth", zap.String("error", errType), zap.String("error_description", q.Get("error_description")))
        http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO"), http.StatusMovedPermanently)
        return
    }

    relayState := q.Get("state")
    zap.L().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))

    parsedState, err := url.Parse(relayState)
    if err != nil || relayState == "" {
        zap.L().Error("[receiveGoogleAuth] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
        handleSsoError(w, r, redirectUri)
        return
    }

    // update the redirect url from the relay state for better accuracy
    redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")

    // fetch the domain by parsing the relay state
    domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
    if err != nil {
        handleSsoError(w, r, redirectUri)
        return
    }

    // now that we have the domain, use it to fetch the sso settings and
    // prepare the google callback handler using parsedState, which carries
    // the redirect URL (front-end endpoint)
    callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
    if err != nil {
        zap.L().Error("[receiveGoogleAuth] failed to prepare google oauth provider", zap.String("domain", domain.String()), zap.Error(err))
        handleSsoError(w, r, redirectUri)
        return
    }

    identity, err := callbackHandler.HandleCallback(r)
    if err != nil {
        zap.L().Error("[receiveGoogleAuth] failed to process HandleCallback", zap.String("domain", domain.String()), zap.Error(err))
        handleSsoError(w, r, redirectUri)
        return
    }

    nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email, ah.opts.JWT)
    if err != nil {
        zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login", zap.String("domain", domain.String()), zap.Error(err))
        handleSsoError(w, r, redirectUri)
        return
    }

    http.Redirect(w, r, nextPage, http.StatusSeeOther)
}

// receiveSAML completes a SAML request and gets the user logged in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
    // this is the source url that initiated the login request
    redirectUri := constants.GetDefaultSiteURL()
    ctx := context.Background()

    if !ah.CheckFeature(model.SSO) {
        zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
        http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
        return
    }

    err := r.ParseForm()
    if err != nil {
        zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
@@ -50,25 +298,12 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
    redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")

    // fetch the domain by parsing the relay state
    domain, err := ah.Signoz.Modules.User.GetDomainFromSsoResponse(ctx, parsedState)
    domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
    if err != nil {
        handleSsoError(w, r, redirectUri)
        return
    }

    orgID, err := valuer.NewUUID(domain.OrgID)
    if err != nil {
        handleSsoError(w, r, redirectUri)
        return
    }

    _, err = ah.Signoz.Licensing.GetActive(ctx, orgID)
    if err != nil {
        zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
        http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
        return
    }

    sp, err := domain.PrepareSamlRequest(parsedState)
    if err != nil {
        zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
@@ -96,7 +331,7 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
    return
}

    nextPage, err := ah.Signoz.Modules.User.PrepareSsoRedirect(ctx, redirectUri, email, ah.opts.JWT)
    nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email, ah.opts.JWT)
    if err != nil {
        zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login", zap.String("domain", domain.String()), zap.Error(err))
        handleSsoError(w, r, redirectUri)
@@ -10,15 +10,15 @@ import (
	"strings"
	"time"

	"github.com/SigNoz/signoz/ee/query-service/constants"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/render"
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/google/uuid"
	"github.com/gorilla/mux"
	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/model"
	"go.signoz.io/signoz/pkg/query-service/auth"
	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/dao"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/types"
	"go.uber.org/zap"
)

@@ -30,18 +30,6 @@ type CloudIntegrationConnectionParamsResponse struct {
}

func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
	claims, err := authtypes.ClaimsFromContext(r.Context())
	if err != nil {
		render.Error(w, err)
		return
	}

	orgID, err := valuer.NewUUID(claims.OrgID)
	if err != nil {
		render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
		return
	}

	cloudProvider := mux.Vars(r)["cloudProvider"]
	if cloudProvider != "aws" {
		RespondError(w, basemodel.BadRequest(fmt.Errorf(
@@ -50,7 +38,15 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
		return
	}

	apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
	currentUser, err := auth.GetUserFromReqContext(r.Context())
	if err != nil {
		RespondError(w, basemodel.UnauthorizedError(fmt.Errorf(
			"couldn't deduce current user: %w", err,
		)), nil)
		return
	}

	apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), currentUser.OrgID, cloudProvider)
	if apiErr != nil {
		RespondError(w, basemodel.WrapApiError(
			apiErr, "couldn't provision PAT for cloud integration:",
@@ -62,9 +58,11 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
		SigNozAPIKey: apiKey,
	}

	license, err := ah.Signoz.Licensing.GetActive(r.Context(), orgID)
	if err != nil {
		render.Error(w, err)
	license, apiErr := ah.LM().GetRepo().GetActiveLicense(r.Context())
	if apiErr != nil {
		RespondError(w, basemodel.WrapApiError(
			apiErr, "couldn't look for active license",
		), nil)
		return
	}

@@ -120,14 +118,7 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
		return "", apiErr
	}

	orgIdUUID, err := valuer.NewUUID(orgId)
	if err != nil {
		return "", basemodel.InternalError(fmt.Errorf(
			"couldn't parse orgId: %w", err,
		))
	}

	allPats, err := ah.Signoz.Modules.User.ListAPIKeys(ctx, orgIdUUID)
	allPats, err := ah.AppDao().ListPATs(ctx)
	if err != nil {
		return "", basemodel.InternalError(fmt.Errorf(
			"couldn't list PATs: %w", err,
@@ -144,36 +135,32 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
		zap.String("cloudProvider", cloudProvider),
	)

	newPAT, err := types.NewStorableAPIKey(
		integrationPATName,
		integrationUser.ID,
		types.RoleViewer,
		0,
	)
	newPAT := model.PAT{
		Token:     generatePATToken(),
		UserID:    integrationUser.ID,
		Name:      integrationPATName,
		Role:      baseconstants.ViewerGroup,
		ExpiresAt: 0,
		CreatedAt: time.Now().Unix(),
		UpdatedAt: time.Now().Unix(),
	}
	integrationPAT, err := ah.AppDao().CreatePAT(ctx, newPAT)
	if err != nil {
		return "", basemodel.InternalError(fmt.Errorf(
			"couldn't create cloud integration PAT: %w", err,
		))
	}

	err = ah.Signoz.Modules.User.CreateAPIKey(ctx, newPAT)
	if err != nil {
		return "", basemodel.InternalError(fmt.Errorf(
			"couldn't create cloud integration PAT: %w", err,
		))
	}
	return newPAT.Token, nil
	return integrationPAT.Token, nil
}

func (ah *APIHandler) getOrCreateCloudIntegrationUser(
	ctx context.Context, orgId string, cloudProvider string,
) (*types.User, *basemodel.ApiError) {
	cloudIntegrationUser := fmt.Sprintf("%s-integration", cloudProvider)
	email := fmt.Sprintf("%s@signoz.io", cloudIntegrationUser)
	cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)

	integrationUserResult, err := ah.Signoz.Modules.User.GetUserByEmailInOrg(ctx, orgId, email)
	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
		return nil, basemodel.NotFoundError(fmt.Errorf("couldn't look for integration user: %w", err))
	integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
	if apiErr != nil {
		return nil, basemodel.WrapApiError(apiErr, "couldn't look for integration user")
	}

	if integrationUserResult != nil {
@@ -185,18 +172,33 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
		zap.String("cloudProvider", cloudProvider),
	)

	newUser, err := types.NewUser(cloudIntegrationUser, email, types.RoleViewer.String(), orgId)
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf(
			"couldn't create cloud integration user: %w", err,
		))
	newUser := &types.User{
		ID:    cloudIntegrationUserId,
		Name:  fmt.Sprintf("%s integration", cloudProvider),
		Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
		},
		OrgID: orgId,
	}

	password, err := types.NewFactorPassword(uuid.NewString())
	viewerGroup, apiErr := dao.DB().GetGroupByName(ctx, baseconstants.ViewerGroup)
	if apiErr != nil {
		return nil, basemodel.WrapApiError(apiErr, "couldn't get viewer group for creating integration user")
	}
	newUser.GroupID = viewerGroup.ID

	integrationUser, err := ah.Signoz.Modules.User.CreateUserWithPassword(ctx, newUser, password)
	passwordHash, err := auth.PasswordHash(uuid.NewString())
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
		return nil, basemodel.InternalError(fmt.Errorf(
			"couldn't hash random password for cloud integration user: %w", err,
		))
	}
	newUser.Password = passwordHash

	integrationUser, apiErr := ah.AppDao().CreateUser(ctx, newUser, false)
	if apiErr != nil {
		return nil, basemodel.WrapApiError(apiErr, "couldn't create cloud integration user")
	}

	return integrationUser, nil

@@ -4,10 +4,12 @@ import (
	"net/http"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/gorilla/mux"
	"go.signoz.io/signoz/pkg/errors"
	"go.signoz.io/signoz/pkg/http/render"
	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
	"go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/types/authtypes"
)

func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
@@ -34,27 +36,26 @@ func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request
		return
	}

	claims, err := authtypes.ClaimsFromContext(r.Context())
	if err != nil {
	claims, ok := authtypes.ClaimsFromContext(r.Context())
	if !ok {
		render.Error(w, errors.Newf(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "unauthenticated"))
		return
	}

	dashboard, err := ah.Signoz.Modules.Dashboard.Get(r.Context(), claims.OrgID, uuid)
	dashboard, err := dashboards.GetDashboard(r.Context(), claims.OrgID, uuid)
	if err != nil {
		render.Error(w, err)
		render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to get dashboard"))
		return
	}

	if err := claims.IsAdmin(); err != nil && (dashboard.CreatedBy != claims.Email) {
	if !auth.IsAdminV2(claims) && (dashboard.CreatedBy != claims.Email) {
		render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "You are not authorized to lock/unlock this dashboard"))
		return
	}

	// Lock/Unlock the dashboard
	err = ah.Signoz.Modules.Dashboard.LockUnlock(r.Context(), claims.OrgID, uuid, lock)
	err = dashboards.LockUnlockDashboard(r.Context(), claims.OrgID, uuid, lock)
	if err != nil {
		render.Error(w, err)
		render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to lock/unlock dashboard"))
		return
	}

ee/query-service/app/api/domains.go (new file)
@@ -0,0 +1,90 @@
package api

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/google/uuid"
	"github.com/gorilla/mux"
	"go.signoz.io/signoz/ee/query-service/model"
)

func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
	orgId := mux.Vars(r)["orgId"]
	domains, apierr := ah.AppDao().ListDomains(context.Background(), orgId)
	if apierr != nil {
		RespondError(w, apierr, domains)
		return
	}
	ah.Respond(w, domains)
}

func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()

	req := model.OrgDomain{}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}

	if err := req.ValidNew(); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}

	if apierr := ah.AppDao().CreateDomain(ctx, &req); apierr != nil {
		RespondError(w, apierr, nil)
		return
	}

	ah.Respond(w, &req)
}

func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()

	domainIdStr := mux.Vars(r)["id"]
	domainId, err := uuid.Parse(domainIdStr)
	if err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}

	req := model.OrgDomain{Id: domainId}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}
	req.Id = domainId
	if err := req.Valid(nil); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}

	if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
		RespondError(w, apierr, nil)
		return
	}

	ah.Respond(w, &req)
}

func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
	domainIdStr := mux.Vars(r)["id"]

	domainId, err := uuid.Parse(domainIdStr)
	if err != nil {
		RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
		return
	}

	apierr := ah.AppDao().DeleteDomain(context.Background(), domainId)
	if apierr != nil {
		RespondError(w, apierr, nil)
		return
	}
	ah.Respond(w, nil)
}
@@ -8,30 +8,14 @@ import (
	"net/http"
	"time"

	"github.com/SigNoz/signoz/ee/query-service/constants"
	pkgError "github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/featuretypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"go.signoz.io/signoz/ee/query-service/constants"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	claims, err := authtypes.ClaimsFromContext(ctx)
	if err != nil {
		render.Error(w, err)
		return
	}

	orgID, err := valuer.NewUUID(claims.OrgID)
	if err != nil {
		render.Error(w, pkgError.Newf(pkgError.TypeInvalidInput, pkgError.CodeInvalidInput, "orgId is invalid"))
		return
	}

	featureSet, err := ah.Signoz.Licensing.GetFeatureFlags(r.Context())
	featureSet, err := ah.FF().GetFeatureFlags()
	if err != nil {
		ah.HandleError(w, err, http.StatusInternalServerError)
		return
@@ -39,7 +23,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {

	if constants.FetchFeatures == "true" {
		zap.L().Debug("fetching license")
		license, err := ah.Signoz.Licensing.GetActive(ctx, orgID)
		license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
		if err != nil {
			zap.L().Error("failed to fetch license", zap.Error(err))
		} else if license == nil {
@@ -60,8 +44,9 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
	}

	if ah.opts.PreferSpanMetrics {
		for idx, feature := range featureSet {
			if feature.Name == featuretypes.UseSpanMetrics {
		for idx := range featureSet {
			feature := &featureSet[idx]
			if feature.Name == basemodel.UseSpanMetrics {
				featureSet[idx].Active = true
			}
		}
@@ -72,7 +57,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {

// fetchZeusFeatures makes an HTTP GET request to the /zeusFeatures endpoint
// and returns the FeatureSet.
func fetchZeusFeatures(url, licenseKey string) ([]*featuretypes.GettableFeature, error) {
func fetchZeusFeatures(url, licenseKey string) (basemodel.FeatureSet, error) {
	// Check if the URL is empty
	if url == "" {
		return nil, fmt.Errorf("url is empty")
@@ -131,14 +116,14 @@ func fetchZeusFeatures(url, licenseKey string) ([]*featuretypes.GettableFeature,
}

type ZeusFeaturesResponse struct {
	Status string                          `json:"status"`
	Data   []*featuretypes.GettableFeature `json:"data"`
	Status string               `json:"status"`
	Data   basemodel.FeatureSet `json:"data"`
}

// MergeFeatureSets merges two FeatureSet arrays with precedence to zeusFeatures.
func MergeFeatureSets(zeusFeatures, internalFeatures []*featuretypes.GettableFeature) []*featuretypes.GettableFeature {
func MergeFeatureSets(zeusFeatures, internalFeatures basemodel.FeatureSet) basemodel.FeatureSet {
	// Create a map to store the merged features
	featureMap := make(map[string]*featuretypes.GettableFeature)
	featureMap := make(map[string]basemodel.Feature)

	// Add all features from the otherFeatures set to the map
	for _, feature := range internalFeatures {
@@ -152,7 +137,7 @@ func MergeFeatureSets(zeusFeatures, internalFeatures []*featuretypes.GettableFea
	}

	// Convert the map back to a FeatureSet slice
	var mergedFeatures []*featuretypes.GettableFeature
	var mergedFeatures basemodel.FeatureSet
	for _, feature := range featureMap {
		mergedFeatures = append(mergedFeatures, feature)
	}

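Aside: MergeFeatureSets above works by keying a map on feature name, inserting the internal features first and then overwriting with the zeus features, so zeus wins on any conflict. The following is a minimal, self-contained sketch of that merge-with-precedence pattern; the `Feature` struct here is a simplified stand-in, not the repo's actual `basemodel.Feature`:

```go
package main

import "fmt"

// Feature is a simplified stand-in for the repo's feature type.
type Feature struct {
	Name   string
	Active bool
}

// merge gives precedence to zeus: internal entries go into the map first,
// then zeus entries overwrite any feature with the same name.
func merge(zeus, internal []Feature) []Feature {
	byName := make(map[string]Feature)
	for _, f := range internal {
		byName[f.Name] = f
	}
	for _, f := range zeus {
		byName[f.Name] = f // overwrites the internal copy on conflict
	}
	merged := make([]Feature, 0, len(byName))
	for _, f := range byName {
		merged = append(merged, f)
	}
	return merged
}

func main() {
	zeus := []Feature{{Name: "Feature1", Active: true}}
	internal := []Feature{{Name: "Feature1", Active: false}, {Name: "Feature3", Active: true}}
	// Feature1 keeps the zeus value (Active=true); Feature3 is carried over.
	fmt.Println(merge(zeus, internal))
}
```

Note that map iteration order is randomized in Go, so the merged slice comes back in no particular order, which matches the unordered FeatureSet semantics exercised by the test cases in the next hunk.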
@@ -3,58 +3,58 @@ package api

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/types/featuretypes"
	"github.com/stretchr/testify/assert"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func TestMergeFeatureSets(t *testing.T) {
	tests := []struct {
		name             string
		zeusFeatures     []*featuretypes.GettableFeature
		internalFeatures []*featuretypes.GettableFeature
		expected         []*featuretypes.GettableFeature
		zeusFeatures     basemodel.FeatureSet
		internalFeatures basemodel.FeatureSet
		expected         basemodel.FeatureSet
	}{
		{
			name:             "empty zeusFeatures and internalFeatures",
			zeusFeatures:     []*featuretypes.GettableFeature{},
			internalFeatures: []*featuretypes.GettableFeature{},
			expected:         []*featuretypes.GettableFeature{},
			zeusFeatures:     basemodel.FeatureSet{},
			internalFeatures: basemodel.FeatureSet{},
			expected:         basemodel.FeatureSet{},
		},
		{
			name: "non-empty zeusFeatures and empty internalFeatures",
			zeusFeatures: []*featuretypes.GettableFeature{
			zeusFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
			internalFeatures: []*featuretypes.GettableFeature{},
			expected: []*featuretypes.GettableFeature{
			internalFeatures: basemodel.FeatureSet{},
			expected: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
		},
		{
			name:         "empty zeusFeatures and non-empty internalFeatures",
			zeusFeatures: []*featuretypes.GettableFeature{},
			internalFeatures: []*featuretypes.GettableFeature{
			zeusFeatures: basemodel.FeatureSet{},
			internalFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
			expected: []*featuretypes.GettableFeature{
			expected: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
		},
		{
			name: "non-empty zeusFeatures and non-empty internalFeatures with no conflicts",
			zeusFeatures: []*featuretypes.GettableFeature{
			zeusFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature3", Active: false},
			},
			internalFeatures: []*featuretypes.GettableFeature{
			internalFeatures: basemodel.FeatureSet{
				{Name: "Feature2", Active: true},
				{Name: "Feature4", Active: false},
			},
			expected: []*featuretypes.GettableFeature{
			expected: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: true},
				{Name: "Feature3", Active: false},
@@ -63,15 +63,15 @@ func TestMergeFeatureSets(t *testing.T) {
		},
		{
			name: "non-empty zeusFeatures and non-empty internalFeatures with conflicts",
			zeusFeatures: []*featuretypes.GettableFeature{
			zeusFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
			},
			internalFeatures: []*featuretypes.GettableFeature{
			internalFeatures: basemodel.FeatureSet{
				{Name: "Feature1", Active: false},
				{Name: "Feature3", Active: true},
			},
			expected: []*featuretypes.GettableFeature{
			expected: basemodel.FeatureSet{
				{Name: "Feature1", Active: true},
				{Name: "Feature2", Active: false},
				{Name: "Feature3", Active: true},

@@ -4,27 +4,11 @@ import (
	"net/http"
	"strings"

	"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
)

func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
	ctx := req.Context()
	claims, err := authtypes.ClaimsFromContext(ctx)
	if err != nil {
		render.Error(rw, err)
		return
	}

	orgID, err := valuer.NewUUID(claims.OrgID)
	if err != nil {
		render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
		return
	}

	validPath := false
	for _, allowedPrefix := range gateway.AllowedPrefix {
		if strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+allowedPrefix) {
@@ -38,9 +22,9 @@ func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request
		return
	}

	license, err := ah.Signoz.Licensing.GetActive(ctx, orgID)
	license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
	if err != nil {
		render.Error(rw, err)
		RespondError(rw, err, nil)
		return
	}

@@ -3,10 +3,13 @@ package api

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"

	"github.com/SigNoz/signoz/ee/query-service/constants"
	"github.com/SigNoz/signoz/ee/query-service/model"
	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/model"
	"go.signoz.io/signoz/pkg/http/render"
	"go.uber.org/zap"
)

type DayWiseBreakdown struct {
@@ -56,6 +59,98 @@ type billingDetails struct {
	} `json:"data"`
}

type ApplyLicenseRequest struct {
	LicenseKey string `json:"key"`
}

func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
	ah.listLicensesV2(w, r)
}

func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
	activeLicense, err := ah.LM().GetRepo().GetActiveLicenseV3(r.Context())
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}

	// return 404 not found if there is no active license
	if activeLicense == nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)
		return
	}

	// TODO deprecate this when we move away from key for stripe
	activeLicense.Data["key"] = activeLicense.Key
	render.Success(w, http.StatusOK, activeLicense.Data)
}

// this function is called by zeus when inserting licenses in the query-service
func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
	var licenseKey ApplyLicenseRequest

	if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}

	if licenseKey.LicenseKey == "" {
		RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
		return
	}

	_, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
	if apiError != nil {
		RespondError(w, apiError, nil)
		return
	}

	render.Success(w, http.StatusAccepted, nil)
}

func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) {

	apiError := ah.LM().RefreshLicense(r.Context())
	if apiError != nil {
		RespondError(w, apiError, nil)
		return
	}

	render.Success(w, http.StatusNoContent, nil)
}

func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {

	type checkoutResponse struct {
		Status string `json:"status"`
		Data   struct {
			RedirectURL string `json:"redirectURL"`
		} `json:"data"`
	}

	hClient := &http.Client{}
	req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		return
	}
	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
	licenseResp, err := hClient.Do(req)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		return
	}

	// decode response body
	var resp checkoutResponse
	if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
		RespondError(w, model.InternalError(err), nil)
		return
	}

	ah.Respond(w, resp.Data)
}

func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
	licenseKey := r.URL.Query().Get("licenseKey")

@@ -89,3 +184,146 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
	// TODO(srikanthccv):Fetch the current day usage and add it to the response
	ah.Respond(w, billingResponse.Data)
}

func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
	licensesV2 := []model.License{}
	for _, l := range licenses {
		planKeyFromPlanName, ok := model.MapOldPlanKeyToNewPlanName[l.PlanName]
		if !ok {
			planKeyFromPlanName = model.Basic
		}
		licenseV2 := model.License{
			Key:               l.Key,
			ActivationId:      "",
			PlanDetails:       "",
			FeatureSet:        l.Features,
			ValidationMessage: "",
			IsCurrent:         l.IsCurrent,
			LicensePlan: model.LicensePlan{
				PlanKey:    planKeyFromPlanName,
				ValidFrom:  l.ValidFrom,
				ValidUntil: l.ValidUntil,
				Status:     l.Status},
		}
		licensesV2 = append(licensesV2, licenseV2)
	}
	return licensesV2
}

func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
	licensesV3, apierr := ah.LM().GetLicensesV3(r.Context())
	if apierr != nil {
		RespondError(w, apierr, nil)
		return
	}
	licenses := convertLicenseV3ToLicenseV2(licensesV3)

	resp := model.Licenses{
		TrialStart:                   -1,
		TrialEnd:                     -1,
		OnTrial:                      false,
		WorkSpaceBlock:               false,
		TrialConvertedToSubscription: false,
		GracePeriodEnd:               -1,
		Licenses:                     licenses,
	}

	var currentActiveLicenseKey string

	for _, license := range licenses {
		if license.IsCurrent {
			currentActiveLicenseKey = license.Key
		}
	}

	// For the case when no license is applied i.e community edition
	// There will be no trial details or license details
	if currentActiveLicenseKey == "" {
		ah.Respond(w, resp)
		return
	}

	// Fetch trial details
	hClient := &http.Client{}
	url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		zap.L().Error("Error while creating request for trial details", zap.Error(err))
		// If there is an error in fetching trial details, we will still return the license details
		// to avoid blocking the UI
		ah.Respond(w, resp)
		return
	}
	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
	trialResp, err := hClient.Do(req)
	if err != nil {
		zap.L().Error("Error while fetching trial details", zap.Error(err))
		// If there is an error in fetching trial details, we will still return the license details
		// to avoid incorrectly blocking the UI
		ah.Respond(w, resp)
		return
	}
	defer trialResp.Body.Close()

	trialRespBody, err := io.ReadAll(trialResp.Body)

	if err != nil || trialResp.StatusCode != http.StatusOK {
		zap.L().Error("Error while fetching trial details", zap.Error(err))
		// If there is an error in fetching trial details, we will still return the license details
		// to avoid incorrectly blocking the UI
		ah.Respond(w, resp)
		return
	}

	// decode response body
	var trialRespData model.SubscriptionServerResp

	if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
		zap.L().Error("Error while decoding trial details", zap.Error(err))
		// If there is an error in fetching trial details, we will still return the license details
		// to avoid incorrectly blocking the UI
		ah.Respond(w, resp)
		return
	}

	resp.TrialStart = trialRespData.Data.TrialStart
	resp.TrialEnd = trialRespData.Data.TrialEnd
	resp.OnTrial = trialRespData.Data.OnTrial
	resp.WorkSpaceBlock = trialRespData.Data.WorkSpaceBlock
	resp.TrialConvertedToSubscription = trialRespData.Data.TrialConvertedToSubscription
	resp.GracePeriodEnd = trialRespData.Data.GracePeriodEnd

	ah.Respond(w, resp)
}

func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {

	type checkoutResponse struct {
		Status string `json:"status"`
		Data   struct {
			RedirectURL string `json:"redirectURL"`
		} `json:"data"`
	}

	hClient := &http.Client{}
	req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		return
	}
	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
	licenseResp, err := hClient.Do(req)
	if err != nil {
		RespondError(w, model.InternalError(err), nil)
		return
	}

	// decode response body
	var resp checkoutResponse
	if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
		RespondError(w, model.InternalError(err), nil)
		return
	}

	ah.Respond(w, resp.Data)
}

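Aside: the repeated "still return the license details to avoid incorrectly blocking the UI" comments in listLicensesV2 describe a deliberate graceful-degradation pattern, where the trial-details call is best-effort enrichment and every failure path falls back to serving the base response. A condensed, self-contained sketch of that shape (the type and URL here are illustrative, not the repo's):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

// trialInfo is an illustrative stand-in for model.SubscriptionServerResp.
type trialInfo struct {
	TrialStart int64 `json:"trialStart"`
	TrialEnd   int64 `json:"trialEnd"`
}

// enrichWithTrial attempts to fetch trial details; on any failure it logs,
// returns false, and leaves the base response untouched instead of erroring.
func enrichWithTrial(client *http.Client, url string, out *trialInfo) bool {
	resp, err := client.Get(url)
	if err != nil {
		log.Printf("fetching trial details: %v", err)
		return false
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil || resp.StatusCode != http.StatusOK {
		log.Printf("reading trial details (status %d): %v", resp.StatusCode, err)
		return false
	}
	if err := json.Unmarshal(body, out); err != nil {
		log.Printf("decoding trial details: %v", err)
		return false
	}
	return true
}

func main() {
	var info trialInfo
	// An unreachable endpoint: the caller can still serve its base response.
	ok := enrichWithTrial(http.DefaultClient, "http://127.0.0.1:1/trial", &info)
	fmt.Println("enriched:", ok, info)
}
```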
ee/query-service/app/api/pat.go (new file)
@@ -0,0 +1,165 @@
package api

import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"github.com/gorilla/mux"
	"go.signoz.io/signoz/ee/query-service/model"
	"go.signoz.io/signoz/pkg/query-service/auth"
	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

func generatePATToken() string {
	// Generate a 32-byte random token.
	token := make([]byte, 32)
	rand.Read(token)
	// Encode the token in base64.
	encodedToken := base64.StdEncoding.EncodeToString(token)
	return encodedToken
}

func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()

	req := model.CreatePATRequestBody{}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}
	user, err := auth.GetUserFromReqContext(r.Context())
	if err != nil {
		RespondError(w, &model.ApiError{
			Typ: model.ErrorUnauthorized,
			Err: err,
		}, nil)
		return
	}
	pat := model.PAT{
		Name:      req.Name,
		Role:      req.Role,
		ExpiresAt: req.ExpiresInDays,
	}
	err = validatePATRequest(pat)
	if err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}

	// All the PATs are associated with the user creating the PAT.
	pat.UserID = user.ID
	pat.CreatedAt = time.Now().Unix()
	pat.UpdatedAt = time.Now().Unix()
	pat.LastUsed = 0
	pat.Token = generatePATToken()

	if pat.ExpiresAt != 0 {
		// convert expiresAt to unix timestamp from days
		pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60)
	}

	zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
	var apierr basemodel.BaseApiError
	if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
		RespondError(w, apierr, nil)
		return
	}

	ah.Respond(w, &pat)
}

func validatePATRequest(req model.PAT) error {
	if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
		return fmt.Errorf("valid role is required")
	}
	if req.ExpiresAt < 0 {
		return fmt.Errorf("valid expiresAt is required")
	}
	if req.Name == "" {
		return fmt.Errorf("valid name is required")
	}
	return nil
}

func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()

	req := model.PAT{}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}

	user, err := auth.GetUserFromReqContext(r.Context())
	if err != nil {
		RespondError(w, &model.ApiError{
			Typ: model.ErrorUnauthorized,
			Err: err,
		}, nil)
		return
	}

	err = validatePATRequest(req)
	if err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}

	req.UpdatedByUserID = user.ID
	id := mux.Vars(r)["id"]
	req.UpdatedAt = time.Now().Unix()
	zap.L().Info("Got Update PAT request", zap.Any("pat", req))
	var apierr basemodel.BaseApiError
	if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
		RespondError(w, apierr, nil)
		return
	}

	ah.Respond(w, map[string]string{"data": "pat updated successfully"})
}

func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()
	user, err := auth.GetUserFromReqContext(r.Context())
	if err != nil {
		RespondError(w, &model.ApiError{
			Typ: model.ErrorUnauthorized,
			Err: err,
		}, nil)
		return
	}
	zap.L().Info("Get PATs for user", zap.String("user_id", user.ID))
	pats, apierr := ah.AppDao().ListPATs(ctx)
	if apierr != nil {
		RespondError(w, apierr, nil)
		return
	}
	ah.Respond(w, pats)
}

func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()
	id := mux.Vars(r)["id"]
	user, err := auth.GetUserFromReqContext(r.Context())
	if err != nil {
		RespondError(w, &model.ApiError{
			Typ: model.ErrorUnauthorized,
			Err: err,
		}, nil)
		return
	}

	zap.L().Info("Revoke PAT with id", zap.String("id", id))
	if apierr := ah.AppDao().RevokePAT(ctx, id, user.ID); apierr != nil {
		RespondError(w, apierr, nil)
		return
	}
	ah.Respond(w, map[string]string{"data": "pat revoked successfully"})
}
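Aside: the two mechanics this new file leans on are easy to verify in isolation, namely a 32-byte random token encoded as base64, and an "expires in N days" value folded into an absolute unix timestamp exactly as createPAT does above. A minimal standalone sketch (function names here are illustrative, not the repo's):

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"time"
)

// newToken mirrors generatePATToken: 32 random bytes, base64-encoded.
func newToken() string {
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		panic(err) // crypto/rand failing is unrecoverable here
	}
	return base64.StdEncoding.EncodeToString(b)
}

// expiryUnix converts a relative "expires in N days" into the absolute
// unix timestamp stored on the PAT; 0 is kept as "never expires".
func expiryUnix(days int64) int64 {
	if days == 0 {
		return 0
	}
	return time.Now().Unix() + days*24*60*60
}

func main() {
	fmt.Println("token:", newToken())
	fmt.Println("expires at:", expiryUnix(30)) // 30 days from now
}
```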
@@ -6,28 +6,15 @@ import (
	"io"
	"net/http"

	"github.com/SigNoz/signoz/ee/query-service/anomaly"
	"github.com/SigNoz/signoz/pkg/http/render"
	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
	"github.com/SigNoz/signoz/pkg/query-service/model"
	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"go.signoz.io/signoz/ee/query-service/anomaly"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
	"go.signoz.io/signoz/pkg/query-service/model"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.uber.org/zap"
)

func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
	claims, err := authtypes.ClaimsFromContext(r.Context())
	if err != nil {
		render.Error(w, err)
		return
	}
	orgID, err := valuer.NewUUID(claims.OrgID)
	if err != nil {
		render.Error(w, err)
		return
	}

	bodyBytes, _ := io.ReadAll(r.Body)
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
@@ -42,7 +29,7 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
	queryRangeParams.Version = "v4"

	// add temporality for each metric
	temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
	temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
	if temporalityErr != nil {
		zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
@@ -98,30 +85,34 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
	switch seasonality {
	case anomaly.SeasonalityWeekly:
		provider = anomaly.NewWeeklyProvider(
			anomaly.WithCache[*anomaly.WeeklyProvider](aH.Signoz.Cache),
			anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
			anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
			anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
		)
	case anomaly.SeasonalityDaily:
		provider = anomaly.NewDailyProvider(
			anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
			anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
			anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
			anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
		)
	case anomaly.SeasonalityHourly:
		provider = anomaly.NewHourlyProvider(
			anomaly.WithCache[*anomaly.HourlyProvider](aH.Signoz.Cache),
			anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
			anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
			anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
		)
	default:
		provider = anomaly.NewDailyProvider(
			anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
			anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
			anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
			anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
		)
	}
	anomalies, err := provider.GetAnomalies(r.Context(), orgID, &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
	anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return

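Aside: the anomaly providers above are configured through generic functional options (`anomaly.WithCache[*anomaly.WeeklyProvider](…)` and friends), with one type parameter per concrete provider. A minimal sketch of that generics-based option pattern, using placeholder types rather than the actual anomaly package:

```go
package main

import "fmt"

// provider is a placeholder for the concrete anomaly provider types.
type provider struct {
	cacheName   string
	seasonality string
}

// option is a generic functional option; the real code declares one of
// these per configurable aspect, constrained to each provider type.
type option[T any] func(T)

// withCache works for any T that exposes setCache, mirroring how
// WithCache[*anomaly.DailyProvider](…) is instantiated per provider.
func withCache[T interface{ setCache(string) }](name string) option[T] {
	return func(p T) { p.setCache(name) }
}

func (p *provider) setCache(name string) { p.cacheName = name }

func newProvider(seasonality string, opts ...option[*provider]) *provider {
	p := &provider{seasonality: seasonality}
	for _, o := range opts {
		o(p) // apply each option in order
	}
	return p
}

func main() {
	p := newProvider("daily", withCache[*provider]("in-memory"))
	fmt.Printf("%+v\n", *p)
}
```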
@@ -3,8 +3,8 @@ package api

import (
	"net/http"

	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {

ee/query-service/app/api/traces.go (new file)
@@ -0,0 +1,33 @@
package api

import (
	"net/http"

	"go.signoz.io/signoz/ee/query-service/app/db"
	"go.signoz.io/signoz/ee/query-service/model"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {

	if !ah.CheckFeature(basemodel.SmartTraceDetail) {
		zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
		ah.APIHandler.SearchTraces(w, r)
		return
	}
	searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
		return
	}

	result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
	if ah.HandleError(w, err, http.StatusBadRequest) {
		return
	}

	ah.WriteJSON(w, r, result)

}
@@ -5,35 +5,38 @@ import (

	"github.com/ClickHouse/clickhouse-go/v2"

	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/prometheus"
	basechr "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/jmoiron/sqlx"

	"go.signoz.io/signoz/pkg/cache"
	basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
)

type ClickhouseReader struct {
	conn  clickhouse.Conn
	appdb sqlstore.SQLStore
	appdb *sqlx.DB
	*basechr.ClickHouseReader
}

func NewDataConnector(
	sqlDB sqlstore.SQLStore,
	telemetryStore telemetrystore.TelemetryStore,
	prometheus prometheus.Prometheus,
	localDB *sqlx.DB,
	ch clickhouse.Conn,
	promConfigPath string,
	lm interfaces.FeatureLookup,
	cluster string,
	useLogsNewSchema bool,
	useTraceNewSchema bool,
	fluxIntervalForTraceDetail time.Duration,
	cache cache.Cache,
) *ClickhouseReader {
	chReader := basechr.NewReader(sqlDB, telemetryStore, prometheus, cluster, fluxIntervalForTraceDetail, cache)
	chReader := basechr.NewReader(localDB, ch, promConfigPath, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
	return &ClickhouseReader{
		conn:             telemetryStore.ClickhouseDB(),
		appdb:            sqlDB,
		conn:             ch,
		appdb:            localDB,
		ClickHouseReader: chReader,
	}
}

func (r *ClickhouseReader) GetSQLStore() sqlstore.SQLStore {
	return r.appdb
func (r *ClickhouseReader) Start(readerReady chan bool) {
	r.ClickHouseReader.Start(readerReady)
}

@@ -1,16 +1,17 @@
package smart
package db

import (
	"errors"
	"strconv"

	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

// SmartTraceAlgorithm is an algorithm to find the target span and build a tree of spans around it with the given levelUp and levelDown parameters and the given spanLimit
func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
	var spans []*SpanForTraceDetails
	var spans []*model.SpanForTraceDetails

	// if targetSpanId is null or not present then randomly select a span as targetSpanId
	if (targetSpanId == "" || targetSpanId == "null") && len(payload) > 0 {
@@ -23,7 +24,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
		if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
			parentID = spanItem.References[0].SpanId
		}
		span := &SpanForTraceDetails{
		span := &model.SpanForTraceDetails{
			TimeUnixNano: spanItem.TimeUnixNano,
			SpanID:       spanItem.SpanID,
			TraceID:      spanItem.TraceID,
@@ -44,7 +45,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
	if err != nil {
		return nil, err
	}
	targetSpan := &SpanForTraceDetails{}
	targetSpan := &model.SpanForTraceDetails{}

	// Find the target span in the span trees
	for _, root := range roots {
@@ -64,7 +65,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
	}

	// Build the final result
	parents := []*SpanForTraceDetails{}
	parents := []*model.SpanForTraceDetails{}

	// Get the parent spans of the target span up to the given levelUp parameter and spanLimit
	preParent := targetSpan
@@ -89,11 +90,11 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
	}

	// Get the child spans of the target span until the given levelDown and spanLimit
	preParents := []*SpanForTraceDetails{targetSpan}
	children := []*SpanForTraceDetails{}
	preParents := []*model.SpanForTraceDetails{targetSpan}
	children := []*model.SpanForTraceDetails{}

	for i := 0; i < levelDown && len(preParents) != 0 && spanLimit > 0; i++ {
		parents := []*SpanForTraceDetails{}
		parents := []*model.SpanForTraceDetails{}
		for _, parent := range preParents {
			if spanLimit-len(parent.Children) <= 0 {
				children = append(children, parent.Children[:spanLimit]...)
@@ -107,7 +108,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
	}

	// Store the final list of spans in the resultSpanSet map to avoid duplicates
	resultSpansSet := make(map[*SpanForTraceDetails]struct{})
	resultSpansSet := make(map[*model.SpanForTraceDetails]struct{})
	resultSpansSet[targetSpan] = struct{}{}
	for _, parent := range parents {
		resultSpansSet[parent] = struct{}{}
@@ -168,12 +169,12 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
}

// buildSpanTrees builds trees of spans from a list of spans.
func buildSpanTrees(spansPtr *[]*SpanForTraceDetails) ([]*SpanForTraceDetails, error) {
func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTraceDetails, error) {

	// Build a map of spanID to span for fast lookup
	var roots []*SpanForTraceDetails
	var roots []*model.SpanForTraceDetails
	spans := *spansPtr
	mapOfSpans := make(map[string]*SpanForTraceDetails, len(spans))
	mapOfSpans := make(map[string]*model.SpanForTraceDetails, len(spans))

	for _, span := range spans {
		if span.ParentID == "" {
@@ -205,8 +206,8 @@ func buildSpanTrees(spansPtr *[]*SpanForTraceDetails) ([]*SpanForTraceDetails, e
}

// breadthFirstSearch performs a breadth-first search on the span tree to find the target span.
func breadthFirstSearch(spansPtr *SpanForTraceDetails, targetId string) (*SpanForTraceDetails, error) {
	queue := []*SpanForTraceDetails{spansPtr}
func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*model.SpanForTraceDetails, error) {
	queue := []*model.SpanForTraceDetails{spansPtr}
	visited := make(map[string]bool)

	for len(queue) > 0 {
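Aside: breadthFirstSearch above is a textbook queue-based traversal with a visited set. A compact, self-contained version of the same idea over a toy span tree (the span type is a simplified stand-in for model.SpanForTraceDetails):

```go
package main

import (
	"errors"
	"fmt"
)

// span is a simplified stand-in for the span-tree node type.
type span struct {
	SpanID   string
	Children []*span
}

// bfs walks the tree level by level until it finds targetID; the visited
// set guards against cycles that malformed reference data could introduce.
func bfs(root *span, targetID string) (*span, error) {
	queue := []*span{root}
	visited := make(map[string]bool)
	for len(queue) > 0 {
		node := queue[0]
		queue = queue[1:]
		if visited[node.SpanID] {
			continue
		}
		visited[node.SpanID] = true
		if node.SpanID == targetID {
			return node, nil
		}
		queue = append(queue, node.Children...)
	}
	return nil, errors.New("span not found")
}

func main() {
	root := &span{SpanID: "a", Children: []*span{
		{SpanID: "b"},
		{SpanID: "c", Children: []*span{{SpanID: "d"}}},
	}}
	got, err := bfs(root, "d")
	fmt.Println(got.SpanID, err) // d <nil>
}
```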
@@ -2,6 +2,7 @@ package app

import (
	"context"
	"errors"
	"fmt"
	"net"
	"net/http"
@@ -11,51 +12,70 @@ import (
	"github.com/gorilla/handlers"
	"github.com/jmoiron/sqlx"

	"github.com/SigNoz/signoz/ee/query-service/app/api"
	"github.com/SigNoz/signoz/ee/query-service/app/db"
	"github.com/SigNoz/signoz/ee/query-service/constants"
	"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
	"github.com/SigNoz/signoz/ee/query-service/rules"
	"github.com/SigNoz/signoz/ee/query-service/usage"
	"github.com/SigNoz/signoz/pkg/alertmanager"
	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/http/middleware"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/prometheus"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/web"
	"github.com/rs/cors"
	"github.com/soheilhy/cmux"
	eemiddleware "go.signoz.io/signoz/ee/http/middleware"
	"go.signoz.io/signoz/ee/query-service/app/api"
	"go.signoz.io/signoz/ee/query-service/app/db"
	"go.signoz.io/signoz/ee/query-service/auth"
	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/dao"
	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
	"go.signoz.io/signoz/ee/query-service/interfaces"
	"go.signoz.io/signoz/ee/query-service/rules"
	"go.signoz.io/signoz/pkg/alertmanager"
	"go.signoz.io/signoz/pkg/http/middleware"
	"go.signoz.io/signoz/pkg/signoz"
	"go.signoz.io/signoz/pkg/sqlstore"
	"go.signoz.io/signoz/pkg/types"
	"go.signoz.io/signoz/pkg/types/authtypes"
	"go.signoz.io/signoz/pkg/web"

	"github.com/SigNoz/signoz/pkg/query-service/agentConf"
	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
	"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
	"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
	"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
	opAmpModel "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
	baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
	"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
	"github.com/SigNoz/signoz/pkg/query-service/telemetry"
	"github.com/SigNoz/signoz/pkg/query-service/utils"
	licensepkg "go.signoz.io/signoz/ee/query-service/license"
	"go.signoz.io/signoz/ee/query-service/usage"

	"go.signoz.io/signoz/pkg/query-service/agentConf"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
	baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
	"go.signoz.io/signoz/pkg/query-service/app/integrations"
	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
	"go.signoz.io/signoz/pkg/query-service/app/opamp"
	opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
	"go.signoz.io/signoz/pkg/query-service/app/preferences"
	"go.signoz.io/signoz/pkg/query-service/cache"
	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/healthcheck"
	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
	baserules "go.signoz.io/signoz/pkg/query-service/rules"
	"go.signoz.io/signoz/pkg/query-service/telemetry"
	"go.signoz.io/signoz/pkg/query-service/utils"
	"go.uber.org/zap"
)

const AppDbEngine = "sqlite"

type ServerOptions struct {
	Config          signoz.Config
	SigNoz          *signoz.SigNoz
	HTTPHostPort    string
	PrivateHostPort string
	Config            signoz.Config
	SigNoz            *signoz.SigNoz
	PromConfigPath    string
	SkipTopLvlOpsPath string
	HTTPHostPort      string
	PrivateHostPort   string
	// alert specific params
	DisableRules               bool
	RuleRepoURL                string
	PreferSpanMetrics          bool
	CacheConfigPath            string
	FluxInterval               string
	FluxIntervalForTraceDetail string
	Cluster                    string
	GatewayUrl                 string
	UseLogsNewSchema           bool
	UseTraceNewSchema          bool
	Jwt                        *authtypes.JWT
}

@@ -87,34 +107,88 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {

// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {
	modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore)
	if err != nil {
		return nil, err
	}

	if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
		return nil, err
	}

	if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
		return nil, err
	}

	if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
		return nil, err
	}

	gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
	if err != nil {
		return nil, err
	}

	// initiate license manager
	lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB(), serverOptions.SigNoz.SQLStore.BunDB())
	if err != nil {
		return nil, err
	}

	// set license manager as feature flag provider in dao
	modelDao.SetFlagProvider(lm)
	readerReady := make(chan bool)

	fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
	if err != nil {
		return nil, err
	}

	reader := db.NewDataConnector(
		serverOptions.SigNoz.SQLStore,
		serverOptions.SigNoz.TelemetryStore,
		serverOptions.SigNoz.Prometheus,
	var reader interfaces.DataConnector
	qb := db.NewDataConnector(
		serverOptions.SigNoz.SQLStore.SQLxDB(),
		serverOptions.SigNoz.TelemetryStore.ClickHouseDB(),
		serverOptions.PromConfigPath,
		lm,
		serverOptions.Cluster,
		serverOptions.UseLogsNewSchema,
		serverOptions.UseTraceNewSchema,
		fluxIntervalForTraceDetail,
		serverOptions.SigNoz.Cache,
	)
	go qb.Start(readerReady)
	reader = qb

	skipConfig := &basemodel.SkipConfig{}
	if serverOptions.SkipTopLvlOpsPath != "" {
		// read skip config
		skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
		if err != nil {
			return nil, err
		}
	}
	var c cache.Cache
	if serverOptions.CacheConfigPath != "" {
		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
		if err != nil {
			return nil, err
		}
		c = cache.NewCache(cacheOpts)
	}

	<-readerReady
	rm, err := makeRulesManager(
		serverOptions.PromConfigPath,
		serverOptions.RuleRepoURL,
		serverOptions.SigNoz.SQLStore.SQLxDB(),
		reader,
		serverOptions.SigNoz.Cache,
		c,
		serverOptions.DisableRules,
		lm,
		serverOptions.UseLogsNewSchema,
		serverOptions.UseTraceNewSchema,
		serverOptions.SigNoz.Alertmanager,
		serverOptions.SigNoz.SQLStore,
		serverOptions.SigNoz.TelemetryStore,
		serverOptions.SigNoz.Prometheus,
		serverOptions.SigNoz.Modules.OrgGetter,
	)

	if err != nil {
@@ -143,7 +217,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

	// ingestion pipelines manager
	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
		serverOptions.SigNoz.SQLStore, integrationsController.GetPipelinesForInstalledIntegrations,
		serverOptions.SigNoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
	)
	if err != nil {
		return nil, err
@@ -159,23 +233,17 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	}

	// start the usagemanager
	usageManager, err := usage.New(serverOptions.SigNoz.Licensing, serverOptions.SigNoz.TelemetryStore.ClickhouseDB(), serverOptions.SigNoz.Zeus, serverOptions.SigNoz.Modules.OrgGetter)
	usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickHouseDB(), serverOptions.Config.TelemetryStore.ClickHouse.DSN)
	if err != nil {
		return nil, err
	}
	err = usageManager.Start(context.Background())
	err = usageManager.Start()
	if err != nil {
		return nil, err
	}

	telemetry.GetInstance().SetReader(reader)
	telemetry.GetInstance().SetSqlStore(serverOptions.SigNoz.SQLStore)
	telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
	telemetry.GetInstance().SetSavedViewsInfoCallback(telemetry.GetSavedViewsInfo)
	telemetry.GetInstance().SetAlertsInfoCallback(telemetry.GetAlertsInfo)
	telemetry.GetInstance().SetGetUsersCallback(telemetry.GetUsers)
	telemetry.GetInstance().SetUserCountCallback(telemetry.GetUserCount)
	telemetry.GetInstance().SetDashboardsInfoCallback(telemetry.GetDashboardsInfo)

	fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
	if err != nil {
@@ -184,15 +252,22 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

	apiOpts := api.APIHandlerOptions{
		DataConnector:                 reader,
		SkipConfig:                    skipConfig,
		PreferSpanMetrics:             serverOptions.PreferSpanMetrics,
		AppDao:                        modelDao,
		RulesManager:                  rm,
		UsageManager:                  usageManager,
		FeatureFlags:                  lm,
		LicenseManager:                lm,
		IntegrationsController:        integrationsController,
		CloudIntegrationsController:   cloudIntegrationsController,
		LogsParsingPipelineController: logParsingPipelineController,
		Cache:                         c,
		FluxInterval:                  fluxInterval,
		Gateway:                       gatewayProxy,
		GatewayUrl:                    serverOptions.GatewayUrl,
		UseLogsNewSchema:              serverOptions.UseLogsNewSchema,
		UseTraceNewSchema:             serverOptions.UseTraceNewSchema,
		JWT:                           serverOptions.Jwt,
	}

@@ -202,6 +277,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	}

	s := &Server{
		// logger: logger,
		// tracer: tracer,
		ruleManager:        rm,
		serverOptions:      serverOptions,
		unavailableChannel: make(chan healthcheck.Status),
@@ -227,32 +304,22 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		&opAmpModel.AllAgents, agentConfMgr,
	)

	orgs, err := apiHandler.Signoz.Modules.OrgGetter.ListByOwnedKeyRange(context.Background())
	if err != nil {
		return nil, err
	}
	for _, org := range orgs {
		errorList := reader.PreloadMetricsMetadata(context.Background(), org.ID)
		for _, er := range errorList {
			zap.L().Error("failed to preload metrics metadata", zap.Error(er))
		}
	}

	return s, nil
}

func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {

	r := baseapp.NewRouter()

	r.Use(middleware.NewAuth(s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.serverOptions.SigNoz.Sharder, s.serverOptions.SigNoz.Instrumentation.Logger()).Wrap)
	r.Use(middleware.NewAPIKey(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.serverOptions.SigNoz.Instrumentation.Logger(), s.serverOptions.SigNoz.Sharder).Wrap)
	r.Use(middleware.NewTimeout(s.serverOptions.SigNoz.Instrumentation.Logger(),
|
||||
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
|
||||
r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
|
||||
r.Use(middleware.NewTimeout(zap.L(),
|
||||
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
|
||||
s.serverOptions.Config.APIServer.Timeout.Default,
|
||||
s.serverOptions.Config.APIServer.Timeout.Max,
|
||||
).Wrap)
|
||||
r.Use(middleware.NewAnalytics().Wrap)
|
||||
r.Use(middleware.NewLogging(s.serverOptions.SigNoz.Instrumentation.Logger(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
|
||||
r.Use(middleware.NewAnalytics(zap.L()).Wrap)
|
||||
r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
|
||||
|
||||
apiHandler.RegisterPrivateRoutes(r)
|
||||
|
||||
@@ -273,24 +340,39 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
|
||||
}
|
||||
|
||||
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
|
||||
r := baseapp.NewRouter()
|
||||
am := middleware.NewAuthZ(s.serverOptions.SigNoz.Instrumentation.Logger())
|
||||
|
||||
r.Use(middleware.NewAuth(s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.serverOptions.SigNoz.Sharder, s.serverOptions.SigNoz.Instrumentation.Logger()).Wrap)
|
||||
r.Use(middleware.NewAPIKey(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.serverOptions.SigNoz.Instrumentation.Logger(), s.serverOptions.SigNoz.Sharder).Wrap)
|
||||
r.Use(middleware.NewTimeout(s.serverOptions.SigNoz.Instrumentation.Logger(),
|
||||
r := baseapp.NewRouter()
|
||||
|
||||
// add auth middleware
|
||||
getUserFromRequest := func(ctx context.Context) (*types.GettableUser, error) {
|
||||
user, err := auth.GetUserFromRequestContext(ctx, apiHandler)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if user.User.OrgID == "" {
|
||||
return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
|
||||
}
|
||||
|
||||
return user, nil
|
||||
}
|
||||
am := baseapp.NewAuthMiddleware(getUserFromRequest)
|
||||
|
||||
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
|
||||
r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
|
||||
r.Use(middleware.NewTimeout(zap.L(),
|
||||
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
|
||||
s.serverOptions.Config.APIServer.Timeout.Default,
|
||||
s.serverOptions.Config.APIServer.Timeout.Max,
|
||||
).Wrap)
|
||||
r.Use(middleware.NewAnalytics().Wrap)
|
||||
r.Use(middleware.NewLogging(s.serverOptions.SigNoz.Instrumentation.Logger(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
|
||||
r.Use(middleware.NewAnalytics(zap.L()).Wrap)
|
||||
r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
|
||||
|
||||
apiHandler.RegisterRoutes(r, am)
|
||||
apiHandler.RegisterLogsRoutes(r, am)
|
||||
apiHandler.RegisterIntegrationRoutes(r, am)
|
||||
apiHandler.RegisterCloudIntegrationsRoutes(r, am)
|
||||
apiHandler.RegisterFieldsRoutes(r, am)
|
||||
apiHandler.RegisterQueryRangeV3Routes(r, am)
|
||||
apiHandler.RegisterInfraMetricsRoutes(r, am)
|
||||
apiHandler.RegisterQueryRangeV4Routes(r, am)
|
||||
@@ -298,7 +380,6 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
|
||||
apiHandler.RegisterMessagingQueuesRoutes(r, am)
|
||||
apiHandler.RegisterThirdPartyApiRoutes(r, am)
|
||||
apiHandler.MetricExplorerRoutes(r, am)
|
||||
apiHandler.RegisterTraceFunnelsRoutes(r, am)
|
||||
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
@@ -353,8 +434,14 @@ func (s *Server) initListeners() error {
|
||||
}
|
||||
|
||||
// Start listening on http and private http port concurrently
|
||||
func (s *Server) Start(ctx context.Context) error {
|
||||
s.ruleManager.Start(ctx)
|
||||
func (s *Server) Start() error {
|
||||
|
||||
// initiate rule manager first
|
||||
if !s.serverOptions.DisableRules {
|
||||
s.ruleManager.Start()
|
||||
} else {
|
||||
zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
|
||||
}
|
||||
|
||||
err := s.initListeners()
|
||||
if err != nil {
|
||||
@@ -419,15 +506,15 @@ func (s *Server) Start(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) Stop(ctx context.Context) error {
|
||||
func (s *Server) Stop() error {
|
||||
if s.httpServer != nil {
|
||||
if err := s.httpServer.Shutdown(ctx); err != nil {
|
||||
if err := s.httpServer.Shutdown(context.Background()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if s.privateHTTP != nil {
|
||||
if err := s.privateHTTP.Shutdown(ctx); err != nil {
|
||||
if err := s.privateHTTP.Shutdown(context.Background()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -435,40 +522,52 @@ func (s *Server) Stop(ctx context.Context) error {
|
||||
s.opampServer.Stop()
|
||||
|
||||
if s.ruleManager != nil {
|
||||
s.ruleManager.Stop(ctx)
|
||||
s.ruleManager.Stop()
|
||||
}
|
||||
|
||||
// stop usage manager
|
||||
s.usageManager.Stop(ctx)
|
||||
s.usageManager.Stop()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeRulesManager(
|
||||
promConfigPath,
|
||||
ruleRepoURL string,
|
||||
db *sqlx.DB,
|
||||
ch baseint.Reader,
|
||||
cache cache.Cache,
|
||||
disableRules bool,
|
||||
fm baseint.FeatureLookup,
|
||||
useLogsNewSchema bool,
|
||||
useTraceNewSchema bool,
|
||||
alertmanager alertmanager.Alertmanager,
|
||||
sqlstore sqlstore.SQLStore,
|
||||
telemetryStore telemetrystore.TelemetryStore,
|
||||
prometheus prometheus.Prometheus,
|
||||
orgGetter organization.Getter,
|
||||
) (*baserules.Manager, error) {
|
||||
// create engine
|
||||
pqle, err := pqle.FromConfigPath(promConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create pql engine : %v", err)
|
||||
}
|
||||
|
||||
// create manager opts
|
||||
managerOpts := &baserules.ManagerOptions{
|
||||
TelemetryStore: telemetryStore,
|
||||
Prometheus: prometheus,
|
||||
PqlEngine: pqle,
|
||||
RepoURL: ruleRepoURL,
|
||||
DBConn: db,
|
||||
Context: context.Background(),
|
||||
Logger: zap.L(),
|
||||
DisableRules: disableRules,
|
||||
FeatureFlags: fm,
|
||||
Reader: ch,
|
||||
Cache: cache,
|
||||
EvalDelay: baseconst.GetEvalDelay(),
|
||||
PrepareTaskFunc: rules.PrepareTaskFunc,
|
||||
UseLogsNewSchema: useLogsNewSchema,
|
||||
UseTraceNewSchema: useTraceNewSchema,
|
||||
PrepareTestRuleFunc: rules.TestNotification,
|
||||
Alertmanager: alertmanager,
|
||||
SQLStore: sqlstore,
|
||||
OrgGetter: orgGetter,
|
||||
}
|
||||
|
||||
// create Manager
|
||||
|
||||
ee/query-service/auth/auth.go (new file, 56 lines)
@@ -0,0 +1,56 @@
package auth

import (
	"context"
	"fmt"
	"time"

	"go.signoz.io/signoz/ee/query-service/app/api"
	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/query-service/telemetry"
	"go.signoz.io/signoz/pkg/types"
	"go.signoz.io/signoz/pkg/types/authtypes"

	"go.uber.org/zap"
)

func GetUserFromRequestContext(ctx context.Context, apiHandler *api.APIHandler) (*types.GettableUser, error) {
	patToken, ok := authtypes.UUIDFromContext(ctx)
	if ok && patToken != "" {
		zap.L().Debug("Received a non-zero length PAT token")
		ctx := context.Background()
		dao := apiHandler.AppDao()

		pat, err := dao.GetPAT(ctx, patToken)
		if err == nil && pat != nil {
			zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
			if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
				zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
				return nil, fmt.Errorf("PAT has expired")
			}
			group, apiErr := dao.GetGroupByName(ctx, pat.Role)
			if apiErr != nil {
				zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
				return nil, apiErr
			}
			user, err := dao.GetUser(ctx, pat.UserID)
			if err != nil {
				zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
				return nil, err
			}
			telemetry.GetInstance().SetPatTokenUser()
			dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
			user.User.GroupID = group.ID
			user.User.ID = pat.Id
			return &types.GettableUser{
				User: user.User,
				Role: pat.Role,
			}, nil
		}
		if err != nil {
			zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
			return nil, err
		}
	}
	return baseauth.GetUserFromReqContext(ctx)
}
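To make the helper above concrete, here is a minimal sketch of how it could be wired into an HTTP request path. The function name requireUser and the handler shape are illustrative only; the routers in this compare actually feed the same helper into baseapp.NewAuthMiddleware via a closure, as shown in createPublicServer above.

package example

import (
	"net/http"

	"go.signoz.io/signoz/ee/query-service/app/api"
	"go.signoz.io/signoz/ee/query-service/auth"
)

// requireUser resolves the caller (PAT or JWT) before serving the request.
func requireUser(apiHandler *api.APIHandler, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		user, err := auth.GetUserFromRequestContext(r.Context(), apiHandler)
		// Reject when resolution fails or the claims carry no org, mirroring
		// the getUserFromRequest closure in createPublicServer.
		if err != nil || user.User.OrgID == "" {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(w, r)
	})
}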
@@ -5,7 +5,7 @@ import (
)

const (
	DefaultSiteURL = "https://localhost:8080"
	DefaultSiteURL = "https://localhost:3301"
)

var LicenseSignozIo = "https://license.signoz.io/api/v1"

@@ -33,13 +33,3 @@ func GetOrDefaultEnv(key string, fallback string) string {
func GetDefaultSiteURL() string {
	return GetOrDefaultEnv("SIGNOZ_SITE_URL", DefaultSiteURL)
}

const DotMetricsEnabled = "DOT_METRICS_ENABLED"

var IsDotMetricsEnabled = false

func init() {
	if GetOrDefaultEnv(DotMetricsEnabled, "false") == "true" {
		IsDotMetricsEnabled = true
	}
}
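A small sketch of the env-with-fallback pattern used above; with a clean environment this prints whatever DefaultSiteURL resolves to after this change:

package main

import (
	"fmt"

	"go.signoz.io/signoz/ee/query-service/constants"
)

func main() {
	// GetDefaultSiteURL falls back to DefaultSiteURL when
	// SIGNOZ_SITE_URL is unset in the environment.
	fmt.Println(constants.GetDefaultSiteURL())
}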
ee/query-service/dao/factory.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package dao

import (
	"go.signoz.io/signoz/ee/query-service/dao/sqlite"
	"go.signoz.io/signoz/pkg/sqlstore"
)

func InitDao(sqlStore sqlstore.SQLStore) (ModelDao, error) {
	return sqlite.InitDB(sqlStore)
}
ee/query-service/dao/interface.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package dao

import (
	"context"
	"net/url"

	"github.com/google/uuid"
	"github.com/jmoiron/sqlx"
	"go.signoz.io/signoz/ee/query-service/model"
	basedao "go.signoz.io/signoz/pkg/query-service/dao"
	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/types"
	"go.signoz.io/signoz/pkg/types/authtypes"
)

type ModelDao interface {
	basedao.ModelDao

	// SetFlagProvider sets the feature lookup provider
	SetFlagProvider(flags baseint.FeatureLookup)

	DB() *sqlx.DB

	// auth methods
	CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
	PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError)
	GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)

	// org domain (auth domains) CRUD ops
	ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
	GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
	CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
	UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
	DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
	GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)

	CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError)
	UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError
	GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
	UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError
	GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
	GetUserByPAT(ctx context.Context, token string) (*types.GettableUser, basemodel.BaseApiError)
	ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError)
	RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError
}
ee/query-service/dao/sqlite/auth.go (new file, 204 lines)
@@ -0,0 +1,204 @@
package sqlite

import (
	"context"
	"fmt"
	"net/url"
	"strings"
	"time"

	"github.com/google/uuid"
	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/model"
	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/query-service/utils"
	"go.signoz.io/signoz/pkg/types"
	"go.signoz.io/signoz/pkg/types/authtypes"
	"go.uber.org/zap"
)

func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*types.User, basemodel.BaseApiError) {
	// get auth domain from email domain
	domain, apierr := m.GetDomainByEmail(ctx, email)
	if apierr != nil {
		zap.L().Error("failed to get domain from email", zap.Error(apierr))
		return nil, model.InternalErrorStr("failed to get domain from email")
	}
	if domain == nil {
		zap.L().Error("email domain does not match any authenticated domain", zap.String("email", email))
		return nil, model.InternalErrorStr("email domain does not match any authenticated domain")
	}

	hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
	if err != nil {
		zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
		return nil, model.InternalErrorStr("failed to generate password hash")
	}

	group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
	if apiErr != nil {
		zap.L().Error("GetGroupByName failed", zap.Error(apiErr))
		return nil, apiErr
	}

	user := &types.User{
		ID:       uuid.NewString(),
		Name:     "",
		Email:    email,
		Password: hash,
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
		},
		ProfilePictureURL: "", // Currently unused
		GroupID:           group.ID,
		OrgID:             domain.OrgId,
	}

	user, apiErr = m.CreateUser(ctx, user, false)
	if apiErr != nil {
		zap.L().Error("CreateUser failed", zap.Error(apiErr))
		return nil, apiErr
	}

	return user, nil

}

// PrepareSsoRedirect prepares the redirect page link after the SSO response
// is successfully parsed (i.e. a valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError) {

	userPayload, apierr := m.GetUserByEmail(ctx, email)
	if !apierr.IsNil() {
		zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
		return "", model.BadRequestStr("invalid user email received from the auth provider")
	}

	user := &types.User{}

	if userPayload == nil {
		newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
		user = newUser
		if apiErr != nil {
			zap.L().Error("failed to create user with email received from auth provider", zap.Error(apiErr))
			return "", apiErr
		}
	} else {
		user = &userPayload.User
	}

	tokenStore, err := baseauth.GenerateJWTForUser(user, jwt)
	if err != nil {
		zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
		return "", model.InternalErrorStr("failed to generate token for the user")
	}

	return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
		redirectUri,
		tokenStore.AccessJwt,
		user.ID,
		tokenStore.RefreshJwt), nil
}

func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
	domain, apierr := m.GetDomainByEmail(ctx, email)
	if apierr != nil {
		return false, apierr
	}

	if domain != nil && domain.SsoEnabled {
		// sso is enabled, check if the user has admin role
		userPayload, baseapierr := m.GetUserByEmail(ctx, email)

		if baseapierr != nil || userPayload == nil {
			return false, baseapierr
		}

		if userPayload.Role != baseconst.AdminGroup {
			return false, model.BadRequest(fmt.Errorf("auth method not supported"))
		}

	}

	return true, nil
}

// PrecheckLogin is called when the login or signup page is loaded
// to check whether sso login is to be prompted
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) {

	// assume user is valid unless proven otherwise
	resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}

	// check if email is a valid user
	userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
	if baseApiErr != nil {
		return resp, baseApiErr
	}

	if userPayload == nil {
		resp.IsUser = false
	}

	ssoAvailable := true
	err := m.checkFeature(model.SSO)
	if err != nil {
		switch err.(type) {
		case basemodel.ErrFeatureUnavailable:
			// do nothing, just skip sso
			ssoAvailable = false
		default:
			zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
			return resp, model.BadRequestStr(err.Error())
		}
	}

	if ssoAvailable {

		resp.IsUser = true

		// find domain from email
		orgDomain, apierr := m.GetDomainByEmail(ctx, email)
		if apierr != nil {
			var emailDomain string
			emailComponents := strings.Split(email, "@")
			if len(emailComponents) > 0 {
				emailDomain = emailComponents[1]
			}
			zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
			return resp, apierr
		}

		if orgDomain != nil && orgDomain.SsoEnabled {
			// saml is enabled for this domain, lets prepare sso url

			if sourceUrl == "" {
				sourceUrl = constants.GetDefaultSiteURL()
			}

			// parse source url that generated the login request
			var err error
			escapedUrl, _ := url.QueryUnescape(sourceUrl)
			siteUrl, err := url.Parse(escapedUrl)
			if err != nil {
				zap.L().Error("failed to parse referer", zap.Error(err))
				return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
			}

			// build the IdP URL that will authenticate the user;
			// the front-end will redirect the user to this url
			resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)

			if err != nil {
				zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
				return resp, model.InternalError(err)
			}

			// set SSO to true, as the url is generated correctly
			resp.SSO = true
		}
	}
	return resp, nil
}
ee/query-service/dao/sqlite/domain.go (new file, 253 lines)
@@ -0,0 +1,253 @@
package sqlite

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"net/url"
	"strings"
	"time"

	"github.com/google/uuid"
	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

// StoredDomain represents the stored database record for an org domain
type StoredDomain struct {
	Id        uuid.UUID `db:"id"`
	Name      string    `db:"name"`
	OrgId     string    `db:"org_id"`
	Data      string    `db:"data"`
	CreatedAt int64     `db:"created_at"`
	UpdatedAt int64     `db:"updated_at"`
}

// GetDomainFromSsoResponse uses the relay state received from the IdP to fetch
// the user domain. The domain is further used to process validity of the response.
// When sending the login request to the IdP we send the relay state as a URL (site url)
// with domainId or domainName as a query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
	// derive domain id from relay state now
	var domainIdStr string
	var domainNameStr string
	var domain *model.OrgDomain

	for k, v := range relayState.Query() {
		if k == "domainId" && len(v) > 0 {
			domainIdStr = strings.Replace(v[0], ":", "-", -1)
		}
		if k == "domainName" && len(v) > 0 {
			domainNameStr = v[0]
		}
	}

	if domainIdStr != "" {
		domainId, err := uuid.Parse(domainIdStr)
		if err != nil {
			zap.L().Error("failed to parse domainId from relay state", zap.Error(err))
			return nil, fmt.Errorf("failed to parse domainId from IdP response")
		}

		domain, err = m.GetDomain(ctx, domainId)
		if (err != nil) || domain == nil {
			zap.L().Error("failed to find domain from domainId received in IdP response", zap.Error(err))
			return nil, fmt.Errorf("invalid credentials")
		}
	}

	if domainNameStr != "" {

		domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
		domain = domainFromDB
		if (err != nil) || domain == nil {
			zap.L().Error("failed to find domain from domainName received in IdP response", zap.Error(err))
			return nil, fmt.Errorf("invalid credentials")
		}
	}
	if domain != nil {
		return domain, nil
	}

	return nil, fmt.Errorf("failed to find domain received in IdP response")
}

// GetDomainByName returns the org domain for a given domain name
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {

	stored := StoredDomain{}
	err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)

	if err != nil {
		if err == sql.ErrNoRows {
			return nil, model.BadRequest(fmt.Errorf("invalid domain name"))
		}
		return nil, model.InternalError(err)
	}

	domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
	if err := domain.LoadConfig(stored.Data); err != nil {
		return nil, model.InternalError(err)
	}
	return domain, nil
}

// GetDomain returns the org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {

	stored := StoredDomain{}
	err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)

	if err != nil {
		if err == sql.ErrNoRows {
			return nil, model.BadRequest(fmt.Errorf("invalid domain id"))
		}
		return nil, model.InternalError(err)
	}

	domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
	if err := domain.LoadConfig(stored.Data); err != nil {
		return nil, model.InternalError(err)
	}
	return domain, nil
}

// ListDomains gets the list of auth domains by org id
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
	domains := []model.OrgDomain{}

	stored := []StoredDomain{}
	err := m.DB().SelectContext(ctx, &stored, `SELECT * FROM org_domains WHERE org_id=$1`, orgId)

	if err != nil {
		if err == sql.ErrNoRows {
			return []model.OrgDomain{}, nil
		}
		return nil, model.InternalError(err)
	}

	for _, s := range stored {
		domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
		if err := domain.LoadConfig(s.Data); err != nil {
			zap.L().Error("ListDomains() failed", zap.Error(err))
		}
		domains = append(domains, domain)
	}

	return domains, nil
}

// CreateDomain creates a new auth domain
func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {

	if domain.Id == uuid.Nil {
		domain.Id = uuid.New()
	}

	if domain.OrgId == "" || domain.Name == "" {
		return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
	}

	configJson, err := json.Marshal(domain)
	if err != nil {
		zap.L().Error("failed to marshal domain config", zap.Error(err))
		return model.InternalError(fmt.Errorf("domain creation failed"))
	}

	_, err = m.DB().ExecContext(ctx,
		"INSERT INTO org_domains (id, name, org_id, data, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6)",
		domain.Id,
		domain.Name,
		domain.OrgId,
		configJson,
		time.Now().Unix(),
		time.Now().Unix())

	if err != nil {
		zap.L().Error("failed to insert domain in db", zap.Error(err))
		return model.InternalError(fmt.Errorf("domain creation failed"))
	}

	return nil
}

// UpdateDomain updates stored config params for a domain
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {

	if domain.Id == uuid.Nil {
		zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
		return model.InternalError(fmt.Errorf("domain update failed"))
	}

	configJson, err := json.Marshal(domain)
	if err != nil {
		zap.L().Error("domain update failed", zap.Error(err))
		return model.InternalError(fmt.Errorf("domain update failed"))
	}

	_, err = m.DB().ExecContext(ctx,
		"UPDATE org_domains SET data = $1, updated_at = $2 WHERE id = $3",
		configJson,
		time.Now().Unix(),
		domain.Id)

	if err != nil {
		zap.L().Error("domain update failed", zap.Error(err))
		return model.InternalError(fmt.Errorf("domain update failed"))
	}

	return nil
}

// DeleteDomain deletes an org domain
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {

	if id == uuid.Nil {
		zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
		return model.InternalError(fmt.Errorf("domain delete failed"))
	}

	_, err := m.DB().ExecContext(ctx,
		"DELETE FROM org_domains WHERE id = $1",
		id)

	if err != nil {
		zap.L().Error("domain delete failed", zap.Error(err))
		return model.InternalError(fmt.Errorf("domain delete failed"))
	}

	return nil
}

func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {

	if email == "" {
		return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
	}

	components := strings.Split(email, "@")
	if len(components) < 2 {
		return nil, model.BadRequest(fmt.Errorf("invalid email address"))
	}

	parsedDomain := components[1]

	stored := StoredDomain{}
	err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, parsedDomain)

	if err != nil {
		if err == sql.ErrNoRows {
			return nil, nil
		}
		return nil, model.InternalError(err)
	}

	domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
	if err := domain.LoadConfig(stored.Data); err != nil {
		return nil, model.InternalError(err)
	}
	return domain, nil
}
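For illustration only, here is the shape of a relay state URL that GetDomainFromSsoResponse above would consume; the host and UUID are made up. Note that the function replaces ":" with "-" before calling uuid.Parse, which is why a colon-separated domainId survives the round trip through the IdP.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// The IdP echoes back the relay state that was sent with the login request.
	relayState, err := url.Parse("https://signoz.example.com/api/v1/complete/saml?domainId=9f8c7b6a:1d2e:4f3a:8b9c:0a1b2c3d4e5f")
	if err != nil {
		panic(err)
	}
	// Prints the raw query value; GetDomainFromSsoResponse normalizes the
	// colons to hyphens to recover a standard UUID.
	fmt.Println(relayState.Query().Get("domainId"))
}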
ee/query-service/dao/sqlite/modelDao.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package sqlite

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	basedao "go.signoz.io/signoz/pkg/query-service/dao"
	basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
	"go.signoz.io/signoz/pkg/sqlstore"
)

type modelDao struct {
	*basedsql.ModelDaoSqlite
	flags baseint.FeatureLookup
}

// SetFlagProvider sets the feature lookup provider
func (m *modelDao) SetFlagProvider(flags baseint.FeatureLookup) {
	m.flags = flags
}

// checkFeature confirms if a feature is available
func (m *modelDao) checkFeature(key string) error {
	if m.flags == nil {
		return fmt.Errorf("flag provider not set")
	}

	return m.flags.CheckFeature(key)
}

// InitDB creates and extends the base model DB repository
func InitDB(sqlStore sqlstore.SQLStore) (*modelDao, error) {
	dao, err := basedsql.InitDB(sqlStore)
	if err != nil {
		return nil, err
	}
	// set package variable so dependent base methods (e.g. AuthCache) will work
	basedao.SetDB(dao)
	m := &modelDao{ModelDaoSqlite: dao}
	return m, nil
}

func (m *modelDao) DB() *sqlx.DB {
	return m.ModelDaoSqlite.DB()
}
ee/query-service/dao/sqlite/pat.go (new file, 200 lines)
@@ -0,0 +1,200 @@
package sqlite

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/types"
	"go.uber.org/zap"
)

func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) {
	result, err := m.DB().ExecContext(ctx,
		"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
		p.UserID,
		p.Token,
		p.Role,
		p.Name,
		p.CreatedAt,
		p.ExpiresAt,
		p.UpdatedAt,
		p.UpdatedByUserID,
		p.LastUsed,
		p.Revoked,
	)
	if err != nil {
		zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
		return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
	}
	id, err := result.LastInsertId()
	if err != nil {
		zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
		return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
	}
	p.Id = strconv.Itoa(int(id))
	createdByUser, _ := m.GetUser(ctx, p.UserID)
	if createdByUser == nil {
		p.CreatedByUser = model.User{
			NotFound: true,
		}
	} else {
		p.CreatedByUser = model.User{
			Id:                createdByUser.ID,
			Name:              createdByUser.Name,
			Email:             createdByUser.Email,
			CreatedAt:         createdByUser.CreatedAt.Unix(),
			ProfilePictureURL: createdByUser.ProfilePictureURL,
			NotFound:          false,
		}
	}
	return p, nil
}

func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError {
	_, err := m.DB().ExecContext(ctx,
		"UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;",
		p.Role,
		p.Name,
		p.UpdatedAt,
		p.UpdatedByUserID,
		id)
	if err != nil {
		zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
		return model.InternalError(fmt.Errorf("PAT update failed"))
	}
	return nil
}

func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) basemodel.BaseApiError {
	_, err := m.DB().ExecContext(ctx,
		"UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;",
		lastUsed,
		token)
	if err != nil {
		zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
		return model.InternalError(fmt.Errorf("PAT last used update failed"))
	}
	return nil
}

func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) {
	pats := []model.PAT{}

	if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
		zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
		return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
	}
	for i := range pats {
		createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
		if createdByUser == nil {
			pats[i].CreatedByUser = model.User{
				NotFound: true,
			}
		} else {
			pats[i].CreatedByUser = model.User{
				Id:                createdByUser.ID,
				Name:              createdByUser.Name,
				Email:             createdByUser.Email,
				CreatedAt:         createdByUser.CreatedAt.Unix(),
				ProfilePictureURL: createdByUser.ProfilePictureURL,
				NotFound:          false,
			}
		}

		updatedByUser, _ := m.GetUser(ctx, pats[i].UpdatedByUserID)
		if updatedByUser == nil {
			pats[i].UpdatedByUser = model.User{
				NotFound: true,
			}
		} else {
			pats[i].UpdatedByUser = model.User{
				Id:                updatedByUser.ID,
				Name:              updatedByUser.Name,
				Email:             updatedByUser.Email,
				CreatedAt:         updatedByUser.CreatedAt.Unix(),
				ProfilePictureURL: updatedByUser.ProfilePictureURL,
				NotFound:          false,
			}
		}
	}
	return pats, nil
}

func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError {
	updatedAt := time.Now().Unix()
	_, err := m.DB().ExecContext(ctx,
		"UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
		userID, updatedAt, id)
	if err != nil {
		zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
		return model.InternalError(fmt.Errorf("PAT revoke failed"))
	}
	return nil
}

func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
	pats := []model.PAT{}

	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil {
		return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
	}

	if len(pats) != 1 {
		return nil, &model.ApiError{
			Typ: model.ErrorInternal,
			Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token),
		}
	}

	return &pats[0], nil
}

func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
	pats := []model.PAT{}

	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil {
		return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
	}

	if len(pats) != 1 {
		return nil, &model.ApiError{
			Typ: model.ErrorInternal,
			Err: fmt.Errorf("found zero or multiple PATs with same token"),
		}
	}

	return &pats[0], nil
}

// deprecated
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*types.GettableUser, basemodel.BaseApiError) {
	users := []types.GettableUser{}

	query := `SELECT
		u.id,
		u.name,
		u.email,
		u.password,
		u.created_at,
		u.profile_picture_url,
		u.org_id,
		u.group_id
	FROM users u, personal_access_tokens p
	WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`

	if err := m.DB().Select(&users, query, token); err != nil {
		return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
	}

	if len(users) != 1 {
		return nil, &model.ApiError{
			Typ: model.ErrorInternal,
			Err: fmt.Errorf("found zero or multiple users with same PAT token"),
		}
	}
	return &users[0], nil
}
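To make the PAT flow above concrete, here is a hypothetical caller of CreatePAT. The helper is not part of the diff; the token value, role string, and expiry are illustrative, and the store is assumed to come from dao.InitDao.

package example

import (
	"context"
	"fmt"
	"time"

	"go.signoz.io/signoz/ee/query-service/dao"
	"go.signoz.io/signoz/ee/query-service/model"
)

// createDemoPAT inserts a 30-day token for the given user. Field names
// follow model.PAT exactly as used in the INSERT above.
func createDemoPAT(ctx context.Context, store dao.ModelDao, userID string) error {
	now := time.Now().Unix()
	pat := model.PAT{
		UserID:    userID,
		Token:     "demo-token-value", // in practice a generated secret
		Role:      "ADMIN",            // assumed role string
		Name:      "ci-token",
		CreatedAt: now,
		UpdatedAt: now,
		ExpiresAt: now + 30*24*3600,
	}
	created, apiErr := store.CreatePAT(ctx, pat)
	if apiErr != nil {
		return apiErr.ToError()
	}
	fmt.Println("created PAT id:", created.Id)
	return nil
}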
ee/query-service/integrations/signozio/response.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package signozio

type status string

type ValidateLicenseResponse struct {
	Status status                 `json:"status"`
	Data   map[string]interface{} `json:"data"`
}
ee/query-service/integrations/signozio/signozio.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package signozio

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/pkg/errors"

	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/model"
)

var C *Client

const (
	POST             = "POST"
	APPLICATION_JSON = "application/json"
)

type Client struct {
	Prefix     string
	GatewayUrl string
}

func New() *Client {
	return &Client{
		Prefix:     constants.LicenseSignozIo,
		GatewayUrl: constants.ZeusURL,
	}
}

func init() {
	C = New()
}

func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {

	// Creating an HTTP client with a timeout for better control
	client := &http.Client{
		Timeout: 10 * time.Second,
	}

	req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
	if err != nil {
		return nil, model.BadRequest(errors.Wrap(err, "failed to create request"))
	}

	// Setting the custom header
	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)

	response, err := client.Do(req)
	if err != nil {
		return nil, model.BadRequest(errors.Wrap(err, "failed to make post request"))
	}

	body, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read validation response from %v", C.GatewayUrl)))
	}

	defer response.Body.Close()

	switch response.StatusCode {
	case 200:
		a := ValidateLicenseResponse{}
		err = json.Unmarshal(body, &a)
		if err != nil {
			return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
		}

		license, err := model.NewLicenseV3(a.Data)
		if err != nil {
			return nil, model.BadRequest(errors.Wrap(err, "failed to generate new license v3"))
		}

		return license, nil
	case 400:
		return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
			fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
	case 401:
		return nil, model.Unauthorized(errors.Wrap(fmt.Errorf(string(body)),
			fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
	default:
		return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
			fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
	}

}

func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, POST, url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", contentType)
	return req, err

}

// SendUsage reports the usage of signoz to the license server
func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
	reqString, _ := json.Marshal(usage)
	req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
	if err != nil {
		return model.BadRequest(errors.Wrap(err, "unable to create http request"))
	}

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
	}

	body, err := io.ReadAll(res.Body)
	if err != nil {
		return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
	}

	defer res.Body.Close()

	switch res.StatusCode {
	case 200, 201:
		return nil
	case 400, 401:
		return model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
			"bad request error received from license.signoz.io"))
	default:
		return model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
			"internal error received from license.signoz.io"))
	}
}
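A minimal sketch of calling the exported validator above, for example at startup; the license key is a placeholder:

package main

import (
	"fmt"

	"go.signoz.io/signoz/ee/query-service/integrations/signozio"
)

func main() {
	// ValidateLicenseV3 issues the GET to C.GatewayUrl+"/v2/licenses/me"
	// with the key in the X-Signoz-Cloud-Api-Key header.
	license, apiErr := signozio.ValidateLicenseV3("demo-license-key")
	if apiErr != nil {
		fmt.Println("validation failed:", apiErr.Err)
		return
	}
	fmt.Println("active license id:", license.ID)
}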
@@ -1,11 +1,12 @@
package interfaces

import (
	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
)

// Connector defines methods for interaction
// with o11y data. for example - clickhouse
type DataConnector interface {
	Start(readerReady chan bool)
	baseint.Reader
}
ee/query-service/license/db.go (new file, 248 lines)
@@ -0,0 +1,248 @@
package license

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"time"

	"github.com/jmoiron/sqlx"
	"github.com/mattn/go-sqlite3"
	"github.com/uptrace/bun"

	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/types"
	"go.uber.org/zap"
)

// Repo is the license repo; it stores license keys in a secured DB
type Repo struct {
	db    *sqlx.DB
	bundb *bun.DB
}

// NewLicenseRepo initiates a new license repo
func NewLicenseRepo(db *sqlx.DB, bundb *bun.DB) Repo {
	return Repo{
		db:    db,
		bundb: bundb,
	}
}

func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
	licensesData := []model.LicenseDB{}
	licenseV3Data := []*model.LicenseV3{}

	query := "SELECT id,key,data FROM licenses_v3"

	err := r.db.Select(&licensesData, query)
	if err != nil {
		return nil, fmt.Errorf("failed to get licenses from db: %v", err)
	}

	for _, l := range licensesData {
		var licenseData map[string]interface{}
		err := json.Unmarshal([]byte(l.Data), &licenseData)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
		}

		license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
		if err != nil {
			return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
		}
		licenseV3Data = append(licenseV3Data, license)
	}

	return licenseV3Data, nil
}

// GetActiveLicense fetches the latest active license from DB.
// If the license is not present, expect a nil license and a nil error in the output.
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
	activeLicenseV3, err := r.GetActiveLicenseV3(ctx)
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
	}

	if activeLicenseV3 == nil {
		return nil, nil
	}
	activeLicenseV2 := model.ConvertLicenseV3ToLicenseV2(activeLicenseV3)
	return activeLicenseV2, nil
}

func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error) {
	var err error
	licenses := []model.LicenseDB{}

	query := "SELECT id,key,data FROM licenses_v3"

	err = r.db.Select(&licenses, query)
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
	}

	var active *model.LicenseV3
	for _, l := range licenses {
		var licenseData map[string]interface{}
		err := json.Unmarshal([]byte(l.Data), &licenseData)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
		}

		license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
		if err != nil {
			return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
		}

		if active == nil &&
			(license.ValidFrom != 0) &&
			(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
			active = license
		}
		if active != nil &&
			license.ValidFrom > active.ValidFrom &&
			(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
			active = license
		}
	}

	return active, nil
}

// InsertLicenseV3 inserts a new license v3 in db
func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {

	query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`

	// the license is an entity owned by zeus, so we store the entire license here without defining a schema
	licenseData, err := json.Marshal(l.Data)
	if err != nil {
		return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
	}

	_, err = r.db.ExecContext(ctx,
		query,
		l.ID,
		l.Key,
		string(licenseData),
	)

	if err != nil {
		if sqliteErr, ok := err.(sqlite3.Error); ok {
			if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
				zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
				return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
			}
		}
		zap.L().Error("error in inserting license data: ", zap.Error(err))
		return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
	}

	return nil
}

// UpdateLicenseV3 updates a license v3 in db
func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {

	// the key and id for the license can't change so only update the data here!
	query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`

	license, err := json.Marshal(l.Data)
	if err != nil {
		return fmt.Errorf("insert license failed: license marshal error")
	}
	_, err = r.db.ExecContext(ctx,
		query,
		license,
		l.ID,
	)

	if err != nil {
		zap.L().Error("error in updating license data: ", zap.Error(err))
		return fmt.Errorf("failed to update license in db: %v", err)
	}

	return nil
}

func (r *Repo) CreateFeature(req *types.FeatureStatus) *basemodel.ApiError {

	_, err := r.bundb.NewInsert().
		Model(req).
		Exec(context.Background())
	if err != nil {
		return &basemodel.ApiError{Typ: basemodel.ErrorInternal, Err: err}
	}
	return nil
}

func (r *Repo) GetFeature(featureName string) (types.FeatureStatus, error) {
	var feature types.FeatureStatus

	err := r.bundb.NewSelect().
		Model(&feature).
		Where("name = ?", featureName).
		Scan(context.Background())

	if err != nil {
		return feature, err
	}
	if feature.Name == "" {
		return feature, basemodel.ErrFeatureUnavailable{Key: featureName}
	}
	return feature, nil
}

func (r *Repo) GetAllFeatures() ([]basemodel.Feature, error) {

	var feature []basemodel.Feature

	err := r.db.Select(&feature,
		`SELECT * FROM feature_status;`)
	if err != nil {
		return feature, err
	}

	return feature, nil
}

func (r *Repo) UpdateFeature(req types.FeatureStatus) error {

	_, err := r.bundb.NewUpdate().
		Model(&req).
		Where("name = ?", req.Name).
		Exec(context.Background())
	if err != nil {
		return err
	}
	return nil
}

func (r *Repo) InitFeatures(req []types.FeatureStatus) error {
	// get a feature by name; if it doesn't exist, create it. If it does exist, update it.
	for _, feature := range req {
		currentFeature, err := r.GetFeature(feature.Name)
		if err != nil && err == sql.ErrNoRows {
			err := r.CreateFeature(&feature)
			if err != nil {
				return err
			}
			continue
		} else if err != nil {
			return err
		}
		feature.Usage = int(currentFeature.Usage)
		if feature.Usage >= feature.UsageLimit && feature.UsageLimit != -1 {
			feature.Active = false
		}
		err = r.UpdateFeature(feature)
		if err != nil {
			return err
		}
	}
	return nil
}
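A minimal sketch of exercising Repo against an on-disk SQLite file. The database filename is arbitrary, the bun dialect wiring is an assumption about how the application constructs its *bun.DB, and the licenses_v3 table is assumed to already exist (the migrations that create it are not part of this compare).

package main

import (
	"context"
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/sqlitedialect"

	"go.signoz.io/signoz/ee/query-service/license"
)

func main() {
	// sqlx.DB embeds *sql.DB, which bun wraps with a SQLite dialect.
	sqlxDB := sqlx.MustOpen("sqlite3", "signoz.db")
	bunDB := bun.NewDB(sqlxDB.DB, sqlitedialect.New())

	repo := license.NewLicenseRepo(sqlxDB, bunDB)
	active, err := repo.GetActiveLicenseV3(context.Background())
	if err != nil {
		panic(err)
	}
	if active == nil {
		fmt.Println("no active license; basic plan applies")
		return
	}
	fmt.Println("active license:", active.ID)
}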
327
ee/query-service/license/manager.go
Normal file
327
ee/query-service/license/manager.go
Normal file
@@ -0,0 +1,327 @@
package license

import (
	"context"
	"sync/atomic"
	"time"

	"github.com/jmoiron/sqlx"
	"github.com/pkg/errors"
	"github.com/uptrace/bun"

	"sync"

	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/types"
	"go.signoz.io/signoz/pkg/types/authtypes"

	validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/query-service/telemetry"
	"go.uber.org/zap"
)

var LM *Manager

// validate and update the license every 24 hours
var validationFrequency = 24 * 60 * time.Minute

type Manager struct {
	repo  *Repo
	mutex sync.Mutex

	validatorRunning bool

	// done ends license validation; this matters for gracefully
	// stopping validation and protecting against inconsistent updates
	done chan struct{}

	// terminated waits for the validate goroutine to end
	terminated chan struct{}

	// last time the license was validated
	lastValidated int64

	// keep track of validation failure attempts
	failedAttempts uint64

	// keep track of the active license and features
	activeLicenseV3 *model.LicenseV3
	activeFeatures  basemodel.FeatureSet
}

func StartManager(db *sqlx.DB, bundb *bun.DB, features ...basemodel.Feature) (*Manager, error) {
	if LM != nil {
		return LM, nil
	}

	repo := NewLicenseRepo(db, bundb)
	m := &Manager{
		repo: &repo,
	}
	if err := m.start(features...); err != nil {
		return m, err
	}

	LM = m
	return m, nil
}

// start loads the active license into memory and initiates the validator
func (lm *Manager) start(features ...basemodel.Feature) error {
	return lm.LoadActiveLicenseV3(features...)
}

func (lm *Manager) Stop() {
	close(lm.done)
	<-lm.terminated
}

func (lm *Manager) SetActiveV3(l *model.LicenseV3, features ...basemodel.Feature) {
	lm.mutex.Lock()
	defer lm.mutex.Unlock()

	if l == nil {
		return
	}

	lm.activeLicenseV3 = l
	lm.activeFeatures = append(l.Features, features...)
	// set default features
	setDefaultFeatures(lm)

	err := lm.InitFeatures(lm.activeFeatures)
	if err != nil {
		zap.L().Panic("Couldn't activate features", zap.Error(err))
	}
	if !lm.validatorRunning {
		// we want to make sure only one validator runs;
		// we already hold the lock, so we are good to go
		lm.validatorRunning = true
		go lm.ValidatorV3(context.Background())
	}
}

func setDefaultFeatures(lm *Manager) {
	lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...)
}

func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
	active, err := lm.repo.GetActiveLicenseV3(context.Background())
	if err != nil {
		return err
	}

	if active != nil {
		lm.SetActiveV3(active, features...)
	} else {
		zap.L().Info("No active license found, defaulting to basic plan")
		// if no active license is found, we default to the basic (free) plan with all default features
		lm.activeFeatures = model.BasicPlan
		setDefaultFeatures(lm)
		err := lm.InitFeatures(lm.activeFeatures)
		if err != nil {
			zap.L().Error("Couldn't initialize features", zap.Error(err))
			return err
		}
	}

	return nil
}

func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.LicenseV3, apiError *model.ApiError) {
	licenses, err := lm.repo.GetLicensesV3(ctx)
	if err != nil {
		return nil, model.InternalError(err)
	}

	for _, l := range licenses {
		if lm.activeLicenseV3 != nil && l.Key == lm.activeLicenseV3.Key {
			l.IsCurrent = true
		}
		if l.ValidUntil == -1 {
			// subscriptions have no end date as such, but to show the user
			// some validity we default to a one-year timespan (in seconds)
			l.ValidUntil = l.ValidFrom + 31556926
		}
		response = append(response, l)
	}

	return response, nil
}

// ValidatorV3 revalidates the license at a fixed interval
func (lm *Manager) ValidatorV3(ctx context.Context) {
	zap.L().Info("ValidatorV3 started!")
	defer close(lm.terminated)

	tick := time.NewTicker(validationFrequency)
	defer tick.Stop()

	lm.ValidateV3(ctx)
	for {
		select {
		case <-lm.done:
			return
		default:
			// the nested select gives lm.done priority over the ticker,
			// so Stop() is honored even when a tick is already pending
			select {
			case <-lm.done:
				return
			case <-tick.C:
				lm.ValidateV3(ctx)
			}
		}
	}
}

func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {
	license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
	if apiError != nil {
		zap.L().Error("failed to validate license", zap.Error(apiError.Err))
		return apiError
	}

	err := lm.repo.UpdateLicenseV3(ctx, license)
	if err != nil {
		return model.BadRequest(errors.Wrap(err, "failed to update the new license"))
	}
	lm.SetActiveV3(license)

	return nil
}

func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
	zap.L().Info("License validation started")
	if lm.activeLicenseV3 == nil {
		return nil
	}

	defer func() {
		lm.mutex.Lock()

		lm.lastValidated = time.Now().Unix()
		if reterr != nil {
			zap.L().Error("License validation completed with error", zap.Error(reterr))

			atomic.AddUint64(&lm.failedAttempts, 1)
			// default to the basic plan if validation fails three consecutive times
			if atomic.LoadUint64(&lm.failedAttempts) > 3 {
				zap.L().Error("License validation completed with error for three consecutive times, defaulting to basic plan", zap.String("license_id", lm.activeLicenseV3.ID), zap.Bool("license_validation", false))
				lm.activeLicenseV3 = nil
				lm.activeFeatures = model.BasicPlan
				setDefaultFeatures(lm)
				err := lm.InitFeatures(lm.activeFeatures)
				if err != nil {
					zap.L().Error("Couldn't initialize features", zap.Error(err))
				}
				lm.done <- struct{}{}
				lm.validatorRunning = false
			}

			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
				map[string]interface{}{"err": reterr.Error()}, "", true, false)
		} else {
			// reset the failed attempts counter
			atomic.StoreUint64(&lm.failedAttempts, 0)
			zap.L().Info("License validation completed with no errors")
		}

		lm.mutex.Unlock()
	}()

	err := lm.RefreshLicense(ctx)

	if err != nil {
		return err
	}
	return nil
}

func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
	defer func() {
		if errResponse != nil {
			claims, ok := authtypes.ClaimsFromContext(ctx)
			if ok {
				telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
					map[string]interface{}{"err": errResponse.Err.Error()}, claims.Email, true, false)
			}
		}
	}()

	license, apiError := validate.ValidateLicenseV3(licenseKey)
	if apiError != nil {
		zap.L().Error("failed to get the license", zap.Error(apiError.Err))
		return nil, apiError
	}

	// insert the new license into the sqlite db
	err := lm.repo.InsertLicenseV3(ctx, license)
	if err != nil {
		zap.L().Error("failed to activate license", zap.Error(err))
		return nil, err
	}

	// license is valid, activate it
	lm.SetActiveV3(license)
	return license, nil
}

// CheckFeature is used internally by backend routines
// for feature gating
func (lm *Manager) CheckFeature(featureKey string) error {
	feature, err := lm.repo.GetFeature(featureKey)
	if err != nil {
		return err
	}
	if feature.Active {
		return nil
	}
	return basemodel.ErrFeatureUnavailable{Key: featureKey}
}

// GetFeatureFlags returns the currently active features
func (lm *Manager) GetFeatureFlags() (basemodel.FeatureSet, error) {
	return lm.repo.GetAllFeatures()
}

func (lm *Manager) InitFeatures(features basemodel.FeatureSet) error {
	featureStatus := make([]types.FeatureStatus, len(features))
	for i, f := range features {
		featureStatus[i] = types.FeatureStatus{
			Name:       f.Name,
			Active:     f.Active,
			Usage:      int(f.Usage),
			UsageLimit: int(f.UsageLimit),
			Route:      f.Route,
		}
	}
	return lm.repo.InitFeatures(featureStatus)
}

func (lm *Manager) UpdateFeatureFlag(feature basemodel.Feature) error {
	return lm.repo.UpdateFeature(types.FeatureStatus{
		Name:       feature.Name,
		Active:     feature.Active,
		Usage:      int(feature.Usage),
		UsageLimit: int(feature.UsageLimit),
		Route:      feature.Route,
	})
}

func (lm *Manager) GetFeatureFlag(key string) (basemodel.Feature, error) {
	featureStatus, err := lm.repo.GetFeature(key)
	if err != nil {
		return basemodel.Feature{}, err
	}
	return basemodel.Feature{
		Name:       featureStatus.Name,
		Active:     featureStatus.Active,
		Usage:      int64(featureStatus.Usage),
		UsageLimit: int64(featureStatus.UsageLimit),
		Route:      featureStatus.Route,
	}, nil
}

// GetRepo returns the license repo
func (lm *Manager) GetRepo() *Repo {
	return lm.repo
}
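
Putting the pieces together, a caller starts the manager once at boot and then gates functionality through CheckFeature. A minimal usage sketch, assuming db (*sqlx.DB) and bundb (*bun.DB) are already open; their construction is outside this diff:

// Usage sketch only; not part of this change.
lm, err := license.StartManager(db, bundb)
if err != nil {
	zap.L().Fatal("failed to start license manager", zap.Error(err))
}

if err := lm.CheckFeature("SSO"); err != nil {
	// feature is inactive or unknown: fall back to free-plan behaviour
	zap.L().Info("SSO not available on the current plan", zap.Error(err))
}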
@@ -3,42 +3,89 @@ package main
import (
	"context"
	"flag"
	"log"
	"os"
	"os/signal"
	"strconv"
	"time"

	"github.com/SigNoz/signoz/ee/licensing"
	"github.com/SigNoz/signoz/ee/licensing/httplicensing"
	"github.com/SigNoz/signoz/ee/query-service/app"
	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
	"github.com/SigNoz/signoz/ee/zeus"
	"github.com/SigNoz/signoz/ee/zeus/httpzeus"
	"github.com/SigNoz/signoz/pkg/config"
	"github.com/SigNoz/signoz/pkg/config/envprovider"
	"github.com/SigNoz/signoz/pkg/config/fileprovider"
	"github.com/SigNoz/signoz/pkg/factory"
	pkglicensing "github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/version"
	pkgzeus "github.com/SigNoz/signoz/pkg/zeus"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
	"go.signoz.io/signoz/ee/query-service/app"
	"go.signoz.io/signoz/pkg/config"
	"go.signoz.io/signoz/pkg/config/envprovider"
	"go.signoz.io/signoz/pkg/config/fileprovider"
	"go.signoz.io/signoz/pkg/query-service/auth"
	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/version"
	"go.signoz.io/signoz/pkg/signoz"
	"go.signoz.io/signoz/pkg/types/authtypes"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	prommodel "github.com/prometheus/common/model"

	zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
	zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Deprecated: Please use the logger from pkg/instrumentation.
func initZapLog() *zap.Logger {
func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
	config := zap.NewProductionConfig()
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
	config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	logger, _ := config.Build()

	otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
	consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig)
	defaultLogLevel := zapcore.InfoLevel

	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNameKey.String("query-service"),
	)

	core := zapcore.NewTee(
		zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
	)

	if enableQueryServiceLogOTLPExport {
		ctx, cancel := context.WithTimeout(ctx, time.Second*30)
		defer cancel()
		conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
		if err != nil {
			log.Fatalf("failed to establish connection: %v", err)
		} else {
			logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
			if err != nil {
				logExportBatchSizeInt = 512
			}
			ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
				BatchSize:      logExportBatchSizeInt,
				ResourceSchema: semconv.SchemaURL,
				Resource:       res,
			}))
			core = zapcore.NewTee(
				zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
				zapcore.NewCore(otlpEncoder, zapcore.NewMultiWriteSyncer(ws), defaultLogLevel),
			)
		}
	}
	logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))

	return logger
}
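
The zapcore.NewTee call above is what fans each log entry out to every attached sink. A small self-contained sketch of the same pattern, with a plain buffer standing in for the OTLP syncer (imports assumed: bytes, os, go.uber.org/zap, go.uber.org/zap/zapcore):

// Standalone illustration of the tee pattern used by initZapLog.
var buf bytes.Buffer
enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
core := zapcore.NewTee(
	zapcore.NewCore(enc, zapcore.AddSync(os.Stdout), zapcore.InfoLevel),
	zapcore.NewCore(enc, zapcore.AddSync(&buf), zapcore.InfoLevel), // stand-in for the OTLP sync core
)
zap.New(core).Info("hello") // the entry reaches both stdout and buf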
func init() {
	prommodel.NameValidationScheme = prommodel.UTF8Validation
}

func main() {
	var promConfigPath, skipTopLvlOpsPath string

@@ -52,6 +99,7 @@ func main() {
	var useLogsNewSchema bool
	var useTraceNewSchema bool
	var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string
	var enableQueryServiceLogOTLPExport bool
	var preferSpanMetrics bool

	var maxIdleConns int
@@ -60,41 +108,33 @@ func main() {
	var gatewayUrl string
	var useLicensesV3 bool

	// Deprecated
	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
	// Deprecated
	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
	// Deprecated
	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
	// Deprecated
	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
	// Deprecated
	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
	flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
	// Deprecated
	flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
	// Deprecated
	flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
	// Deprecated
	flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
	// Deprecated
	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
	// Deprecated
	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
	flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
	flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
	// Deprecated
	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
	flag.Parse()

	loggerMgr := initZapLog()
	loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)

	zap.ReplaceGlobals(loggerMgr)
	defer loggerMgr.Sync() // flushes buffer, if any
	ctx := context.Background()

	config, err := signoz.NewConfig(ctx, config.ResolverConfig{
	version.PrintVersion()

	config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
		Uris: []string{"env:"},
		ProviderFactories: []config.ProviderFactory{
			envprovider.NewFactory(),
@@ -104,17 +144,21 @@ func main() {
		MaxIdleConns: maxIdleConns,
		MaxOpenConns: maxOpenConns,
		DialTimeout:  dialTimeout,
		Config:       promConfigPath,
	})
	if err != nil {
		zap.L().Fatal("Failed to create config", zap.Error(err))
	}

	version.Info.PrettyPrint(config.Version)

	sqlStoreFactories := signoz.NewSQLStoreProviderFactories()
	if err := sqlStoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
		zap.L().Fatal("Failed to add postgressqlstore factory", zap.Error(err))
	signoz, err := signoz.New(
		context.Background(),
		config,
		signoz.NewCacheProviderFactories(),
		signoz.NewWebProviderFactories(),
		signoz.NewSQLStoreProviderFactories(),
		signoz.NewTelemetryStoreProviderFactories(),
	)
	if err != nil {
		zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
	}

	jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
@@ -127,36 +171,23 @@ func main() {

	jwt := authtypes.NewJWT(jwtSecret, 30*time.Minute, 30*24*time.Hour)

	signoz, err := signoz.New(
		context.Background(),
		config,
		jwt,
		zeus.Config(),
		httpzeus.NewProviderFactory(),
		licensing.Config(24*time.Hour, 3),
		func(sqlstore sqlstore.SQLStore, zeus pkgzeus.Zeus, orgGetter organization.Getter) factory.ProviderFactory[pkglicensing.Licensing, pkglicensing.Config] {
			return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter)
		},
		signoz.NewEmailingProviderFactories(),
		signoz.NewCacheProviderFactories(),
		signoz.NewWebProviderFactories(),
		sqlStoreFactories,
		signoz.NewTelemetryStoreProviderFactories(),
	)
	if err != nil {
		zap.L().Fatal("Failed to create signoz", zap.Error(err))
	}

	serverOptions := &app.ServerOptions{
		Config:                     config,
		SigNoz:                     signoz,
		HTTPHostPort:               baseconst.HTTPHostPort,
		PromConfigPath:             promConfigPath,
		SkipTopLvlOpsPath:          skipTopLvlOpsPath,
		PreferSpanMetrics:          preferSpanMetrics,
		PrivateHostPort:            baseconst.PrivateHostPort,
		DisableRules:               disableRules,
		RuleRepoURL:                ruleRepoURL,
		CacheConfigPath:            cacheConfigPath,
		FluxInterval:               fluxInterval,
		FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
		Cluster:                    cluster,
		GatewayUrl:                 gatewayUrl,
		UseLogsNewSchema:           useLogsNewSchema,
		UseTraceNewSchema:          useTraceNewSchema,
		Jwt:                        jwt,
	}

@@ -165,22 +196,26 @@ func main() {
		zap.L().Fatal("Failed to create server", zap.Error(err))
	}

	if err := server.Start(ctx); err != nil {
	if err := server.Start(); err != nil {
		zap.L().Fatal("Could not start server", zap.Error(err))
	}

	signoz.Start(ctx)
	if err := auth.InitAuthCache(context.Background()); err != nil {
		zap.L().Fatal("Failed to initialize auth cache", zap.Error(err))
	}

	if err := signoz.Wait(ctx); err != nil {
	signoz.Start(context.Background())

	if err := signoz.Wait(context.Background()); err != nil {
		zap.L().Fatal("Failed to start signoz", zap.Error(err))
	}

	err = server.Stop(ctx)
	err = server.Stop()
	if err != nil {
		zap.L().Fatal("Failed to stop server", zap.Error(err))
	}

	err = signoz.Stop(ctx)
	err = signoz.Stop(context.Background())
	if err != nil {
		zap.L().Fatal("Failed to stop signoz", zap.Error(err))
	}
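
Both variants of this main follow the same lifecycle: start the server, start the shared components, block until shutdown, then stop in reverse order. A condensed, hypothetical sketch of that ordering, with error handling abbreviated and identifiers mirroring the code above:

// Condensed lifecycle sketch; not the actual main.
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
defer stop()

if err := server.Start(ctx); err != nil {
	zap.L().Fatal("Could not start server", zap.Error(err))
}
signoz.Start(ctx)

if err := signoz.Wait(ctx); err != nil { // blocks until interrupt/shutdown
	zap.L().Fatal("Failed to start signoz", zap.Error(err))
}

_ = server.Stop(ctx) // stop the HTTP layer first
_ = signoz.Stop(ctx) // then the shared providers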
12 ee/query-service/model/auth.go Normal file
@@ -0,0 +1,12 @@
package model

import (
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

// GettableInvitation overrides the base object and adds a precheck
// into the response
type GettableInvitation struct {
	*basemodel.InvitationResponseObject
	Precheck *basemodel.PrecheckResponse `json:"precheck"`
}
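
Because the base invitation object is embedded by pointer, its fields flatten into the top-level JSON next to "precheck". A hypothetical marshaling sketch (the base field values are placeholders, and encoding/json plus fmt are assumed imported):

// Hypothetical: shows the flattened JSON shape produced by embedding.
inv := model.GettableInvitation{
	InvitationResponseObject: &basemodel.InvitationResponseObject{ /* base invite fields */ },
	Precheck:                 &basemodel.PrecheckResponse{ /* sso/precheck fields */ },
}
b, _ := json.Marshal(inv)
fmt.Println(string(b)) // => {"...base fields...","precheck":{...}}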
@@ -1,4 +1,4 @@
package types
package model

import (
	"encoding/json"
@@ -6,23 +6,15 @@ import (
	"net/url"
	"strings"

	"github.com/SigNoz/signoz/pkg/types/ssotypes"
	"github.com/google/uuid"
	"github.com/pkg/errors"
	saml2 "github.com/russellhaering/gosaml2"
	"github.com/uptrace/bun"
	"go.signoz.io/signoz/ee/query-service/sso"
	"go.signoz.io/signoz/ee/query-service/sso/saml"
	"go.signoz.io/signoz/pkg/types"
	"go.uber.org/zap"
)

type StorableOrgDomain struct {
	bun.BaseModel `bun:"table:org_domains"`

	TimeAuditable
	ID    uuid.UUID `json:"id" bun:"id,pk,type:text"`
	OrgID string    `json:"orgId" bun:"org_id,type:text,notnull"`
	Name  string    `json:"name" bun:"name,type:varchar(50),notnull,unique"`
	Data  string    `json:"-" bun:"data,type:text,notnull"`
}

type SSOType string

const (
@@ -30,31 +22,32 @@ const (
	GoogleAuth SSOType = "GOOGLE_AUTH"
)

// GettableOrgDomain identifies org-owned web domains for auth and other purposes
type GettableOrgDomain struct {
	StorableOrgDomain
// OrgDomain identifies org-owned web domains for auth and other purposes
type OrgDomain struct {
	Id    uuid.UUID `json:"id"`
	Name  string    `json:"name"`
	OrgId string    `json:"orgId"`
	SsoEnabled bool    `json:"ssoEnabled"`
	SsoType    SSOType `json:"ssoType"`

	SsoEnabled bool    `json:"ssoEnabled"`
	SsoType    SSOType `json:"ssoType"`
	SamlConfig       *SamlConfig        `json:"samlConfig"`
	GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`

	SamlConfig       *ssotypes.SamlConfig        `json:"samlConfig"`
	GoogleAuthConfig *ssotypes.GoogleOAuthConfig `json:"googleAuthConfig"`

	Org *Organization
	Org *types.Organization
}

func (od *GettableOrgDomain) String() string {
	return fmt.Sprintf("[%s]%s-%s ", od.Name, od.ID.String(), od.SsoType)
func (od *OrgDomain) String() string {
	return fmt.Sprintf("[%s]%s-%s ", od.Name, od.Id.String(), od.SsoType)
}

// Valid is used as a pipeline function to check if the org domain
// loaded from db is valid
func (od *GettableOrgDomain) Valid(err error) error {
func (od *OrgDomain) Valid(err error) error {
	if err != nil {
		return err
	}

	if od.ID == uuid.Nil || od.OrgID == "" {
	if od.Id == uuid.Nil || od.OrgId == "" {
		return fmt.Errorf("both id and orgId are required")
	}

@@ -62,9 +55,9 @@ func (od *GettableOrgDomain) Valid(err error) error {
}

// ValidNew checks if the org domain is valid for insertion in db
func (od *GettableOrgDomain) ValidNew() error {
func (od *OrgDomain) ValidNew() error {

	if od.OrgID == "" {
	if od.OrgId == "" {
		return fmt.Errorf("orgId is required")
	}

@@ -76,7 +69,7 @@ func (od *GettableOrgDomain) ValidNew() error {
}

// LoadConfig loads config params from json text
func (od *GettableOrgDomain) LoadConfig(jsondata string) error {
func (od *OrgDomain) LoadConfig(jsondata string) error {
	d := *od
	err := json.Unmarshal([]byte(jsondata), &d)
	if err != nil {
@@ -86,21 +79,21 @@ func (od *GettableOrgDomain) LoadConfig(jsondata string) error {
	return nil
}

func (od *GettableOrgDomain) GetSAMLEntityID() string {
func (od *OrgDomain) GetSAMLEntityID() string {
	if od.SamlConfig != nil {
		return od.SamlConfig.SamlEntity
	}
	return ""
}

func (od *GettableOrgDomain) GetSAMLIdpURL() string {
func (od *OrgDomain) GetSAMLIdpURL() string {
	if od.SamlConfig != nil {
		return od.SamlConfig.SamlIdp
	}
	return ""
}

func (od *GettableOrgDomain) GetSAMLCert() string {
func (od *OrgDomain) GetSAMLCert() string {
	if od.SamlConfig != nil {
		return od.SamlConfig.SamlCert
	}
@@ -109,7 +102,7 @@ func (od *GettableOrgDomain) GetSAMLCert() string {

// PrepareGoogleOAuthProvider creates a GoogleProvider that is used both in
// requesting OAuth and in processing the response from google
func (od *GettableOrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (ssotypes.OAuthCallbackProvider, error) {
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
	if od.GoogleAuthConfig == nil {
		return nil, fmt.Errorf("GOOGLE OAUTH is not setup correctly for this domain")
	}
@@ -118,7 +111,7 @@ func (od *GettableOrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (ssoty
}

// PrepareSamlRequest creates a request according to gosaml2
func (od *GettableOrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServiceProvider, error) {
func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServiceProvider, error) {

	// this is the url the Idp will call after login completion
	acs := fmt.Sprintf("%s://%s/%s",
@@ -140,12 +133,12 @@ func (od *GettableOrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLSe
	// currently we default it to the host from window.location (received from the browser)
	issuer := siteUrl.Host

	return ssotypes.PrepareRequest(issuer, acs, sourceUrl, od.GetSAMLEntityID(), od.GetSAMLIdpURL(), od.GetSAMLCert())
	return saml.PrepareRequest(issuer, acs, sourceUrl, od.GetSAMLEntityID(), od.GetSAMLIdpURL(), od.GetSAMLCert())
}

func (od *GettableOrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {

	fmtDomainId := strings.Replace(od.ID.String(), "-", ":", -1)
	fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)

	// build the redirect url from window.location sent by the frontend
	redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
@@ -181,6 +174,7 @@ func (od *GettableOrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err e
	return googleProvider.BuildAuthURL(relayState)

	default:
		zap.L().Error("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
		return "", fmt.Errorf("unsupported SSO config for the domain")
	}
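
A short sketch of how a login handler might drive BuildSsoUrl with the browser-supplied site URL; the handler shape, URL, and type location are illustrative only, not part of this diff:

// Illustrative only: a login handler that redirects to the SSO provider.
func ssoLoginHandler(od *model.OrgDomain) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		siteUrl, err := url.Parse("https://signoz.example.com/login") // normally taken from the request
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		ssoUrl, err := od.BuildSsoUrl(siteUrl)
		if err != nil {
			zap.L().Error("failed to build sso url", zap.Error(err))
			http.Error(w, "unable to start SSO", http.StatusInternalServerError)
			return
		}
		http.Redirect(w, r, ssoUrl, http.StatusSeeOther)
	}
}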
@@ -3,7 +3,7 @@ package model
import (
	"fmt"

	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

type ApiError struct {
Some files were not shown because too many files have changed in this diff.