Compare commits
272 commits: v0.77.0-82...variable-u
| SHA1 |
|---|
| 8b448e9df8 |
| 02ee073a97 |
| 1e2694ae03 |
| cef1d56ee8 |
| 33506cafce |
| e34e61a20d |
| da084b4686 |
| 6821efeb99 |
| c5d5c84a0e |
| 9c298e83a5 |
| 9383b6576d |
| f10f7a806f |
| 03600f4d6f |
| 9fbf111976 |
| b8dff86a56 |
| f525647b40 |
| 0a2b7ca1d8 |
| 16938c6cc0 |
| 81b8f93177 |
| 96cfb607d1 |
| f526e887cc |
| 03ab6e704b |
| 9c0134da54 |
| 175b059268 |
| dfca5b13c0 |
| ad392e81ff |
| 92ceefccee |
| 9cc4e1b56f |
| 3758ee7451 |
| 02b605d109 |
| eb86aabf3e |
| 8810693bda |
| 6334e09a60 |
| 1d379931b2 |
| 815a6d13c5 |
| 59af9d1c2f |
| 19d24da147 |
| cd1c9ddf11 |
| 7ff3286c9c |
| 27830742f9 |
| 39f07e7477 |
| 0ab50da7b0 |
| c03541cd6c |
| 727a039eb9 |
| c7db85f44c |
| 08d9a74055 |
| 503e4cdf00 |
| 224f952da7 |
| 0c28067f89 |
| 8dc749b9dd |
| 82a111e5b1 |
| e2e6c65b4d |
| f01d21cbf2 |
| 36886135d1 |
| 3648027576 |
| b80626f5e2 |
| 08579242eb |
| 6e0b50dd60 |
| 76ed58c481 |
| f4d029bd12 |
| b66af786e6 |
| 5ad68a3310 |
| 0f0693f6eb |
| 16e3c185e9 |
| 8d6671e362 |
| 5b237ee628 |
| cb08ce5e5d |
| 3fbc3dec48 |
| 5b2f897a00 |
| 73f57d8bee |
| ab17bf3558 |
| eb5a1b76b8 |
| 130ff925bd |
| 75d86cea60 |
| cf451d335c |
| e47c7cc17b |
| 629c54d3f9 |
| ed3026eeb5 |
| ccf26883c4 |
| 958924befe |
| b70c570cdc |
| 42a026469b |
| 6de0908a62 |
| fd21a4955e |
| 3dce13d29f |
| 2ce4b60c55 |
| c9888804cd |
| 413b0d9fae |
| b24095236f |
| 21d239ce68 |
| d6e4e3c5ed |
| 552b103e8b |
| 1123a9a93d |
| 8b30e3cc5c |
| b86e65d2ca |
| d5e2841083 |
| 7dad5dcd17 |
| ac0b640146 |
| e125d146b5 |
| a41ffceca4 |
| 7edb047c0c |
| 6504f2565b |
| 6b418a125b |
| 36827a1667 |
| 1118c56356 |
| bd071e3e60 |
| 36f3a2e26d |
| fee7e96176 |
| ef4e3a30fb |
| 39532d5da0 |
| 4d216bae4d |
| 21563914c7 |
| accb77f227 |
| e73e1bd078 |
| 940313d28b |
| 9815ec7d81 |
| a7cad0f1a5 |
| a624b4758d |
| ee5684b130 |
| 2f8da5957b |
| 3f6f77d0e2 |
| 5bceffbeaa |
| 9e449e2858 |
| b60588a749 |
| c322657666 |
| a1846c008a |
| a6824db622 |
| e6f69aa74c |
| a9c09f33cb |
| 9eb2196617 |
| 131759ec96 |
| 365a3e250f |
| f3a1f3cc20 |
| ae509b4ae9 |
| 43e2be0333 |
| 20a40b33ce |
| a9b07c4b47 |
| 2a5c7cc0ab |
| afb18b8142 |
| 9a580915e6 |
| 0944af3d31 |
| 9338efcefc |
| 6b9e0ce799 |
| d4c3c24849 |
| 30d935a768 |
| 073d42c416 |
| f11b9644cf |
| 87922e9577 |
| 8412727414 |
| f0a95503d9 |
| 16e0fa2eef |
| 2fa944d254 |
| b0d19035a4 |
| 054dea366e |
| aaf0b597dc |
| 19372c8194 |
| eb74adad44 |
| d5c04e1342 |
| 2b9632c8fd |
| 24920ae903 |
| 6f096632a2 |
| a42eacec4b |
| e723399f7f |
| 48936bed9b |
| ee70474cc7 |
| c3fa7144ee |
| 5dd02a5b8e |
| c0f01e4cb9 |
| fed84cb50a |
| 80545c4d07 |
| 0b1faec092 |
| ba6f31b1c3 |
| eed92978a4 |
| 41cbd316b5 |
| 8d7d33393d |
| 8d143b44b1 |
| 423aebd6eb |
| 8d630707af |
| a5b52431b7 |
| 0138d757c8 |
| 844195b84f |
| 8ff05b2e8f |
| c8c56c544e |
| 1c43655336 |
| c269c8c6b8 |
| 3142b6cc6d |
| 58e141685a |
| e17f63a50c |
| 838ef5dcc5 |
| e53d3d1269 |
| 2330420c0d |
| 65ac277074 |
| b7982ca348 |
| 2748b49a44 |
| 7345027762 |
| 68f874e433 |
| 54a82b1664 |
| 93dc585145 |
| 6a143efd2c |
| 0116eb20ab |
| 79e9d1b357 |
| b89ce82e25 |
| b43a198fd8 |
| b40ca4baf3 |
| 8df77c9221 |
| f67555576f |
| f0a4c37073 |
| 7972261237 |
| 3b4a8e5e0f |
| 5ef3b8ee3f |
| 597752a4bc |
| 07a244f569 |
| eb9385840f |
| 30b689037a |
| ba33c885d5 |
| a4ed9e4d47 |
| df5767198c |
| 81c7f3221a |
| 2cbd8733a1 |
| 71d1dfe9bd |
| 459712d25c |
| 61de2d414d |
| 0b7cd4c1a7 |
| 62c033ccf8 |
| e637487984 |
| 8fc43a00f8 |
| 031d62ca44 |
| 8c4c357351 |
| d8d8191a32 |
| a876c0a744 |
| c36f913a90 |
| ed597f00c0 |
| 4957d3ae93 |
| 8835e3493d |
| 027a1631ef |
| d7a6607a25 |
| 7a58bc58c9 |
| 88be23c3e3 |
| 8f095dfbc9 |
| 72207691a3 |
| 8998ca652e |
| f4ae5f19ff |
| d1ea608671 |
| ac7ecac2c1 |
| 64071165c4 |
| 9c25a33cd9 |
| 3100d602c4 |
| d80908a1fc |
| bc17a10550 |
| 694c185373 |
| 02f3dfefb9 |
| 3515686daf |
| c116bf05be |
| 4842e3b912 |
| 9bdf194d70 |
| 88aa29e94c |
| 1dfebed93a |
| b36d2ec4c6 |
| 089f128020 |
| e30f95c340 |
| 2c87d96d75 |
| 1f9b13dc35 |
| d831c1cb88 |
| bc72a0a591 |
| ad2b75e3f0 |
| efd4e30edf |
| f79a5a2db6 |
| 9d8e46e5b2 |
| 0320285a25 |
| e04e58d8b3 |
| fc03303c29 |
| 95b94a9da7 |
@@ -1,5 +1,4 @@
 services:

   clickhouse:
     image: clickhouse/clickhouse-server:24.1.2-alpine
     container_name: clickhouse
@@ -24,7 +23,6 @@ services:
       retries: 3
     depends_on:
       - zookeeper

   zookeeper:
     image: bitnami/zookeeper:3.7.1
     container_name: zookeeper
@@ -41,9 +39,8 @@ services:
       interval: 30s
       timeout: 5s
       retries: 3

   schema-migrator-sync:
-    image: signoz/signoz-schema-migrator:0.111.29
+    image: signoz/signoz-schema-migrator:v0.111.41
     container_name: schema-migrator-sync
     command:
       - sync
@@ -55,9 +52,8 @@ services:
     clickhouse:
       condition: service_healthy
     restart: on-failure

   schema-migrator-async:
-    image: signoz/signoz-schema-migrator:0.111.29
+    image: signoz/signoz-schema-migrator:v0.111.41
     container_name: schema-migrator-async
     command:
       - async
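The Makefile changes at the end of this compare add a `devenv-clickhouse` target that brings this stack up. A minimal usage sketch, assuming you run it from the repository root:

```bash
# Start the ClickHouse devenv stack (ClickHouse, ZooKeeper, schema migrators)
make devenv-clickhouse

# equivalent to what the target runs under the hood:
cd .devenv/docker/clickhouse && docker compose -f compose.yaml up -d
```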
27  .devenv/docker/postgres/compose.yaml (new file)
@@ -0,0 +1,27 @@
+services:
+
+  postgres:
+    image: postgres:15
+    container_name: postgres
+    environment:
+      POSTGRES_DB: signoz
+      POSTGRES_USER: postgres
+      POSTGRES_PASSWORD: password
+    healthcheck:
+      test:
+        [
+          "CMD",
+          "pg_isready",
+          "-d",
+          "signoz",
+          "-U",
+          "postgres"
+        ]
+      interval: 30s
+      timeout: 30s
+      retries: 3
+    restart: on-failure
+    ports:
+      - "127.0.0.1:5432:5432/tcp"
+    volumes:
+      - ${PWD}/fs/tmp/var/lib/postgresql/data/:/var/lib/postgresql/data/
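The Makefile diff below adds a matching `devenv-postgres` target for this file. A quick sketch of starting it and checking health (credentials come from the compose file above):

```bash
# Start the Postgres devenv container
make devenv-postgres

# confirm it is accepting connections
docker exec postgres pg_isready -d signoz -U postgres
```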
@@ -1,6 +1,7 @@
 .git
 .github
 .vscode
+.devenv
 README.md
 deploy
 sample-apps
3  .github/CODEOWNERS (vendored)
@@ -2,7 +2,7 @@
 # Owners are automatically requested for review for PRs that changes code
 # that they own.

-/frontend/ @YounixM
+/frontend/ @SigNoz/frontend @YounixM
 /frontend/src/container/MetricsApplication @srikanthccv
 /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
 /deploy/ @SigNoz/devops
@@ -11,3 +11,4 @@
 /pkg/errors/ @grandwizard28
 /pkg/factory/ @grandwizard28
 /pkg/types/ @grandwizard28
+/pkg/sqlmigration/ @vikrantgupta25
75  .github/pull_request_template.md (vendored)
@@ -1,17 +1,74 @@
-### Summary
+## 📄 Summary

-<!-- ✍️ A clear and concise description...-->
+<!-- Describe the purpose of the PR in a few sentences. What does it fix/add/update? -->

-#### Related Issues / PR's
+---

-<!-- ✍️ Add the issues being resolved here and related PR's where applicable -->
+## ✅ Changes

-#### Screenshots
+- [ ] Feature: Brief description
+- [ ] Bug fix: Brief description

-NA
+---

-<!-- ✍️ Add screenshots of before and after changes where applicable-->
+## 🏷️ Required: Add Relevant Labels

-#### Affected Areas and Manually Tested Areas
+> ⚠️ **Manually add appropriate labels in the PR sidebar**
+Please select one or more labels (as applicable):

-<!-- ✍️ Add details of blast radius and dev testing areas where applicable-->
+ex:
+
+- `frontend`
+- `backend`
+- `devops`
+- `bug`
+- `enhancement`
+- `ui`
+- `test`
+
+---
+
+## 👥 Reviewers
+
+> Tag the relevant teams for review:
+
+- [ ] @SigNoz/frontend
+- [ ] @SigNoz/backend
+- [ ] @SigNoz/devops
+
+---
+
+## 🧪 How to Test
+
+<!-- Describe how reviewers can test this PR -->
+1. ...
+2. ...
+3. ...
+
+---
+
+## 🔍 Related Issues
+
+<!-- Reference any related issues (e.g. Fixes #123, Closes #456) -->
+Closes #
+
+---
+
+## 📸 Screenshots / Screen Recording (if applicable / mandatory for UI related changes)
+
+<!-- Add screenshots or GIFs to help visualize changes -->
+
+---
+
+## 📋 Checklist
+
+- [ ] Dev Review
+- [ ] Test cases added (Unit/ Integration / E2E)
+- [ ] Manually tested the changes
+
+---
+
+## 👀 Notes for Reviewers
+
+<!-- Anything reviewers should keep in mind while reviewing -->
42  .github/workflows/README.md (vendored)
@@ -1,42 +0,0 @@
-# Github actions
-
-## Testing the UI manually on each PR
-
-First we need to make sure the UI is ready
-* Check the `Start tunnel` step in `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull-<number>-signoz.loca.lt`
-* This job will run until the PR is merged or closed to keep the local tunneling alive
-  - github will cancel this job if the PR wasn't merged after 6h
-  - if the job was cancel, go to the action and press `Re-run all jobs`
-
-Now you can open your browser at https://pull-<number>-signoz.loca.lt and check the UI.
-
-## Environment Variables
-
-To run GitHub workflow, a few environment variables needs to add in GitHub secrets
-
-<table>
-  <tr>
-    <th> Variables </th>
-    <th> Description </th>
-    <th> Example </th>
-  </tr>
-  <tr>
-    <td> REPONAME </td>
-    <td> Provide the DockerHub user/organisation name of the image. </td>
-    <td> signoz</td>
-  </tr>
-  <tr>
-    <td> DOCKERHUB_USERNAME </td>
-    <td> Docker hub username </td>
-    <td> signoz</td>
-  </tr>
-  <tr>
-    <td> DOCKERHUB_TOKEN </td>
-    <td> Docker hub password/token with push permission </td>
-    <td> **** </td>
-  </tr>
-  <tr>
-    <td> SONAR_TOKEN </td>
-    <td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
-    <td> **** </td>
-  </tr>
-</table>
82  .github/workflows/build-community.yaml (vendored, new file)
@@ -0,0 +1,82 @@
+name: build-community
+
+on:
+  push:
+    tags:
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
+
+defaults:
+  run:
+    shell: bash
+
+env:
+  PRIMUS_HOME: .primus
+  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    outputs:
+      version: ${{ steps.build-info.outputs.version }}
+      hash: ${{ steps.build-info.outputs.hash }}
+      time: ${{ steps.build-info.outputs.time }}
+      branch: ${{ steps.build-info.outputs.branch }}
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - id: token
+        name: github-token-gen
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ secrets.PRIMUS_APP_ID }}
+          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
+          owner: ${{ github.repository_owner }}
+      - name: primus-checkout
+        uses: actions/checkout@v4
+        with:
+          repository: signoz/primus
+          ref: main
+          path: .primus
+          token: ${{ steps.token.outputs.token }}
+      - name: build-info
+        run: |
+          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
+          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
+          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
+          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
+  js-build:
+    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
+    needs: prepare
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      JS_SRC: frontend
+      JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
+      JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      DOCKER_BUILD: false
+      DOCKER_MANIFEST: false
+  go-build:
+    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
+    needs: [prepare, js-build]
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      GO_VERSION: 1.23
+      GO_NAME: signoz-community
+      GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
+      GO_INPUT_ARTIFACT_PATH: frontend/build
+      GO_BUILD_CONTEXT: ./pkg/query-service
+      GO_BUILD_FLAGS: >-
+        -tags timetzdata
+        -ldflags='-linkmode external -extldflags \"-static\" -s -w
+        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
+        -X github.com/SigNoz/signoz/pkg/version.variant=community
+        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
+        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
+        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}'
+      GO_CGO_ENABLED: 1
+      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
+      DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
+      DOCKER_MANIFEST: true
+      DOCKER_PROVIDERS: dockerhub
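The `-X` entries in GO_BUILD_FLAGS stamp version metadata into package-level variables at link time. A standalone sketch of the mechanism, using a hypothetical one-file program (not from this repo):

```bash
# demo of Go's -ldflags -X: override a package-level string at link time
cat > /tmp/demo.go <<'EOF'
package main

import "fmt"

var version = "dev" // replaced by the linker via -X main.version=...

func main() { fmt.Println("version:", version) }
EOF
go build -ldflags "-X main.version=v0.78.0" -o /tmp/demo /tmp/demo.go
/tmp/demo   # prints: version: v0.78.0
```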
117  .github/workflows/build-enterprise.yaml (vendored, new file)
@@ -0,0 +1,117 @@
+name: build-enterprise
+
+on:
+  push:
+    tags:
+      - v*
+
+defaults:
+  run:
+    shell: bash
+
+env:
+  PRIMUS_HOME: .primus
+  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    outputs:
+      docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
+      version: ${{ steps.build-info.outputs.version }}
+      hash: ${{ steps.build-info.outputs.hash }}
+      time: ${{ steps.build-info.outputs.time }}
+      branch: ${{ steps.build-info.outputs.branch }}
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - id: token
+        name: github-token-gen
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ secrets.PRIMUS_APP_ID }}
+          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
+          owner: ${{ github.repository_owner }}
+      - name: primus-checkout
+        uses: actions/checkout@v4
+        with:
+          repository: signoz/primus
+          ref: main
+          path: .primus
+          token: ${{ steps.token.outputs.token }}
+      - name: build-info
+        id: build-info
+        run: |
+          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
+          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
+          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
+          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
+      - name: set-docker-providers
+        id: set-docker-providers
+        run: |
+          if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
+            echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
+          else
+            echo "providers=gcp" >> $GITHUB_OUTPUT
+          fi
+      - name: create-dotenv
+        run: |
+          mkdir -p frontend
+          echo 'CI=1' > frontend/.env
+          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
+          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
+          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
+          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
+          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
+          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
+          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
+          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
+          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
+          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
+          echo 'USERPILOT_KEY="${{ secrets.USERPILOT_KEY }}"' >> frontend/.env
+      - name: cache-dotenv
+        uses: actions/cache@v4
+        with:
+          path: frontend/.env
+          key: enterprise-dotenv-${{ github.sha }}
+  js-build:
+    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
+    needs: prepare
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      JS_SRC: frontend
+      JS_INPUT_ARTIFACT_CACHE_KEY: enterprise-dotenv-${{ github.sha }}
+      JS_INPUT_ARTIFACT_PATH: frontend/.env
+      JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
+      JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      DOCKER_BUILD: false
+      DOCKER_MANIFEST: false
+  go-build:
+    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
+    needs: [prepare, js-build]
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      GO_VERSION: 1.23
+      GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
+      GO_INPUT_ARTIFACT_PATH: frontend/build
+      GO_BUILD_CONTEXT: ./ee/query-service
+      GO_BUILD_FLAGS: >-
+        -tags timetzdata
+        -ldflags='-linkmode external -extldflags \"-static\" -s -w
+        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
+        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
+        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
+        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
+        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
+        -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
+        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1'
+      GO_CGO_ENABLED: 1
+      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
+      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
+      DOCKER_MANIFEST: true
+      DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}
126  .github/workflows/build-staging.yaml (vendored, new file)
@@ -0,0 +1,126 @@
+name: build-staging
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    types: [labeled]
+
+defaults:
+  run:
+    shell: bash
+
+env:
+  PRIMUS_HOME: .primus
+  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
+    outputs:
+      version: ${{ steps.build-info.outputs.version }}
+      hash: ${{ steps.build-info.outputs.hash }}
+      time: ${{ steps.build-info.outputs.time }}
+      branch: ${{ steps.build-info.outputs.branch }}
+      deployment: ${{ steps.build-info.outputs.deployment }}
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - id: token
+        name: github-token-gen
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ secrets.PRIMUS_APP_ID }}
+          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
+          owner: ${{ github.repository_owner }}
+      - name: primus-checkout
+        uses: actions/checkout@v4
+        with:
+          repository: signoz/primus
+          ref: main
+          path: .primus
+          token: ${{ steps.token.outputs.token }}
+      - name: build-info
+        id: build-info
+        run: |
+          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
+          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
+          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
+          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
+
+          staging_label="${{ github.event.label.name }}"
+          if [[ "${staging_label}" == "staging:"* ]]; then
+            deployment=${staging_label#"staging:"}
+          elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
+            deployment="staging"
+          else
+            echo "error: not able to determine deployment - please verify the PR label or the branch"
+            exit 1
+          fi
+          echo "deployment=${deployment}" >> $GITHUB_OUTPUT
+      - name: create-dotenv
+        run: |
+          mkdir -p frontend
+          echo 'CI=1' > frontend/.env
+          echo 'TUNNEL_URL="${{ secrets.NP_TUNNEL_URL }}"' >> frontend/.env
+          echo 'TUNNEL_DOMAIN="${{ secrets.NP_TUNNEL_DOMAIN }}"' >> frontend/.env
+          echo 'USERPILOT_KEY="${{ secrets.NP_USERPILOT_KEY }}"' >> frontend/.env
+      - name: cache-dotenv
+        uses: actions/cache@v4
+        with:
+          path: frontend/.env
+          key: staging-dotenv-${{ github.sha }}
+  js-build:
+    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
+    needs: prepare
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      JS_SRC: frontend
+      JS_INPUT_ARTIFACT_CACHE_KEY: staging-dotenv-${{ github.sha }}
+      JS_INPUT_ARTIFACT_PATH: frontend/.env
+      JS_OUTPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
+      JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      DOCKER_BUILD: false
+      DOCKER_MANIFEST: false
+  go-build:
+    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
+    needs: [prepare, js-build]
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      GO_VERSION: 1.23
+      GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
+      GO_INPUT_ARTIFACT_PATH: frontend/build
+      GO_BUILD_CONTEXT: ./ee/query-service
+      GO_BUILD_FLAGS: >-
+        -tags timetzdata
+        -ldflags='-linkmode external -extldflags \"-static\" -s -w
+        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
+        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
+        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
+        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
+        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
+        -X github.com/SigNoz/signoz/ee/zeus.url=https://api.staging.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.staging.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1'
+      GO_CGO_ENABLED: 1
+      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
+      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
+      DOCKER_MANIFEST: true
+      DOCKER_PROVIDERS: gcp
+  staging:
+    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
+    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
+    secrets: inherit
+    needs: [prepare, go-build]
+    with:
+      PRIMUS_REF: main
+      GITHUB_ENVIRONMENT: staging
+      GITHUB_SILENT: true
+      GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
+      GITHUB_EVENT_NAME: releaser
+      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"
48  .github/workflows/build.yaml (vendored)
@@ -1,48 +0,0 @@
-name: build-pipeline
-
-on:
-  pull_request:
-    branches:
-      - main
-      - release/v*
-
-jobs:
-  build-frontend:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Install dependencies
-        run: cd frontend && yarn install
-      - name: Build frontend static files
-        shell: bash
-        run: |
-          make build-frontend-static
-
-  build-signoz:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.22"
-      - name: Build signoz image
-        shell: bash
-        run: |
-          make build-signoz-amd64
-
-  build-signoz-community:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.22"
-      - name: Build signoz community image
-        shell: bash
-        run: |
-          make build-signoz-community-amd64
12  .github/workflows/commitci.yaml (vendored)
@@ -25,15 +25,3 @@ jobs:
          else
            echo "No references to 'ee' packages found in 'pkg' directory"
          fi
-  lint:
-    if: |
-      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
-      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: lint
-        uses: wagoid/commitlint-github-action@v5
39  .github/workflows/goci.yaml (vendored)
@@ -18,6 +18,7 @@ jobs:
     with:
       PRIMUS_REF: main
       GO_TEST_CONTEXT: ./...
+      GO_VERSION: 1.23
   fmt:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -26,6 +27,7 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
+      GO_VERSION: 1.23
   lint:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -34,3 +36,40 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
+      GO_VERSION: 1.23
+  deps:
+    if: |
+      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
+      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
+    uses: signoz/primus.workflows/.github/workflows/go-deps.yaml@main
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      GO_VERSION: 1.23
+  build:
+    if: |
+      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
+      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
+    runs-on: ubuntu-latest
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - name: go-install
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.23"
+      - name: qemu-install
+        uses: docker/setup-qemu-action@v3
+      - name: aarch64-install
+        run: |
+          set -ex
+          sudo apt-get update
+          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
+      - name: docker-community
+        shell: bash
+        run: |
+          make docker-build-community
+      - name: docker-enterprise
+        shell: bash
+        run: |
+          make docker-build-enterprise
6  .github/workflows/gor-signoz-community.yaml (vendored)
@@ -22,7 +22,7 @@ jobs:
        run: |
          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
      - name: build-frontend
-       run: make build-frontend-static
+       run: make js-build
      - name: upload-frontend-artifact
        uses: actions/upload-artifact@v4
        with:
@@ -58,7 +58,7 @@ jobs:
      - name: setup-go
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22"
+         go-version: "1.23"
      - name: cross-compilation-tools
        if: matrix.os == 'ubuntu-latest'
        run: |
@@ -122,7 +122,7 @@ jobs:
      - name: setup-go
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22"
+         go-version: "1.23"

      # copy the caches from build
      - name: get-sha
7  .github/workflows/gor-signoz.yaml (vendored)
@@ -35,8 +35,9 @@ jobs:
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> .env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> .env
+         echo 'USERPILOT_KEY="${{ secrets.USERPILOT_KEY }}"' >> .env
      - name: build-frontend
-       run: make build-frontend-static
+       run: make js-build
      - name: upload-frontend-artifact
        uses: actions/upload-artifact@v4
        with:
@@ -72,7 +73,7 @@ jobs:
      - name: setup-go
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22"
+         go-version: "1.23"
      - name: cross-compilation-tools
        if: matrix.os == 'ubuntu-latest'
        run: |
@@ -135,7 +136,7 @@ jobs:
      - name: setup-go
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22"
+         go-version: "1.23"

      # copy the caches from build
      - name: get-sha
53  .github/workflows/integrationci.yaml (vendored, new file)
@@ -0,0 +1,53 @@
+name: integrationci
+
+on:
+  pull_request:
+    types:
+      - labeled
+  pull_request_target:
+    types:
+      - labeled
+
+jobs:
+  test:
+    strategy:
+      fail-fast: false
+      matrix:
+        src:
+          - bootstrap
+        sqlstore-provider:
+          - postgres
+          - sqlite
+        clickhouse-version:
+          - 24.1.2-alpine
+          - 24.12-alpine
+        schema-migrator-version:
+          - v0.111.38
+        postgres-version:
+          - 15
+    if: |
+      ((github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
+      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))) && contains(github.event.pull_request.labels.*.name, 'safe-to-integrate')
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout
+        uses: actions/checkout@v4
+      - name: python
+        uses: actions/setup-python@v5
+        with:
+          python-version: 3.13
+      - name: poetry
+        run: |
+          python -m pip install poetry==2.1.2
+          python -m poetry config virtualenvs.in-project true
+          cd tests/integration && poetry install --no-root
+      - name: run
+        run: |
+          cd tests/integration && \
+          poetry run pytest \
+            --basetemp=./tmp/ \
+            src/${{matrix.src}} \
+            --sqlstore-provider ${{matrix.sqlstore-provider}} \
+            --postgres-version ${{matrix.postgres-version}} \
+            --clickhouse-version ${{matrix.clickhouse-version}} \
+            --schema-migrator-version ${{matrix.schema-migrator-version}}
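Since the workflow only shells out to poetry and pytest, the same suite can presumably be run locally with the identical commands (a sketch, assuming Python 3.13 and a running Docker daemon for the database fixtures):

```bash
# mirror the CI steps locally, picking one cell of the matrix
python -m pip install poetry==2.1.2
python -m poetry config virtualenvs.in-project true
cd tests/integration && poetry install --no-root
poetry run pytest --basetemp=./tmp/ src/bootstrap \
  --sqlstore-provider sqlite \
  --postgres-version 15 \
  --clickhouse-version 24.1.2-alpine \
  --schema-migrator-version v0.111.38
```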
4  .github/workflows/prereleaser.yaml (vendored)
@@ -1,9 +1,9 @@
 name: prereleaser

 on:
-  # schedule every wednesday 9:30 AM UTC (3pm IST)
+  # schedule every wednesday 6:30 AM UTC (12:00 PM IST)
   schedule:
-    - cron: '30 9 * * 3'
+    - cron: '30 6 * * 3'

   # allow manual triggering of the workflow by a maintainer
   workflow_dispatch:
134  .github/workflows/push.yaml (vendored)
@@ -1,134 +0,0 @@
-name: push
-
-on:
-  push:
-    branches:
-      - main
-    tags:
-      - v*
-
-jobs:
-  image-build-and-push-signoz:
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout
-        uses: actions/checkout@v4
-      - name: setup
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.22"
-      - name: setup-qemu
-        uses: docker/setup-qemu-action@v3
-      - name: setup-buildx
-        uses: docker/setup-buildx-action@v3
-        with:
-          version: latest
-      - name: docker-login
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: create-env-file
-        run: |
-          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
-          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
-          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
-          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
-          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
-          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
-          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
-          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
-          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
-          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
-          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
-      - uses: benjlevesque/short-sha@v2.2
-        id: short-sha
-      - name: branch-name
-        id: branch-name
-        uses: tj-actions/branch-names@v7.0.7
-      - name: docker-tag
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
-          fi
-      - name: cross-compilation-tools
-        run: |
-          set -ex
-          sudo apt-get update
-          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
-      - name: publish-signoz
-        run: make build-push-signoz
-      - name: qs-docker-tag
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            tag="${{ steps.branch-name.outputs.tag }}"
-            tag="${tag:1}"
-            echo "DOCKER_TAG=${tag}" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
-          fi
-      - name: publish-query-service
-        run: |
-          SIGNOZ_DOCKER_IMAGE=query-service make build-push-signoz
-
-  image-build-and-push-signoz-community:
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout
-        uses: actions/checkout@v4
-      - name: setup-go
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.22"
-      - name: setup-qemu
-        uses: docker/setup-qemu-action@v3
-      - name: setup-buildx
-        uses: docker/setup-buildx-action@v3
-        with:
-          version: latest
-      - name: docker-login
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v2.2
-        id: short-sha
-      - name: branch-name
-        id: branch-name
-        uses: tj-actions/branch-names@v7.0.7
-      - name: docker-tag
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
-          fi
-      - name: cross-compilation-tools
-        run: |
-          set -ex
-          sudo apt-get update
-          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
-      - name: publish-signoz-community
-        run: make build-push-signoz-community
-      - name: qs-docker-tag
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            tag="${{ steps.branch-name.outputs.tag }}"
-            tag="${tag:1}"
-            echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
-          fi
-      - name: publish-query-service-oss
-        run: |
-          SIGNOZ_COMMUNITY_DOCKER_IMAGE=query-service make build-push-signoz-community
16  .github/workflows/remove-label.yaml (vendored)
@@ -1,16 +0,0 @@
-name: remove-label
-
-on:
-  pull_request_target:
-    types: [synchronize]
-
-jobs:
-  remove:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Remove label testing-deploy from PR
-        uses: buildsville/add-remove-label@v2.0.0
-        with:
-          label: testing-deploy
-          type: remove
-          token: ${{ secrets.GITHUB_TOKEN }}
55  .github/workflows/staging-deployment.yaml (vendored)
@@ -1,55 +0,0 @@
-name: staging-deployment
-# Trigger deployment only on push to main branch
-on:
-  push:
-    branches:
-      - main
-jobs:
-  deploy:
-    name: Deploy latest main branch to staging
-    runs-on: ubuntu-latest
-    environment: staging
-    permissions:
-      contents: 'read'
-      id-token: 'write'
-    steps:
-      - id: 'auth'
-        uses: 'google-github-actions/auth@v2'
-        with:
-          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
-          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
-
-      - name: 'sdk'
-        uses: 'google-github-actions/setup-gcloud@v2'
-
-      - name: 'ssh'
-        shell: bash
-        env:
-          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
-          GITHUB_SHA: ${{ github.sha }}
-          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
-          GCP_ZONE: ${{ secrets.GCP_ZONE }}
-          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
-          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
-        run: |
-          read -r -d '' COMMAND <<EOF || true
-          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
-          echo "GITHUB_SHA: ${GITHUB_SHA}"
-          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
-          export OTELCOL_TAG="main"
-          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          export KAFKA_SPAN_EVAL="true"
-          docker system prune --force
-          docker pull signoz/signoz-otel-collector:main
-          docker pull signoz/signoz-schema-migrator:main
-          cd ~/signoz
-          git status
-          git add .
-          git stash push -m "stashed on $(date --iso-8601=seconds)"
-          git fetch origin
-          git checkout ${GITHUB_BRANCH}
-          git pull
-          make build-signoz-amd64
-          make run-testing
-          EOF
-          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
55  .github/workflows/testing-deployment.yaml (vendored)
@@ -1,55 +0,0 @@
-name: testing-deployment
-# Trigger deployment only on testing-deploy label on pull request
-on:
-  pull_request:
-    types: [labeled]
-jobs:
-  deploy:
-    name: Deploy PR branch to testing
-    runs-on: ubuntu-latest
-    environment: testing
-    if: ${{ github.event.label.name == 'testing-deploy' }}
-    permissions:
-      contents: 'read'
-      id-token: 'write'
-    steps:
-      - id: 'auth'
-        uses: 'google-github-actions/auth@v2'
-        with:
-          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
-          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
-
-      - name: 'sdk'
-        uses: 'google-github-actions/setup-gcloud@v2'
-
-      - name: 'ssh'
-        shell: bash
-        env:
-          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
-          GITHUB_SHA: ${{ github.sha }}
-          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
-          GCP_ZONE: ${{ secrets.GCP_ZONE }}
-          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
-          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
-        run: |
-          read -r -d '' COMMAND <<EOF || true
-          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
-          echo "GITHUB_SHA: ${GITHUB_SHA}"
-          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
-          export DEV_BUILD="1"
-          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          docker system prune --force
-          cd ~/signoz
-          git status
-          git add .
-          git stash push -m "stashed on $(date --iso-8601=seconds)"
-          git fetch origin
-          git checkout main
-          git pull
-          # This is added to include the scenerio when new commit in PR is force-pushed
-          git branch -D ${GITHUB_BRANCH}
-          git checkout --track origin/${GITHUB_BRANCH}
-          make build-signoz-amd64
-          make run-testing
-          EOF
-          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
150  .gitignore (vendored)
@@ -54,14 +54,13 @@ ee/query-service/tests/test-deploy/data/
 bin/
 .local/
 */query-service/queries.active
-ee/query-service/db

 # e2e

 e2e/node_modules/
 e2e/test-results/
 e2e/playwright-report/
 e2e/blob-report/
 e2e/playwright/.cache/
 e2e/.auth

 # go
@@ -79,6 +78,153 @@ deploy/common/clickhouse/user_scripts/

 queries.active

 # tmp
 **/tmp/**
+
+# .devenv tmp files
+.devenv/**/tmp/**
+.qodo
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# ruff
+.ruff_cache/
+
+# LSP config files
+pyrightconfig.json
+
+# End of https://www.toptal.com/developers/gitignore/api/python
17  .versions/alpine (new file)
@@ -0,0 +1,17 @@
+#### Auto generated by make docker-version-alpine. DO NOT EDIT! ####
+amd64=029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85
+unknown=5fea95373b9ec85974843f31446fa6a9df4492dddae4e1cb056193c34a20a5be
+arm=b4aef1a899e0271f06d948c9a8fa626ecdb2202d3a178bc14775dd559e23df8e
+unknown=a4d1e27e63a9d6353046eb25a2f0ec02945012b217f4364cd83a73fe6dfb0b15
+arm=4fdafe217d0922f3c3e2b4f64cf043f8403a4636685cd9c51fea2cbd1f419740
+unknown=7f21ac2018d95b2c51a5779c1d5ca6c327504adc3b0fdc747a6725d30b3f13c2
+arm64=ea3c5a9671f7b3f7eb47eab06f73bc6591df978b0d5955689a9e6f943aa368c0
+unknown=a8ba68c1a9e6eea8041b4b8f996c235163440808b9654a865976fdcbede0f433
+386=dea9f02e103e837849f984d5679305c758aba7fea1b95b7766218597f61a05ab
+unknown=3c6629bec05c8273a927d46b77428bf4a378dad911a0ae284887becdc149b734
+ppc64le=0880443bffa028dfbbc4094a32dd6b7ac25684e4c0a3d50da9e0acae355c5eaf
+unknown=bb48308f976b266e3ab39bbf9af84521959bd9c295d3c763690cf41f8df2a626
+riscv64=d76e6fbe348ff20c2931bb7f101e49379648e026de95dd37f96e00ce1909dcf7
+unknown=dd807544365f6dc187cbe6de0806adce2ea9de3e7124717d1d8e8b7a18b77b64
+s390x=b815fadf80495594eb6296a6af0bc647ae5f193e0044e07acec7e5b378c9ce2d
+unknown=74681be74a280a88abb53ff1e048eb1fb624b30d0066730df6d8afd02ba82e01
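Per the file's own header comment, this digest list is generated rather than hand-edited; presumably it is refreshed with the named target:

```bash
# regenerate .versions/alpine (target name taken from the file's header comment)
make docker-version-alpine
```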
@@ -77,3 +77,4 @@ Need assistance? Join our Slack community:
 ## Where do I go from here?

 - Set up your [development environment](docs/contributing/development.md)
+- Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo-docs.md)
319  Makefile
@@ -1,49 +1,45 @@
 #
 # Reference Guide - https://www.gnu.org/software/make/manual/make.html
 #
+##############################################################
+# variables
+##############################################################
+SHELL := /bin/bash
+SRC ?= $(shell pwd)
+NAME ?= signoz
+OS ?= $(shell uname -s | tr '[A-Z]' '[a-z]')
+ARCH ?= $(shell uname -m | sed 's/x86_64/amd64/g' | sed 's/aarch64/arm64/g')
+COMMIT_SHORT_SHA ?= $(shell git rev-parse --short HEAD)
+BRANCH_NAME ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
+VERSION ?= $(BRANCH_NAME)-$(COMMIT_SHORT_SHA)
+TIMESTAMP ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
+ARCHS ?= amd64 arm64
+TARGET_DIR ?= $(shell pwd)/target

-# Build variables
-BUILD_VERSION ?= $(shell git describe --always --tags)
-BUILD_HASH ?= $(shell git rev-parse --short HEAD)
-BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
-BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
-LICENSE_SIGNOZ_IO ?= https://license.signoz.io/api/v1
-DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
-ZEUS_URL ?= https://api.signoz.cloud
-DEV_ZEUS_URL ?= https://api.staging.signoz.cloud
-DEV_BUILD ?= "" # set to any non-empty value to enable dev build
+ZEUS_URL ?= https://api.signoz.cloud
+GO_BUILD_LDFLAG_ZEUS_URL = -X github.com/SigNoz/signoz/ee/zeus.url=$(ZEUS_URL)
+LICENSE_URL ?= https://license.signoz.io
+GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=$(LICENSE_URL)

-# Internal variables or constants.
-FRONTEND_DIRECTORY ?= frontend
-QUERY_SERVICE_DIRECTORY ?= pkg/query-service
-EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
-STANDALONE_DIRECTORY ?= deploy/docker
-SWARM_DIRECTORY ?= deploy/docker-swarm
-CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile
-GORELEASER_BIN ?= goreleaser
+GO_BUILD_VERSION_LDFLAGS = -X github.com/SigNoz/signoz/pkg/version.version=$(VERSION) -X github.com/SigNoz/signoz/pkg/version.hash=$(COMMIT_SHORT_SHA) -X github.com/SigNoz/signoz/pkg/version.time=$(TIMESTAMP) -X github.com/SigNoz/signoz/pkg/version.branch=$(BRANCH_NAME)
+GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS))
+GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/pkg/query-service
+GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community
+GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS))
+GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS))
+GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/ee/query-service
+GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO)

 GOOS ?= $(shell go env GOOS)
 GOARCH ?= $(shell go env GOARCH)
 GOPATH ?= $(shell go env GOPATH)

-REPONAME ?= signoz
-DOCKER_TAG ?= $(BUILD_VERSION)
-SIGNOZ_DOCKER_IMAGE ?= signoz
-SIGNOZ_COMMUNITY_DOCKER_IMAGE ?= signoz-community
-
-# Build-time Go variables
-PACKAGE?=go.signoz.io/signoz
-buildVersion=${PACKAGE}/pkg/query-service/version.buildVersion
-buildHash=${PACKAGE}/pkg/query-service/version.buildHash
-buildTime=${PACKAGE}/pkg/query-service/version.buildTime
-gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
-licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
-zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL
-
-LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
-PROD_LD_FLAGS=-X ${zeusURL}=${ZEUS_URL} -X ${licenseSignozIo}=${LICENSE_SIGNOZ_IO}
-DEV_LD_FLAGS=-X ${zeusURL}=${DEV_ZEUS_URL} -X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
+DOCKER_BUILD_ARCHS_COMMUNITY = $(addprefix docker-build-community-,$(ARCHS))
+DOCKERFILE_COMMUNITY = $(SRC)/pkg/query-service/Dockerfile
+DOCKER_REGISTRY_COMMUNITY ?= docker.io/signoz/signoz-community
+DOCKER_BUILD_ARCHS_ENTERPRISE = $(addprefix docker-build-enterprise-,$(ARCHS))
+DOCKERFILE_ENTERPRISE = $(SRC)/ee/query-service/Dockerfile
+DOCKER_REGISTRY_ENTERPRISE ?= docker.io/signoz/signoz
+JS_BUILD_CONTEXT = $(SRC)/frontend

+##############################################################
+# directories
+##############################################################
+$(TARGET_DIR):
+	mkdir -p $(TARGET_DIR)

 ##############################################################
 # common commands
@@ -60,11 +56,16 @@ devenv-clickhouse: ## Run clickhouse in devenv
 	@cd .devenv/docker/clickhouse; \
 	docker compose -f compose.yaml up -d

+.PHONY: devenv-postgres
+devenv-postgres: ## Run postgres in devenv
+	@cd .devenv/docker/postgres; \
+	docker compose -f compose.yaml up -d
+
 ##############################################################
-# run commands
+# go commands
 ##############################################################
-.PHONY: run-go
-run-go: ## Runs the go backend server
+.PHONY: go-run-enterprise
+go-run-enterprise: ## Runs the enterprise go backend server
 	@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
 	SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
 	SIGNOZ_WEB_ENABLED=false \
@@ -73,147 +74,127 @@ run-go: ## Runs the go backend server
 	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
 	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
 	go run -race \
-	./ee/query-service/main.go \
-	--config ./pkg/query-service/config/prometheus.yml \
-	--cluster cluster \
-	--use-logs-new-schema true \
-	--use-trace-new-schema true
+	$(GO_BUILD_CONTEXT_ENTERPRISE)/main.go \
+	--config ./conf/prometheus.yml \
+	--cluster cluster

-all: build-push-frontend build-push-signoz
+.PHONY: go-test
+go-test: ## Runs go unit tests
+	@go test -race ./...

-# Steps to build static files of frontend
-build-frontend-static:
-	@echo "------------------"
-	@echo "--> Building frontend static files"
-	@echo "------------------"
-	@cd $(FRONTEND_DIRECTORY) && \
-	rm -rf build && \
-	CI=1 yarn install && \
-	yarn build && \
-	ls -l build
+.PHONY: go-run-community
+go-run-community: ## Runs the community go backend server
+	@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
+	SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
+	SIGNOZ_WEB_ENABLED=false \
+	SIGNOZ_JWT_SECRET=secret \
+	SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
+	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
+	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
+	go run -race \
+	$(GO_BUILD_CONTEXT_COMMUNITY)/main.go \
+	--config ./conf/prometheus.yml \
+	--cluster cluster
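Together with the devenv targets above, the renamed run targets give a one-command dev loop; a sketch, assuming ClickHouse is reachable on tcp://127.0.0.1:9000:

```bash
make devenv-clickhouse   # ClickHouse + schema migrators from .devenv
make go-run-community    # run the community backend against it (SQLite metadata in signoz.db)
```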
|
||||
# Steps to build static binary of signoz
|
||||
.PHONY: build-signoz-static
|
||||
build-signoz-static:
|
||||
@echo "------------------"
|
||||
@echo "--> Building signoz static binary"
|
||||
@echo "------------------"
|
||||
@if [ $(DEV_BUILD) != "" ]; then \
|
||||
cd $(EE_QUERY_SERVICE_DIRECTORY) && \
|
||||
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
|
||||
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
|
||||
.PHONY: go-build-community $(GO_BUILD_ARCHS_COMMUNITY)
|
||||
go-build-community: ## Builds the go backend server for community
|
||||
go-build-community: $(GO_BUILD_ARCHS_COMMUNITY)
|
||||
$(GO_BUILD_ARCHS_COMMUNITY): go-build-community-%: $(TARGET_DIR)
|
||||
@mkdir -p $(TARGET_DIR)/$(OS)-$*
|
||||
@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)-community"
|
||||
@if [ $* = "arm64" ]; then \
|
||||
CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
|
||||
else \
|
||||
cd $(EE_QUERY_SERVICE_DIRECTORY) && \
|
||||
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
|
||||
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${PROD_LD_FLAGS}"; \
|
||||
CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
|
||||
fi
|
||||
|
||||
.PHONY: build-signoz-static-amd64
|
||||
build-signoz-static-amd64:
|
||||
make GOARCH=amd64 build-signoz-static
|
||||
|
||||
.PHONY: build-signoz-static-arm64
|
||||
build-signoz-static-arm64:
|
||||
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-signoz-static
|
||||
|
||||
# Steps to build static binary of signoz for all platforms
|
||||
.PHONY: build-signoz-static-all
|
||||
build-signoz-static-all: build-signoz-static-amd64 build-signoz-static-arm64 build-frontend-static
|
||||
|
||||
# Steps to build and push docker image of signoz
|
||||
.PHONY: build-signoz-amd64 build-push-signoz
|
||||
# Step to build docker image of signoz in amd64 (used in build pipeline)
|
||||
build-signoz-amd64: build-signoz-static-amd64 build-frontend-static
|
||||
@echo "------------------"
|
||||
@echo "--> Building signoz docker image for amd64"
|
||||
@echo "------------------"
|
||||
@docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||
--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" .
|
||||
|
||||
# Step to build and push docker image of signoz in amd64 and arm64 (used in push pipeline)
|
||||
build-push-signoz: build-signoz-static-all
|
||||
@echo "------------------"
|
||||
@echo "--> Building and pushing signoz docker image"
|
||||
@echo "------------------"
|
||||
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
|
||||
--push --platform linux/arm64,linux/amd64 \
|
||||
--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
|
||||
# Step to build docker image of signoz community in amd64 (used in build pipeline)
|
||||
build-signoz-community-amd64:
|
||||
@echo "------------------"
|
||||
@echo "--> Building signoz docker image for amd64"
|
||||
@echo "------------------"
|
||||
make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-signoz-amd64
|
||||
|
||||
# Step to build and push docker image of signoz community in amd64 and arm64 (used in push pipeline)
|
||||
build-push-signoz-community:
|
||||
@echo "------------------"
|
||||
@echo "--> Building and pushing signoz community docker image"
|
||||
@echo "------------------"
|
||||
make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-push-signoz
|
||||
|
||||
pull-signoz:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
|
||||
|
||||
run-signoz:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
|
||||
|
||||
run-testing:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.testing.yaml up --build -d
|
||||
|
||||
down-signoz:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
|
||||
|
||||
check-no-ee-references:
|
||||
@echo "Checking for 'ee' package references in 'pkg' directory..."
|
||||
@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
|
||||
echo "Error: Found references to 'ee' packages in 'pkg' directory"; \
|
||||
exit 1; \
|
||||
.PHONY: go-build-enterprise $(GO_BUILD_ARCHS_ENTERPRISE)
|
||||
go-build-enterprise: ## Builds the go backend server for enterprise
|
||||
go-build-enterprise: $(GO_BUILD_ARCHS_ENTERPRISE)
|
||||
$(GO_BUILD_ARCHS_ENTERPRISE): go-build-enterprise-%: $(TARGET_DIR)
|
||||
@mkdir -p $(TARGET_DIR)/$(OS)-$*
|
||||
@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
|
||||
@if [ $* = "arm64" ]; then \
|
||||
CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
|
||||
else \
|
||||
echo "No references to 'ee' packages found in 'pkg' directory"; \
|
||||
CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
|
||||
fi
|
||||
|
||||
test:
|
||||
go test ./pkg/...
|
||||
|
||||
########################################################
|
||||
# Goreleaser
|
||||
########################################################
|
||||
.PHONY: gor-snapshot gor-snapshot-histogram-quantile gor-snapshot-signoz gor-snapshot-signoz-community gor-split gor-split-histogram-quantile gor-split-signoz gor-split-signoz-community gor-merge
|
||||
|
||||
gor-snapshot:
|
||||
@if [[ ${GORELEASER_WORKDIR} ]]; then \
|
||||
${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --snapshot; \
|
||||
.PHONY: go-build-enterprise-race $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
|
||||
go-build-enterprise-race: ## Builds the go backend server for enterprise with race
|
||||
go-build-enterprise-race: $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
|
||||
$(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
|
||||
@mkdir -p $(TARGET_DIR)/$(OS)-$*
|
||||
@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
|
||||
@if [ $* = "arm64" ]; then \
|
||||
CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
|
||||
else \
|
||||
${GORELEASER_BIN} release --clean --snapshot; \
|
||||
CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
|
||||
fi
|
||||
|
||||
gor-snapshot-histogram-quantile:
|
||||
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot
|
||||
##############################################################
|
||||
# js commands
|
||||
##############################################################
|
||||
.PHONY: js-build
|
||||
js-build: ## Builds the js frontend
|
||||
@echo ">> building js frontend"
|
||||
@cd $(JS_BUILD_CONTEXT) && CI=1 yarn install && yarn build
|
||||
|
||||
gor-snapshot-signoz: build-frontend-static
|
||||
make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) goreleaser-snapshot
|
||||
##############################################################
|
||||
# docker commands
|
||||
##############################################################
|
||||
.PHONY: docker-build-community $(DOCKER_BUILD_ARCHS_COMMUNITY)
|
||||
docker-build-community: ## Builds the docker image for community
|
||||
docker-build-community: $(DOCKER_BUILD_ARCHS_COMMUNITY)
|
||||
$(DOCKER_BUILD_ARCHS_COMMUNITY): docker-build-community-%: go-build-community-% js-build
|
||||
@echo ">> building docker image for $(NAME)-community"
|
||||
@docker build -t "$(DOCKER_REGISTRY_COMMUNITY):$(VERSION)-$*" \
|
||||
--build-arg TARGETARCH="$*" \
|
||||
-f $(DOCKERFILE_COMMUNITY) $(SRC)
|
||||
|
||||
gor-snapshot-signoz-community: build-frontend-static
|
||||
make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) goreleaser-snapshot
|
||||
.PHONY: docker-buildx-community
|
||||
docker-buildx-community: ## Builds the docker image for community using buildx
|
||||
docker-buildx-community: go-build-community js-build
|
||||
@echo ">> building docker image for $(NAME)-community"
|
||||
@docker buildx build --file $(DOCKERFILE_COMMUNITY) \
|
||||
--progress plain \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--push \
|
||||
--tag $(DOCKER_REGISTRY_COMMUNITY):$(VERSION) $(SRC)
|
||||
|
||||
gor-split:
|
||||
@if [[ ${GORELEASER_WORKDIR} ]]; then \
|
||||
${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --split; \
|
||||
else \
|
||||
${GORELEASER_BIN} release --clean --split; \
|
||||
fi
|
||||
.PHONY: docker-build-enterprise $(DOCKER_BUILD_ARCHS_ENTERPRISE)
|
||||
docker-build-enterprise: ## Builds the docker image for enterprise
|
||||
docker-build-enterprise: $(DOCKER_BUILD_ARCHS_ENTERPRISE)
|
||||
$(DOCKER_BUILD_ARCHS_ENTERPRISE): docker-build-enterprise-%: go-build-enterprise-% js-build
|
||||
@echo ">> building docker image for $(NAME)"
|
||||
@docker build -t "$(DOCKER_REGISTRY_ENTERPRISE):$(VERSION)-$*" \
|
||||
--build-arg TARGETARCH="$*" \
|
||||
-f $(DOCKERFILE_ENTERPRISE) $(SRC)
|
||||
|
||||
gor-split-histogram-quantile:
|
||||
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-split
|
||||
.PHONY: docker-buildx-enterprise
|
||||
docker-buildx-enterprise: ## Builds the docker image for enterprise using buildx
|
||||
docker-buildx-enterprise: go-build-enterprise js-build
|
||||
@echo ">> building docker image for $(NAME)"
|
||||
@docker buildx build --file $(DOCKERFILE_ENTERPRISE) \
|
||||
--progress plain \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--push \
|
||||
--tag $(DOCKER_REGISTRY_ENTERPRISE):$(VERSION) $(SRC)
|
||||
|
||||
gor-split-signoz: build-frontend-static
|
||||
make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) goreleaser-split
|
||||
##############################################################
|
||||
# python commands
|
||||
##############################################################
|
||||
.PHONY: py-fmt
|
||||
py-fmt: ## Run black for integration tests
|
||||
@cd tests/integration && poetry run black .
|
||||
|
||||
gor-split-signoz-community: build-frontend-static
|
||||
make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) goreleaser-split
|
||||
.PHONY: py-lint
|
||||
py-lint: ## Run lint for integration tests
|
||||
@cd tests/integration && poetry run isort .
|
||||
@cd tests/integration && poetry run autoflake .
|
||||
@cd tests/integration && poetry run pylint .
|
||||
|
||||
gor-merge:
|
||||
${GORELEASER_BIN} continue --merge
|
||||
.PHONY: py-test
|
||||
py-test: ## Runs integration tests
|
||||
@cd tests/integration && poetry run pytest --basetemp=./tmp/ -vv --capture=no src/
|
||||
@@ -3,6 +3,12 @@
|
||||
# Do not modify this file
|
||||
#
|
||||
|
||||
##################### Version #####################
|
||||
version:
|
||||
banner:
|
||||
# Whether to enable the version banner on startup.
|
||||
enabled: true
|
||||
|
||||
##################### Instrumentation #####################
|
||||
instrumentation:
|
||||
logs:
|
||||
@@ -44,7 +50,7 @@ cache:
|
||||
# Time-to-live for cache entries in memory. Specify the duration in ns
|
||||
ttl: 60000000000
|
||||
# The interval at which the cache will be cleaned up
|
||||
cleanupInterval: 1m
|
||||
cleanup_interval: 1m
|
||||
# redis: Uses Redis as the caching backend.
|
||||
redis:
|
||||
# The hostname or IP address of the Redis server.
|
||||
@@ -66,7 +72,6 @@ sqlstore:
|
||||
# The path to the SQLite database file.
|
||||
path: /var/lib/signoz/signoz.db
|
||||
|
||||
|
||||
##################### APIServer #####################
|
||||
apiserver:
|
||||
timeout:
|
||||
@@ -82,21 +87,39 @@ apiserver:
|
||||
# List of routes to exclude from request/response logging.
|
||||
excluded_routes:
|
||||
- /api/v1/health
|
||||
|
||||
- /api/v1/version
|
||||
- /
|
||||
|
||||
##################### TelemetryStore #####################
|
||||
telemetrystore:
|
||||
# Specifies the telemetrystore provider to use.
|
||||
provider: clickhouse
|
||||
# Maximum number of idle connections in the connection pool.
|
||||
max_idle_conns: 50
|
||||
# Maximum number of open connections to the database.
|
||||
max_open_conns: 100
|
||||
# Maximum time to wait for a connection to be established.
|
||||
dial_timeout: 5s
|
||||
# Specifies the telemetrystore provider to use.
|
||||
provider: clickhouse
|
||||
clickhouse:
|
||||
# The DSN to use for ClickHouse.
|
||||
dsn: http://localhost:9000
|
||||
# The DSN to use for clickhouse.
|
||||
dsn: tcp://localhost:9000
|
||||
# The query settings for clickhouse.
|
||||
settings:
|
||||
max_execution_time: 0
|
||||
max_execution_time_leaf: 0
|
||||
timeout_before_checking_execution_speed: 0
|
||||
max_bytes_to_read: 0
|
||||
max_result_rows_for_ch_query: 0
|
||||
|
||||
##################### Prometheus #####################
|
||||
prometheus:
|
||||
active_query_tracker:
|
||||
# Whether to enable the active query tracker.
|
||||
enabled: true
|
||||
# The path to use for the active query tracker.
|
||||
path: ""
|
||||
# The maximum number of concurrent queries.
|
||||
max_concurrent: 20
|
||||
|
||||
##################### Alertmanager #####################
|
||||
alertmanager:
|
||||
@@ -109,7 +132,7 @@ alertmanager:
|
||||
# The poll interval for periodically syncing the alertmanager with the config in the store.
|
||||
poll_interval: 1m
|
||||
# The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself.
|
||||
external_url: http://localhost:9093
|
||||
external_url: http://localhost:8080
|
||||
# The global configuration for the alertmanager. An exhaustive list of fields can be found in the upstream: https://github.com/prometheus/alertmanager/blob/efa05feffd644ba4accb526e98a8c6545d26a783/config/config.go#L833
|
||||
global:
|
||||
# ResolveTimeout is the time after which an alert is declared resolved if it has not been updated.
|
||||
@@ -141,3 +164,9 @@ alertmanager:
|
||||
maintenance_interval: 15m
|
||||
# Retention of the notification logs.
|
||||
retention: 120h
|
||||
|
||||
|
||||
##################### Analytics #####################
|
||||
analytics:
|
||||
# Whether to enable analytics.
|
||||
enabled: false
|
||||
|
||||
@@ -174,11 +174,9 @@ services:
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:v0.76.2
|
||||
image: signoz/signoz:v0.83.0
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
@@ -208,7 +206,7 @@ services:
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:v0.111.34
|
||||
image: signoz/signoz-otel-collector:v0.111.41
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
@@ -232,7 +230,7 @@ services:
|
||||
- signoz
|
||||
schema-migrator:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:v0.111.34
|
||||
image: signoz/signoz-schema-migrator:v0.111.41
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
@@ -110,11 +110,9 @@ services:
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:v0.76.2
|
||||
image: signoz/signoz:v0.83.0
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
@@ -143,7 +141,7 @@ services:
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:v0.111.34
|
||||
image: signoz/signoz-otel-collector:v0.111.41
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
@@ -167,7 +165,7 @@ services:
|
||||
- signoz
|
||||
schema-migrator:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:v0.111.34
|
||||
image: signoz/signoz-schema-migrator:v0.111.41
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
@@ -26,7 +26,7 @@ processors:
|
||||
detectors: [env, system]
|
||||
timeout: 2s
|
||||
signozspanmetrics/delta:
|
||||
metrics_exporter: clickhousemetricswrite
|
||||
metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
|
||||
metrics_flush_interval: 60s
|
||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||
dimensions_cache_size: 100000
|
||||
@@ -64,8 +64,10 @@ exporters:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true
|
||||
disable_v2: true
|
||||
clickhousemetricswrite/prometheus:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
disable_v2: true
|
||||
signozclickhousemetrics:
|
||||
dsn: tcp://clickhouse:9000/signoz_metrics
|
||||
clickhouselogsexporter:
|
||||
|
||||
@@ -177,12 +177,10 @@ services:
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:${DOCKER_TAG:-v0.76.2}
|
||||
image: signoz/signoz:${VERSION:-v0.83.0}
|
||||
container_name: signoz
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
@@ -212,7 +210,7 @@ services:
|
||||
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.41}
|
||||
container_name: signoz-otel-collector
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
@@ -238,7 +236,7 @@ services:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.41}
|
||||
container_name: schema-migrator-sync
|
||||
command:
|
||||
- sync
|
||||
@@ -249,7 +247,7 @@ services:
|
||||
condition: service_healthy
|
||||
schema-migrator-async:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.41}
|
||||
container_name: schema-migrator-async
|
||||
command:
|
||||
- async
|
||||
|
||||
@@ -1,199 +0,0 @@
|
||||
version: "3"
|
||||
x-common: &common
|
||||
networks:
|
||||
- signoz-net
|
||||
restart: unless-stopped
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
x-clickhouse-defaults: &clickhouse-defaults
|
||||
!!merge <<: *common
|
||||
# adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
tty: true
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9363"
|
||||
signoz.io/path: "/metrics"
|
||||
depends_on:
|
||||
init-clickhouse:
|
||||
condition: service_completed_successfully
|
||||
zookeeper-1:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- wget
|
||||
- --spider
|
||||
- -q
|
||||
- 0.0.0.0:8123/ping
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
x-zookeeper-defaults: &zookeeper-defaults
|
||||
!!merge <<: *common
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
user: root
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9141"
|
||||
signoz.io/path: "/metrics"
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD-SHELL
|
||||
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
x-db-depend: &db-depend
|
||||
!!merge <<: *common
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
condition: service_completed_successfully
|
||||
services:
|
||||
init-clickhouse:
|
||||
!!merge <<: *common
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
container_name: signoz-init-clickhouse
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |
|
||||
version="v0.0.1"
|
||||
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
|
||||
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
|
||||
cd /tmp
|
||||
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
|
||||
tar -xvzf histogram-quantile.tar.gz
|
||||
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
|
||||
restart: on-failure
|
||||
volumes:
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
zookeeper-1:
|
||||
!!merge <<: *zookeeper-defaults
|
||||
container_name: signoz-zookeeper-1
|
||||
ports:
|
||||
- "2181:2181"
|
||||
- "2888:2888"
|
||||
- "3888:3888"
|
||||
volumes:
|
||||
- zookeeper-1:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=1
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
|
||||
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
|
||||
clickhouse:
|
||||
!!merge <<: *clickhouse-defaults
|
||||
container_name: signoz-clickhouse
|
||||
ports:
|
||||
- "9000:9000"
|
||||
- "8123:8123"
|
||||
- "9181:9181"
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- clickhouse:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:${DOCKER_TAG:-v0.76.2}
|
||||
container_name: signoz
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --gateway-url=https://api.staging.signoz.cloud
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
|
||||
- ../common/dashboards:/root/config/dashboards
|
||||
- sqlite:/var/lib/signoz/
|
||||
environment:
|
||||
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
|
||||
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-standalone-amd
|
||||
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- wget
|
||||
- --spider
|
||||
- -q
|
||||
- localhost:8080/api/v1/health
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.34}
|
||||
container_name: signoz-otel-collector
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
- --copy-path=/var/tmp/collector-config.yaml
|
||||
- --feature-gates=-pkg.translator.prometheus.NormalizeName
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
depends_on:
|
||||
signoz:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
container_name: schema-migrator-sync
|
||||
command:
|
||||
- sync
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
restart: on-failure
|
||||
schema-migrator-async:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
container_name: schema-migrator-async
|
||||
command:
|
||||
- async
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
restart: on-failure
|
||||
networks:
|
||||
signoz-net:
|
||||
name: signoz-net
|
||||
volumes:
|
||||
clickhouse:
|
||||
name: signoz-clickhouse
|
||||
sqlite:
|
||||
name: signoz-sqlite
|
||||
zookeeper-1:
|
||||
name: signoz-zookeeper-1
|
||||
@@ -110,12 +110,10 @@ services:
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:${DOCKER_TAG:-v0.76.2}
|
||||
image: signoz/signoz:${VERSION:-v0.83.0}
|
||||
container_name: signoz
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
@@ -144,7 +142,7 @@ services:
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.41}
|
||||
container_name: signoz-otel-collector
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
@@ -166,7 +164,7 @@ services:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.41}
|
||||
container_name: schema-migrator-sync
|
||||
command:
|
||||
- sync
|
||||
@@ -178,7 +176,7 @@ services:
|
||||
restart: on-failure
|
||||
schema-migrator-async:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.41}
|
||||
container_name: schema-migrator-async
|
||||
command:
|
||||
- async
|
||||
|
||||
@@ -26,7 +26,7 @@ processors:
|
||||
detectors: [env, system]
|
||||
timeout: 2s
|
||||
signozspanmetrics/delta:
|
||||
metrics_exporter: clickhousemetricswrite
|
||||
metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
|
||||
metrics_flush_interval: 60s
|
||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||
dimensions_cache_size: 100000
|
||||
@@ -62,10 +62,12 @@ exporters:
|
||||
use_new_schema: true
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
disable_v2: true
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true
|
||||
clickhousemetricswrite/prometheus:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
disable_v2: true
|
||||
signozclickhousemetrics:
|
||||
dsn: tcp://clickhouse:9000/signoz_metrics
|
||||
clickhouselogsexporter:
|
||||
|
||||
@@ -93,7 +93,7 @@ check_os() {
|
||||
;;
|
||||
Red\ Hat*)
|
||||
desired_os=1
|
||||
os="red hat"
|
||||
os="rhel"
|
||||
package_manager="yum"
|
||||
;;
|
||||
CentOS*)
|
||||
|
||||
@@ -61,7 +61,7 @@ This command:
|
||||
|
||||
1. Run the backend server:
|
||||
```bash
|
||||
make run-go
|
||||
make go-run-community
|
||||
```
|
||||
|
||||
2. Verify it's working:
|
||||
|
||||
103
docs/contributing/go/errors.md
Normal file
@@ -0,0 +1,103 @@
|
||||
# Errors
|
||||
|
||||
SigNoz includes its own structured [errors](/pkg/errors/errors.go) package. It is built on top of Go's `error` interface, extending it with additional context that helps produce more meaningful error messages throughout the application.
|
||||
|
||||
## How to use it?
|
||||
|
||||
To use the SigNoz structured errors package, use these functions instead of the standard library alternatives:
|
||||
|
||||
```go
|
||||
// Instead of errors.New()
|
||||
errors.New(typ, code, message)
|
||||
|
||||
// Instead of fmt.Errorf()
|
||||
errors.Newf(typ, code, message, args...)
|
||||
```
|
||||
|
||||
### Typ
|
||||
The Typ (read as Type, defined as `typ`) is used to categorize errors across the codebase and is loosely coupled with HTTP/GRPC status codes. All predefined types can be found in [pkg/errors/type.go](/pkg/errors/type.go). For example:
|
||||
|
||||
- `TypeInvalidInput` - Indicates invalid input was provided
|
||||
- `TypeNotFound` - Indicates a resource was not found
|
||||
|
||||
By design, `typ` is unexported and cannot be declared outside of [errors](/pkg/errors/errors.go) package. This ensures that it is consistent across the codebase and is used in a way that is meaningful.
|
||||
|
||||
### Code
|
||||
Codes are used to provide more granular categorization within types. For instance, a type of `TypeInvalidInput` might have codes like `CodeInvalidEmail` or `CodeInvalidPassword`.
|
||||
|
||||
To create new error codes, use the `errors.MustNewCode` function:
|
||||
|
||||
```go
|
||||
var (
|
||||
CodeThingAlreadyExists = errors.MustNewCode("thing_already_exists")
|
||||
CodeThingNotFound = errors.MustNewCode("thing_not_found")
|
||||
)
|
||||
```
|
||||
|
||||
> 💡 **Note**: Error codes must match the regex `^[a-z_]+$`; otherwise, `errors.MustNewCode` will panic.
|
||||
|
||||
## Show me some examples
|
||||
|
||||
### Using the error
|
||||
A basic example of using the error:
|
||||
|
||||
```go
|
||||
var (
|
||||
CodeThingAlreadyExists = errors.MustNewCode("thing_already_exists")
|
||||
)
|
||||
|
||||
func CreateThing(id string) error {
|
||||
t, err := thing.GetFromStore(id)
|
||||
if err != nil {
|
||||
if errors.Ast(err, errors.TypeNotFound) {
|
||||
// thing was not found, create it
|
||||
return thing.Create(id)
|
||||
}
|
||||
|
||||
// something else went wrong, wrap the error with more context
|
||||
return errors.Wrapf(err, errors.TypeInternal, errors.CodeUnknown, "failed to get thing from store")
|
||||
}
|
||||
|
||||
return errors.Newf(errors.TypeAlreadyExists, CodeThingAlreadyExists, "thing with id %s already exists", id)
|
||||
}
|
||||
```
|
||||
|
||||
### Changing the error
|
||||
Sometimes you may want to change the error while preserving the message:
|
||||
|
||||
```go
|
||||
func GetUserSecurely(id string) (*User, error) {
|
||||
user, err := repository.GetUser(id)
|
||||
if err != nil {
|
||||
if errors.Ast(err, errors.TypeNotFound) {
|
||||
// Convert NotFound to Forbidden for security reasons
|
||||
return nil, errors.New(errors.TypeForbidden, errors.CodeAccessDenied, "access denied to requested resource")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return user, nil
|
||||
}
|
||||
```
|
||||
|
||||
## Why do we need this?
|
||||
|
||||
In a large codebase like SigNoz, error handling is critical for maintaining reliability, debuggability, and a good user experience. We believe that it is the **responsibility of a function** to return **well-defined** errors that **accurately describe what went wrong**. With our structured error system:
|
||||
|
||||
- Functions can create precise errors with appropriate additional context
|
||||
- Callers can make informed decisions based on the additional context
|
||||
- Error context is preserved and enhanced as it moves up the call stack
|
||||
|
||||
The caller (which can be another function or a HTTP/gRPC handler or something else entirely), can then choose to use this error to take appropriate actions such as:
|
||||
|
||||
- A function can branch into different paths based on the context
|
||||
- An HTTP/gRPC handler can derive the correct status code and message from the error and send it to the client (see the sketch after this list)
|
||||
- Logging systems can capture structured error information for better diagnostics
|
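
To make the list concrete, here is a minimal sketch of how a handler might derive an HTTP status code from the error's type. The helper is hypothetical (the real mapping lives in the HTTP layer and may differ); it only assumes the `errors.Ast` predicate shown in the examples above.

```go
// statusFromErr is a hypothetical helper, not SigNoz's actual implementation.
func statusFromErr(err error) int {
	switch {
	case errors.Ast(err, errors.TypeInvalidInput):
		return http.StatusBadRequest
	case errors.Ast(err, errors.TypeNotFound):
		return http.StatusNotFound
	case errors.Ast(err, errors.TypeForbidden):
		return http.StatusForbidden
	case errors.Ast(err, errors.TypeAlreadyExists):
		return http.StatusConflict
	default:
		return http.StatusInternalServerError
	}
}
```
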
||||
|
||||
Although this might seem too verbose in places, it makes the code more maintainable and consistent. Slightly verbose code is better than clever code that doesn't provide enough context.
|
||||
|
||||
## What should I remember?
|
||||
|
||||
- Think about error handling as you write your code, not as an afterthought.
|
||||
- Always use the [errors](/pkg/errors/errors.go) package instead of the standard library's `errors.New()` or `fmt.Errorf()`.
|
||||
- Always assign appropriate codes to errors when creating them instead of using the "catch all" error codes defined in [pkg/errors/code.go](/pkg/errors/code.go).
|
||||
- Use `errors.Wrapf()` to add context to errors while preserving the original when appropriate.
|
||||
11
docs/contributing/go/readme.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Go
|
||||
|
||||
This document provides an overview of contributing to the SigNoz backend written in Go. The SigNoz backend is built with Go, focusing on performance, maintainability, and developer experience. We strive for clean, idiomatic code that follows established Go practices while addressing the unique needs of an observability platform.
|
||||
|
||||
We adhere to three primary style guides as our foundation:
|
||||
|
||||
- [Effective Go](https://go.dev/doc/effective_go) - For writing idiomatic Go code
|
||||
- [Code Review Comments](https://go.dev/wiki/CodeReviewComments) - For understanding common comments in code reviews
|
||||
- [Google Style Guide](https://google.github.io/styleguide/go/) - Additional practices from Google
|
||||
|
||||
We **recommend** (almost enforce) reviewing these guides before contributing to the codebase. They provide valuable insights into writing idiomatic Go code and will help you understand our approach to backend development. In addition, we have a few rules that make certain areas stricter than the above; these can be found in area-specific files in this package.
|
||||
94
docs/contributing/go/sql.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# SQL
|
||||
SigNoz utilizes a relational database to store metadata including organization information, user data and other settings.
|
||||
|
||||
## How to use it?
|
||||
|
||||
The database interface is defined in [SQLStore](/pkg/sqlstore/sqlstore.go). SigNoz leverages the Bun ORM to interact with the underlying database. To access the database instance, use the `BunDBCtx` function. For operations that require transactions across multiple database operations, use the `RunInTxCtx` function. This function embeds a transaction in the context, which propagates through various functions in the callback.
|
||||
|
||||
```go
|
||||
type Thing struct {
|
||||
bun.BaseModel
|
||||
|
||||
ID types.Identifiable `bun:",embed"`
|
||||
SomeColumn string `bun:"some_column"`
|
||||
TimeAuditable types.TimeAuditable `bun:",embed"`
|
||||
OrgID string `bun:"org_id"`
|
||||
}
|
||||
|
||||
func GetThing(ctx context.Context, id string) (*Thing, error) {
|
||||
thing := new(Thing)
|
||||
err := sqlstore.
|
||||
BunDBCtx(ctx).
|
||||
NewSelect().
|
||||
Model(thing).
|
||||
Where("id = ?", id).
|
||||
Scan(ctx)
|
||||
|
||||
return thing, err
|
||||
}
|
||||
|
||||
func CreateThing(ctx context.Context, thing *Thing) error {
|
||||
_, err := sqlstore.
|
||||
BunDBCtx(ctx).
|
||||
NewInsert().
|
||||
Model(thing).
|
||||
Exec(ctx)

return err
|
||||
}
|
||||
```
|
||||
|
||||
> 💡 **Note**: Always use line breaks while working with SQL queries to enhance code readability.
|
||||
|
||||
> 💡 **Note**: Always use the `new` function to create new instances of structs.
|
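
The examples above use `BunDBCtx` directly. For multi-statement work, a transactional variant might look like the sketch below; the exact `RunInTxCtx` signature (in particular the second, options argument) is an assumption here, so check [SQLStore](/pkg/sqlstore/sqlstore.go) for the real one.

```go
func RenameThing(ctx context.Context, id string, name string) error {
	// RunInTxCtx embeds a transaction in ctx; every BunDBCtx call inside the
	// callback picks it up, so both statements commit or roll back together.
	return sqlstore.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
		thing := new(Thing)
		err := sqlstore.
			BunDBCtx(ctx).
			NewSelect().
			Model(thing).
			Where("id = ?", id).
			Scan(ctx)
		if err != nil {
			return err
		}

		thing.SomeColumn = name
		_, err = sqlstore.
			BunDBCtx(ctx).
			NewUpdate().
			Model(thing).
			WherePK().
			Exec(ctx)
		return err
	})
}
```
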
||||
|
||||
## What are hooks?
|
||||
|
||||
Hooks are user-defined functions that execute before and/or after specific database operations. These hooks are particularly useful for generating telemetry data such as logs, traces, and metrics, providing visibility into database interactions. Hooks are defined in the [SQLStoreHook](/pkg/sqlstore/sqlstore.go) interface.
|
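
As a rough illustration, a query hook that logs every statement and its duration could look like the sketch below. It assumes Bun's `bun.QueryHook` interface; it is not the hook SigNoz actually registers.

```go
type loggingHook struct{}

func (loggingHook) BeforeQuery(ctx context.Context, event *bun.QueryEvent) context.Context {
	return ctx
}

func (loggingHook) AfterQuery(ctx context.Context, event *bun.QueryEvent) {
	// event.StartTime is recorded before the query runs, so this is the total duration.
	log.Printf("query=%q duration=%s err=%v", event.Query, time.Since(event.StartTime), event.Err)
}

// Registration happens once, on the *bun.DB instance:
// db.AddQueryHook(loggingHook{})
```
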
||||
|
||||
## How is the schema designed?
|
||||
|
||||
SigNoz implements a star schema design with the organizations table as the central entity. All other tables link to the organizations table via foreign key constraints on the `org_id` column. This design ensures that every entity within the system is either directly or indirectly associated with an organization.
|
||||
|
||||
```mermaid
|
||||
erDiagram
|
||||
ORGANIZATIONS {
|
||||
string id PK
|
||||
timestamp created_at
|
||||
timestamp updated_at
|
||||
}
|
||||
ENTITY_A {
|
||||
string id PK
|
||||
timestamp created_at
|
||||
timestamp updated_at
|
||||
string org_id FK
|
||||
}
|
||||
ENTITY_B {
|
||||
string id PK
|
||||
timestamp created_at
|
||||
timestamp updated_at
|
||||
string org_id FK
|
||||
}
|
||||
|
||||
ORGANIZATIONS ||--o{ ENTITY_A : contains
|
||||
ORGANIZATIONS ||--o{ ENTITY_B : contains
|
||||
```
|
||||
|
||||
> 💡 **Note**: There are rare exceptions to the above star schema design. Consult with the maintainers before deviating from the above design.
|
||||
|
||||
All tables follow a consistent primary key pattern using an `id` column (referenced by the `types.Identifiable` struct) and include `created_at` and `updated_at` columns (referenced by the `types.TimeAuditable` struct) for audit purposes.
|
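
For illustration, the embedded structs could be shaped roughly like this (hypothetical definitions; see the types package for the real ones):

```go
// Identifiable provides the primary key column shared by all tables.
type Identifiable struct {
	ID string `bun:"id,pk"`
}

// TimeAuditable provides the audit columns shared by all tables.
type TimeAuditable struct {
	CreatedAt time.Time `bun:"created_at"`
	UpdatedAt time.Time `bun:"updated_at"`
}
```
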
||||
|
||||
## How to write migrations?
|
||||
|
||||
For schema migrations, use the [SQLMigration](/pkg/sqlmigration/sqlmigration.go) interface and write the migration in the same package. When creating migrations, adhere to these guidelines:
|
||||
|
||||
- Do not implement **`ON CASCADE` foreign key constraints**. Deletion operations should be handled explicitly in application logic rather than delegated to the database.
|
||||
- Do not **import types from the types package** in the `sqlmigration` package. Instead, define the required types within the migration package itself. This practice ensures migration stability as the core types evolve over time.
|
||||
- Do not implement **`Down` migrations**. As the codebase matures, we may introduce this capability, but for now, the `Down` function should remain empty.
|
||||
- Always write **idempotent** migrations. This means that if the migration is run multiple times, it should not cause an error (see the sketch after this list).
|
||||
- A migration which is **dependent on the underlying dialect** (sqlite, postgres, etc.) should be written as part of the [SQLDialect](/pkg/sqlstore/sqlstore.go) interface. The implementation needs to go in the dialect-specific package of the respective database.
|
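
Putting the guidelines together, an idempotent migration might look like the following sketch. The `Up`/`Down` signatures and the struct names are assumptions; see [SQLMigration](/pkg/sqlmigration/sqlmigration.go) for the real interface.

```go
// The migration defines its own copy of the type instead of importing from
// the types package, so it stays stable as the core types evolve.
type thing struct {
	bun.BaseModel `bun:"table:things"`

	ID        string    `bun:"id,pk"`
	CreatedAt time.Time `bun:"created_at"`
	UpdatedAt time.Time `bun:"updated_at"`
	OrgID     string    `bun:"org_id"`
}

type addThings struct{}

func (m *addThings) Up(ctx context.Context, db *bun.DB) error {
	// IfNotExists makes the migration safe to run more than once.
	_, err := db.NewCreateTable().
		Model((*thing)(nil)).
		IfNotExists().
		ForeignKey(`("org_id") REFERENCES "organizations" ("id")`).
		Exec(ctx)
	return err
}

func (m *addThings) Down(ctx context.Context, db *bun.DB) error {
	// Down migrations are intentionally left empty for now.
	return nil
}
```
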
||||
|
||||
## What should I remember?
|
||||
|
||||
- Use `BunDBCtx` and `RunInTxCtx` to access the database instance and execute transactions respectively.
|
||||
- While designing new tables, ensure the consistency of `id`, `created_at`, `updated_at` and an `org_id` column with a foreign key constraint to the `organizations` table (unless the table serves as a transitive entity not directly associated with an organization but indirectly associated with one).
|
||||
- Implement deletion logic in the application rather than relying on cascading deletes in the database.
|
||||
- While writing migrations, adhere to the guidelines mentioned above.
|
||||
BIN
docs/img/otel-demo-docker-containers.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 143 KiB |
BIN
docs/img/otel-demo-helm.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 157 KiB |
BIN
docs/img/otel-demo-pods.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 76 KiB |
BIN
docs/img/otel-demo-services.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 306 KiB |
246
docs/otel-demo-docs.md
Normal file
@@ -0,0 +1,246 @@
|
||||
# Configuring OpenTelemetry Demo App with SigNoz
|
||||
|
||||
[The OpenTelemetry Astronomy Shop](https://github.com/open-telemetry/opentelemetry-demo) is an e-commerce web application, with **15 core microservices** in a **distributed system** that communicate over gRPC. Designed as a **polyglot** environment, it leverages a diverse set of programming languages, including Go, Python, .NET, Java, and others, showcasing cross-language instrumentation with OpenTelemetry. The intention is to provide a quickstart application for sending data and experiencing SigNoz firsthand.
|
||||
|
||||
This guide provides a step-by-step walkthrough for setting up the **OpenTelemetry Demo App** with **SigNoz** as the observability backend. It outlines the steps to export telemetry data to **SigNoz self-hosted with Docker**, **SigNoz self-hosted with Kubernetes** and **SigNoz cloud**.
|
||||
<br/>
|
||||
|
||||
__Table of Contents__
|
||||
- [Send data to SigNoz Self-hosted with Docker](#send-data-to-signoz-self-hosted-with-docker)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Clone the OpenTelemetry Demo App Repository](#clone-the-opentelemetry-demo-app-repository)
|
||||
- [Modify OpenTelemetry Collector Config](#modify-opentelemetry-collector-config)
|
||||
- [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app)
|
||||
- [Monitor with SigNoz (Docker)](#monitor-with-signoz-docker)
|
||||
- [Send data to SigNoz Self-hosted with Kubernetes](#send-data-to-signoz-self-hosted-with-kubernetes)
|
||||
- [Prerequisites](#prerequisites-1)
|
||||
- [Install Helm Repo and Charts](#install-helm-repo-and-charts)
|
||||
- [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app-1)
|
||||
- [Monitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
|
||||
- [What's next](#whats-next)
|
||||
|
||||
|
||||
# Send data to SigNoz Self-hosted with Docker
|
||||
|
||||
In this guide you will install the OTel demo application using Docker and send telemetry data to SigNoz hosted with Docker, referred to as SigNoz [Docker] from now on.
|
||||
|
||||
|
||||
## Prerequisites
|
||||
- Docker and Docker Compose installed
|
||||
- 6 GB of RAM for the application [as per OpenTelemetry documentation]
|
||||
- Nice to have Docker Desktop, for easy monitoring
|
||||
|
||||
|
||||
## Clone the OpenTelemetry Demo App Repository
|
||||
Clone the OTel demo app to any folder of your choice.
|
||||
```sh
|
||||
# Clone the OpenTelemetry Demo repository
|
||||
git clone https://github.com/open-telemetry/opentelemetry-demo.git
|
||||
cd opentelemetry-demo
|
||||
```
|
||||
|
||||
## Modify OpenTelemetry Collector Config
|
||||
|
||||
By default, the collector in the demo application will merge the configuration from two files:
|
||||
|
||||
1. otelcol-config.yml [we don't touch this]
|
||||
2. otelcol-config-extras.yml [we modify this]
|
||||
|
||||
To add SigNoz [Docker] as the backend, open the file `src/otel-collector/otelcol-config-extras.yml` and add the following,
|
||||
```yaml
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "http://host.docker.internal:4317"
|
||||
tls:
|
||||
insecure: true
|
||||
debug:
|
||||
verbosity: detailed
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
metrics:
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
exporters: [spanmetrics, otlp]
|
||||
logs:
|
||||
exporters: [otlp]
|
||||
```
|
||||
|
||||
The SigNoz OTel collector [SigNoz's otel-collector service] listens on port 4317 on localhost. When the OTel demo app is running within a Docker container and needs to transmit telemetry data to SigNoz, it cannot directly reference 'localhost', as this would refer to the container's own internal network. Instead, Docker provides a special DNS name, `host.docker.internal`, which resolves to the host machine's IP address from within containers. By configuring the OpenTelemetry Demo application to send data to `host.docker.internal:4317`, we establish a network path that allows the containerized application to transmit telemetry data across the container boundary to the SigNoz OTel collector listening on the host machine's port 4317.
|
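
If you want to sanity-check that the collector endpoint is reachable independently of the demo app, a minimal Go program using the OpenTelemetry Go SDK can emit a single test span. This is a sketch, assuming you have Go and the SDK modules available; the tracer and span names are arbitrary.

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// From the host, target localhost:4317; from inside a container,
	// this would be host.docker.internal:4317 instead.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}

	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	// Shutdown flushes the batched span before the process exits.
	defer func() { _ = tp.Shutdown(ctx) }()

	_, span := tp.Tracer("smoke-test").Start(ctx, "hello-signoz")
	span.End()
}
```
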
||||
|
||||
>
|
||||
> Note: When merging extra configuration values with the existing collector config (`src/otel-collector/otelcol-config.yml`), objects are merged and arrays are replaced, so previously configured pipelines get overridden.
|
||||
The spanmetrics exporter must be included in the array of exporters for the traces pipeline if overridden. Not including this exporter will result in an error.
|
||||
>
|
||||
<br>
|
||||
<u>To send data to SigNoz Cloud</u>
|
||||
|
||||
If you want to send data to SigNoz Cloud instead, open the file `src/otel-collector/otelcol-config-extras.yml` and add the following,
|
||||
```yaml
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "https://ingest.{your-region}.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
signoz-access-token: <SIGNOZ-KEY>
|
||||
debug:
|
||||
verbosity: detailed
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
metrics:
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
exporters: [spanmetrics, otlp]
|
||||
logs:
|
||||
exporters: [otlp]
|
||||
```
|
||||
Remember to replace the region and ingestion key with the proper values obtained from your account.
|
||||
|
||||
|
||||
## Start the OpenTelemetry Demo App
|
||||
|
||||
Both SigNoz and the OTel demo app [the frontend-proxy service, to be precise] default to port 8080. To prevent a port allocation conflict, configure the OTel demo application to use port 8081 as the `ENVOY_PORT` value, as shown below, and run the docker compose command.
|
||||
|
||||
```sh
|
||||
ENVOY_PORT=8081 docker compose up -d
|
||||
```
|
||||
This spins up multiple microservices with OpenTelemetry instrumentation enabled. You can verify this by running:
|
||||
```sh
|
||||
docker compose ps -a
|
||||
```
|
||||
The result should look similar to this,
|
||||

|
||||
|
||||
|
||||
|
||||
Navigate to `http://localhost:8081/` to access the OTel demo app UI. Generate some traffic to send to SigNoz [Docker].
|
||||
|
||||
## Monitor with SigNoz [Docker]
|
||||
SigNoz exposes its UI at `http://localhost:8080/`. You should be able to see multiple services listed, as shown in the snapshot below.
|
||||
|
||||
|
||||

|
||||
|
||||
This verifies that your OTel demo app is successfully sending telemetry data to SigNoz [Docker] as expected.
|
||||
|
||||
|
||||
# Send data to SigNoz Self-hosted with Kubernetes
|
||||
|
||||
In this guide you will install the OTel demo application using Helm and send telemetry data to SigNoz hosted with Kubernetes, referred to as SigNoz [Kubernetes] from now on.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Helm installed
|
||||
- 6 GB of free RAM for the application [as per OpenTelemetry documentation]
|
||||
- A Kubernetes cluster (EKS, GKE, Minikube)
|
||||
- kubectl [CLI for Kubernetes]
|
||||
|
||||
>Note: We will install the OTel demo app using Helm charts, as recommended by OpenTelemetry. If you wish to install using kubectl, follow [this](https://opentelemetry.io/docs/demo/kubernetes-deployment/#install-using-kubectl).
|
||||
|
||||
|
||||
## Install Helm Repo and Charts
|
||||
You’ll need to **add the Helm repository** to start sending data to SigNoz.
|
||||
|
||||
```sh
|
||||
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
|
||||
```
|
||||
The OpenTelemetry Collector’s configuration is exposed in the Helm chart. All additions made will be merged into the default configuration. We use this capability to add SigNoz as an exporter and to configure the pipelines as desired.
|
||||
|
||||
For this, we have to create a `values.yaml` which will override the existing configuration that comes with the Helm chart.
|
||||
|
||||
```yaml
|
||||
default:
|
||||
env:
|
||||
- name: OTEL_SERVICE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: "metadata.labels['app.kubernetes.io/component']"
|
||||
- name: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
|
||||
value: cumulative
|
||||
- name: OTEL_RESOURCE_ATTRIBUTES
|
||||
value: 'service.name=$(OTEL_SERVICE_NAME),service.namespace=opentelemetry-demo'
|
||||
- name: OTEL_COLLECTOR_NAME
|
||||
value: signoz-otel-collector.<namespace>.svc.cluster.local
|
||||
```
|
||||
Replace `<namespace>` with the namespace in which SigNoz is installed. This file will override the chart’s existing settings with our new ones, ensuring telemetry data is sent to SigNoz [Kubernetes].
|
||||
|
||||
> Note: When merging YAML values with Helm, objects are merged and arrays are replaced. The spanmetrics exporter must be included in the array of exporters for the traces pipeline if overridden. Not including this exporter will result in an error.
|
||||
|
||||
<br>
|
||||
<u>To send data to SigNoz cloud</u>
|
||||
|
||||
If you wish to send data to a cloud instance of SigNoz, we have to create a `values.yaml` which will override the existing configuration that comes with the Helm chart.
|
||||
|
||||
```yaml
|
||||
opentelemetry-collector:
|
||||
config:
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "https://ingest.{your-region}.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
signoz-access-token: <SIGNOZ-KEY>
|
||||
debug:
|
||||
verbosity: detailed
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
exporters: [spanmetrics, otlp]
|
||||
metrics:
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
exporters: [otlp]
|
||||
```
|
||||
Make sure to replace the region and key with the values obtained from your account.
|
||||
|
||||
Now **install the Helm chart** with a release name and namespace of your choice. For the code snippet below, let's take *my-otel-demo* as the release name and *otel-demo* as the namespace,
|
||||
|
||||
```sh
|
||||
# Create a new Kubernetes namespace called "otel-demo"
|
||||
kubectl create namespace otel-demo
|
||||
# Install the OpenTelemetry Demo Helm chart with the release name "my-otel-demo"
|
||||
helm install my-otel-demo open-telemetry/opentelemetry-demo --namespace otel-demo -f values.yaml
|
||||
```
|
||||
You should see a similar output on your terminal,
|
||||

|
||||
|
||||
To verify that all the pods are running,
|
||||
```sh
|
||||
kubectl get pods -n otel-demo
|
||||
```
|
||||
The output should look similar to this,
|
||||
|
||||

|
||||
|
||||
## Start the OpenTelemetry Demo App
|
||||
|
||||
To expose the OTel demo app UI [frontend-proxy service], use the following command (replace my-otel-demo with your Helm chart release name):
|
||||
|
||||
```sh
|
||||
kubectl port-forward svc/my-otel-demo-frontend-proxy 8081:8080
|
||||
```
|
||||
Navigate to `http://localhost:8081/` to access the OTel demo app UI. Generate some traffic to send to SigNoz [Kubernetes].
|
||||
|
||||
|
||||
|
||||
## Monitor with SigNoz [Kubernetes]
|
||||
SigNoz exposes its UI at `http://localhost:8080/`. You should be able to see multiple services listed, as shown in the snapshot below.
|
||||
|
||||
|
||||

|
||||
|
||||
This verifies that your OTel demo app is successfully sending telemetry data to SigNoz [Kubernetes] as expected.
|
||||
|
||||
|
||||
|
||||
# What's next?
|
||||
|
||||
|
||||
|
||||
Don't forget to check our OpenTelemetry [track](https://signoz.io/resource-center/opentelemetry/), guaranteed to take you from a newbie to sensei in no time!
|
||||
|
||||
Also, from one OTel fan to another, we at [SigNoz](https://signoz.io/) are building an open-source, OTel-native observability platform (one of its kind). So, show us love - star us on [GitHub](https://github.com/SigNoz/signoz), nitpick our [docs](https://signoz.io/docs/introduction/), or just tell your app we’re the ones who’ll catch its crashes mid-flight and finally shush all the 3am panic calls!
|
||||
@@ -2,22 +2,31 @@ package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"go.signoz.io/signoz/pkg/types/authtypes"
|
||||
eeTypes "github.com/SigNoz/signoz/ee/types"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Pat struct {
|
||||
store sqlstore.SQLStore
|
||||
uuid *authtypes.UUID
|
||||
headers []string
|
||||
}
|
||||
|
||||
func NewPat(headers []string) *Pat {
|
||||
return &Pat{uuid: authtypes.NewUUID(), headers: headers}
|
||||
func NewPat(store sqlstore.SQLStore, headers []string) *Pat {
|
||||
return &Pat{store: store, uuid: authtypes.NewUUID(), headers: headers}
|
||||
}
|
||||
|
||||
func (p *Pat) Wrap(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
var values []string
|
||||
var patToken string
|
||||
var pat eeTypes.StorablePersonalAccessToken
|
||||
|
||||
for _, header := range p.headers {
|
||||
values = append(values, r.Header.Get(header))
|
||||
}
|
||||
@@ -27,10 +36,56 @@ func (p *Pat) Wrap(next http.Handler) http.Handler {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
patToken, ok := authtypes.UUIDFromContext(ctx)
|
||||
if !ok {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
err = p.store.BunDB().NewSelect().Model(&pat).Where("token = ?", patToken).Scan(r.Context())
|
||||
if err != nil {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
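// Skip expired tokens; an ExpiresAt of 0 means the token never expires.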
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// get user from db
|
||||
user := types.User{}
|
||||
err = p.store.BunDB().NewSelect().Model(&user).Where("id = ?", pat.UserID).Scan(r.Context())
|
||||
if err != nil {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
role, err := types.NewRole(user.Role)
|
||||
if err != nil {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
jwt := authtypes.Claims{
|
||||
UserID: user.ID.String(),
|
||||
Role: role,
|
||||
Email: user.Email,
|
||||
OrgID: user.OrgID,
|
||||
}
|
||||
|
||||
ctx = authtypes.NewContextWithClaims(ctx, jwt)
|
||||
|
||||
r = r.WithContext(ctx)
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
|
||||
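// The request has been served; record the token's last-used time on a best-effort basis.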
pat.LastUsed = time.Now().Unix()
|
||||
_, err = p.store.BunDB().NewUpdate().Model(&pat).Column("last_used").Where("token = ?", patToken).Where("revoked = false").Exec(r.Context())
|
||||
if err != nil {
|
||||
zap.L().Error("Failed to update PAT last used in db", zap.Error(err))
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
203
ee/modules/user/impluser/handler.go
Normal file
@@ -0,0 +1,203 @@
|
||||
package impluser
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/http/render"
|
||||
"github.com/SigNoz/signoz/pkg/modules/user"
|
||||
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// Handler embeds the base handler implementation and overrides it with enterprise-specific behaviour
|
||||
type Handler struct {
|
||||
user.Handler // Embed the base handler interface
|
||||
module user.Module
|
||||
}
|
||||
|
||||
func NewHandler(module user.Module) user.Handler {
|
||||
baseHandler := impluser.NewHandler(module)
|
||||
return &Handler{
|
||||
Handler: baseHandler,
|
||||
module: module,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) Login(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var req types.PostableLoginRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
if req.RefreshToken == "" {
|
||||
// the EE handler wrapper passes the feature flag value in context
|
||||
ssoAvailable, ok := ctx.Value(types.SSOAvailable).(bool)
|
||||
if !ok {
|
||||
render.Error(w, errors.New(errors.TypeInternal, errors.CodeInternal, "failed to retrieve SSO availability"))
|
||||
return
|
||||
}
|
||||
|
||||
if ssoAvailable {
|
||||
_, err := h.module.CanUsePassword(ctx, req.Email)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
user, err := h.module.GetAuthenticatedUser(ctx, req.OrgID, req.Email, req.Password, req.RefreshToken)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
jwt, err := h.module.GetJWTForUser(ctx, user)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
gettableLoginResponse := &types.GettableLoginResponse{
|
||||
GettableUserJwt: jwt,
|
||||
UserID: user.ID.String(),
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, gettableLoginResponse)
|
||||
}
|
||||
|
||||
// Override only the methods you need with enterprise-specific implementations
|
||||
func (h *Handler) LoginPrecheck(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// assume user is valid unless proven otherwise and assign default values for rest of the fields
|
||||
|
||||
email := r.URL.Query().Get("email")
|
||||
sourceUrl := r.URL.Query().Get("ref")
|
||||
orgID := r.URL.Query().Get("orgID")
|
||||
|
||||
resp, err := h.module.LoginPrecheck(ctx, orgID, email, sourceUrl)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, resp)
|
||||
|
||||
}
|
||||
|
||||
func (h *Handler) AcceptInvite(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := new(types.PostableAcceptInvite)
|
||||
if err := json.NewDecoder(r.Body).Decode(req); err != nil {
|
||||
render.Error(w, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to decode user"))
|
||||
return
|
||||
}
|
||||
|
||||
// get invite object
|
||||
invite, err := h.module.GetInviteByToken(ctx, req.InviteToken)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
orgDomain, err := h.module.GetAuthDomainByEmail(ctx, invite.Email)
|
||||
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
precheckResp := &types.GettableLoginPrecheck{
|
||||
SSO: false,
|
||||
IsUser: false,
|
||||
}
|
||||
|
||||
if invite.Name == "" && req.DisplayName != "" {
|
||||
invite.Name = req.DisplayName
|
||||
}
|
||||
|
||||
user, err := types.NewUser(invite.Name, invite.Email, invite.Role, invite.OrgID)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
if orgDomain != nil && orgDomain.SsoEnabled {
|
||||
// sso is enabled, create user and respond precheck data
|
||||
err = h.module.CreateUser(ctx, user)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
// check if sso is enforced for the org
|
||||
precheckResp, err = h.module.LoginPrecheck(ctx, invite.OrgID, user.Email, req.SourceURL)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
} else {
|
||||
password, err := types.NewFactorPassword(req.Password)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
user, err = h.module.CreateUserWithPassword(ctx, user, password)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
precheckResp.IsUser = true
|
||||
}
|
||||
|
||||
// delete the invite
|
||||
if err := h.module.DeleteInvite(ctx, invite.OrgID, invite.ID); err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, precheckResp)
|
||||
}
|
||||
|
||||
func (h *Handler) GetInvite(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
token := mux.Vars(r)["token"]
|
||||
sourceUrl := r.URL.Query().Get("ref")
|
||||
invite, err := h.module.GetInviteByToken(ctx, token)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
// precheck the user
|
||||
precheckResp, err := h.module.LoginPrecheck(ctx, invite.OrgID, invite.Email, sourceUrl)
|
||||
if err != nil {
|
||||
render.Error(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
gettableInvite := &types.GettableEEInvite{
|
||||
GettableInvite: *invite,
|
||||
PreCheck: precheckResp,
|
||||
}
|
||||
|
||||
render.Success(w, http.StatusOK, gettableInvite)
|
||||
return
|
||||
}
|
||||
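The new EE handler leans on Go's interface embedding: because the struct embeds the base `user.Handler` interface, any method the EE type does not define dispatches to the embedded base implementation, so only `Login`, `LoginPrecheck`, `AcceptInvite`, and `GetInvite` need overriding. A minimal, runnable illustration of that pattern (the interface and types here are invented for the example):

```go
package main

import "fmt"

// Greeter plays the role of user.Handler: an interface with several methods.
type Greeter interface {
	Hello() string
	Bye() string
}

type base struct{}

func (base) Hello() string { return "hello (base)" }
func (base) Bye() string   { return "bye (base)" }

// enterprise embeds the interface, so unoverridden calls dispatch to the
// embedded value; only Hello is replaced, mirroring how the EE Handler
// overrides Login and friends while inheriting everything else.
type enterprise struct {
	Greeter
}

func (enterprise) Hello() string { return "hello (enterprise override)" }

func main() {
	var g Greeter = enterprise{Greeter: base{}}
	fmt.Println(g.Hello()) // enterprise override
	fmt.Println(g.Bye())   // falls through to the embedded base
}
```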
229 ee/modules/user/impluser/module.go (new file)
@@ -0,0 +1,229 @@
```go
package impluser

import (
	"context"
	"fmt"
	"net/url"
	"strings"

	"github.com/SigNoz/signoz/ee/query-service/constants"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/modules/user"
	baseimpl "github.com/SigNoz/signoz/pkg/modules/user/impluser"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"go.uber.org/zap"
)

// EnterpriseModule embeds the base module implementation
type Module struct {
	*baseimpl.Module // Embed the base module implementation
	store types.UserStore
}

func NewModule(store types.UserStore) user.Module {
	baseModule := baseimpl.NewModule(store).(*baseimpl.Module)
	return &Module{
		Module: baseModule,
		store:  store,
	}
}

func (m *Module) createUserForSAMLRequest(ctx context.Context, email string) (*types.User, error) {
	// get auth domain from email domain
	_, err := m.GetAuthDomainByEmail(ctx, email)
	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
		return nil, err
	}

	// get name from email
	parts := strings.Split(email, "@")
	if len(parts) < 2 {
		return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid email format")
	}
	name := parts[0]

	defaultOrgID, err := m.store.GetDefaultOrgID(ctx)
	if err != nil {
		return nil, err
	}

	user, err := types.NewUser(name, email, types.RoleViewer.String(), defaultOrgID)
	if err != nil {
		return nil, err
	}

	err = m.CreateUser(ctx, user)
	if err != nil {
		return nil, err
	}

	return user, nil
}

func (m *Module) PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (string, error) {
	users, err := m.GetUsersByEmail(ctx, email)
	if err != nil {
		zap.L().Error("failed to get user with email received from auth provider", zap.String("error", err.Error()))
		return "", err
	}
	user := &types.User{}

	if len(users) == 0 {
		newUser, err := m.createUserForSAMLRequest(ctx, email)
		if err != nil {
			zap.L().Error("failed to create user with email received from auth provider", zap.Error(err))
			return "", err
		}
		user = newUser
	} else {
		user = &users[0].User
	}

	tokenStore, err := m.GetJWTForUser(ctx, user)
	if err != nil {
		zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
		return "", err
	}

	return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
		redirectUri,
		tokenStore.AccessJwt,
		user.ID,
		tokenStore.RefreshJwt), nil
}

func (m *Module) CanUsePassword(ctx context.Context, email string) (bool, error) {
	domain, err := m.GetAuthDomainByEmail(ctx, email)
	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
		return false, err
	}

	if domain != nil && domain.SsoEnabled {
		// sso is enabled, check if the user has admin role
		users, err := m.GetUsersByEmail(ctx, email)
		if err != nil {
			return false, err
		}

		if len(users) == 0 {
			return false, errors.New(errors.TypeNotFound, errors.CodeNotFound, "user not found")
		}

		if users[0].Role != types.RoleAdmin.String() {
			return false, errors.New(errors.TypeForbidden, errors.CodeForbidden, "auth method not supported")
		}
	}

	return true, nil
}

func (m *Module) LoginPrecheck(ctx context.Context, orgID, email, sourceUrl string) (*types.GettableLoginPrecheck, error) {
	resp := &types.GettableLoginPrecheck{IsUser: true, CanSelfRegister: false}

	// check if email is a valid user
	users, err := m.GetUsersByEmail(ctx, email)
	if err != nil {
		return nil, err
	}

	if len(users) == 0 {
		resp.IsUser = false
	}

	// give them an option to select an org
	if orgID == "" && len(users) > 1 {
		resp.SelectOrg = true
		resp.Orgs = make([]string, len(users))
		for i, user := range users {
			resp.Orgs[i] = user.OrgID
		}
		return resp, nil
	}

	// select the user with the corresponding orgID
	if len(users) > 1 {
		found := false
		for _, tuser := range users {
			if tuser.OrgID == orgID {
				found = true
				break
			}
		}
		if !found {
			resp.IsUser = false
			return resp, nil
		}
	}

	// the EE handler wrapper passes the feature flag value in context
	ssoAvailable, ok := ctx.Value(types.SSOAvailable).(bool)
	if !ok {
		zap.L().Error("failed to retrieve ssoAvailable from context")
		return nil, errors.New(errors.TypeInternal, errors.CodeInternal, "failed to retrieve SSO availability")
	}

	if ssoAvailable {
		// TODO(Nitya): in multitenancy this should use orgId as well.
		orgDomain, err := m.GetAuthDomainByEmail(ctx, email)
		if err != nil && !errors.Ast(err, errors.TypeNotFound) {
			return nil, err
		}

		if orgDomain != nil && orgDomain.SsoEnabled {
			// this is to allow self registration
			resp.IsUser = true

			// saml is enabled for this domain, lets prepare the sso url
			if sourceUrl == "" {
				sourceUrl = constants.GetDefaultSiteURL()
			}

			// parse the source url that generated the login request
			escapedUrl, _ := url.QueryUnescape(sourceUrl)
			siteUrl, err := url.Parse(escapedUrl)
			if err != nil {
				return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to parse referer")
			}

			// build the IdP URL that will authenticate the user;
			// the front-end will redirect the user to this url
			resp.SSOUrl, err = orgDomain.BuildSsoUrl(siteUrl)
			if err != nil {
				zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
				return nil, errors.New(errors.TypeInternal, errors.CodeInternal, "failed to prepare saml request for domain")
			}

			// set SSO to true, as the url is generated correctly
			resp.SSO = true
		}
	}
	return resp, nil
}

func (m *Module) GetAuthDomainByEmail(ctx context.Context, email string) (*types.GettableOrgDomain, error) {
	if email == "" {
		return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "email is required")
	}

	components := strings.Split(email, "@")
	if len(components) < 2 {
		return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid email format")
	}

	domain, err := m.store.GetDomainByName(ctx, components[1])
	if err != nil {
		return nil, err
	}

	gettableDomain := &types.GettableOrgDomain{StorableOrgDomain: *domain}
	if err := gettableDomain.LoadConfig(domain.Data); err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to load domain config")
	}
	return gettableDomain, nil
}
```
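One caveat worth noting: `PrepareSsoRedirect` interpolates the JWTs into the query string with `fmt.Sprintf`, without escaping. A more defensive construction, sketched below as an alternative rather than what the module actually does, would encode each parameter:

```go
package main

import (
	"fmt"
	"net/url"
)

// buildRedirect produces the same jwt/usr/refreshjwt parameters as the
// module's Sprintf, but query-escapes each value via url.Values.
func buildRedirect(redirectURI, accessJWT, userID, refreshJWT string) string {
	q := url.Values{}
	q.Set("jwt", accessJWT)
	q.Set("usr", userID)
	q.Set("refreshjwt", refreshJWT)
	return redirectURI + "?" + q.Encode()
}

func main() {
	fmt.Println(buildRedirect("https://example.com/login", "a.b.c", "user-1", "d.e.f"))
}
```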
37 ee/modules/user/impluser/store.go (new file)
@@ -0,0 +1,37 @@
```go
package impluser

import (
	"context"

	"github.com/SigNoz/signoz/pkg/errors"
	baseimpl "github.com/SigNoz/signoz/pkg/modules/user/impluser"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
)

type store struct {
	*baseimpl.Store
	sqlstore sqlstore.SQLStore
}

func NewStore(sqlstore sqlstore.SQLStore) types.UserStore {
	baseStore := baseimpl.NewStore(sqlstore).(*baseimpl.Store)
	return &store{
		Store:    baseStore,
		sqlstore: sqlstore,
	}
}

func (s *store) GetDomainByName(ctx context.Context, name string) (*types.StorableOrgDomain, error) {
	domain := new(types.StorableOrgDomain)
	err := s.sqlstore.BunDB().NewSelect().
		Model(domain).
		Where("name = ?", name).
		Limit(1).
		Scan(ctx)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeNotFound, errors.CodeNotFound, "failed to get domain from name")
	}
	return domain, nil
}
```
```diff
@@ -30,15 +30,15 @@ builds:
       - v8.0
     ldflags:
       - -s -w
-      - -X github.com/SigNoz/signoz/pkg/query-service/version.version={{ .Version }}
-      - -X main.commit={{ .Commit }} -X main.date={{ .CommitDate }}
-      - -X main.builtBy=goreleaser
-      - -X go.signoz.io/signoz/pkg/query-service/version.buildVersion={{ .Version }}
-      - -X go.signoz.io/signoz/pkg/query-service/version.buildHash={{ .ShortCommit }}
-      - -X go.signoz.io/signoz/pkg/query-service/version.buildTime={{ .Date }}
-      - -X go.signoz.io/signoz/pkg/query-service/version.gitBranch={{ .Branch }}
-      - -X go.signoz.io/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
-      - -X go.signoz.io/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
+      - -X github.com/SigNoz/signoz/pkg/version.version=v{{ .Version }}
+      - -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
+      - -X github.com/SigNoz/signoz/pkg/version.hash={{ .ShortCommit }}
+      - -X github.com/SigNoz/signoz/pkg/version.time={{ .CommitTimestamp }}
+      - -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
+      - -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
+      - -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
+      - -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
+      - -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
       - >-
         {{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
     mod_timestamp: "{{ .CommitTimestamp }}"
```
```diff
@@ -1,34 +1,21 @@
 # use a minimal alpine image
 FROM alpine:3.20.3
 
 # Add Maintainer Info
 LABEL maintainer="signoz"
 
 # define arguments that can be passed during build time
-ARG TARGETOS TARGETARCH
-
-# add ca-certificates in case you need them
-RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
-
 # set working directory
 WORKDIR /root
 
-# copy the signoz binary
-COPY ee/query-service/bin/signoz-${TARGETOS}-${TARGETARCH} /root/signoz
+ARG OS="linux"
+ARG TARGETARCH
 
-# copy prometheus YAML config
-COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
-COPY pkg/query-service/templates /root/templates
+RUN apk update && \
+    apk add ca-certificates && \
+    rm -rf /var/cache/apk/*
 
-# Make signoz executable for non-root users
-RUN chmod 755 /root /root/signoz
-
-# Copy frontend
+COPY ./target/${OS}-${TARGETARCH}/signoz /root/signoz
+COPY ./conf/prometheus.yml /root/config/prometheus.yml
+COPY ./templates/email /root/templates
 COPY frontend/build/ /etc/signoz/web/
 
-# run the binary
+RUN chmod 755 /root /root/signoz
 
 ENTRYPOINT ["./signoz"]
 
-CMD ["-config", "/root/config/prometheus.yml"]
-
+EXPOSE 8080
 CMD ["-config", "/root/config/prometheus.yml"]
```
36 ee/query-service/Dockerfile.integration (new file)
@@ -0,0 +1,36 @@
```dockerfile
FROM golang:1.23-bullseye

ARG OS="linux"
ARG TARGETARCH
ARG ZEUSURL

# This path is important for stacktraces
WORKDIR $GOPATH/src/github.com/signoz/signoz
WORKDIR /root

RUN set -eux; \
	apt-get update; \
	apt-get install -y --no-install-recommends \
		g++ \
		gcc \
		libc6-dev \
		make \
		pkg-config \
	; \
	rm -rf /var/lib/apt/lists/*

COPY go.mod go.sum ./

RUN go mod download

COPY ./ee/ ./ee/
COPY ./pkg/ ./pkg/
COPY ./templates/email /root/templates

COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["/root/signoz"]
```
22 ee/query-service/Dockerfile.multi-arch (new file)
@@ -0,0 +1,22 @@
```dockerfile
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]
```
```diff
@@ -3,8 +3,9 @@ package anomaly
 import (
 	"context"
 
-	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
-	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
+	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
+	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )
 
 type DailyProvider struct {
@@ -28,17 +29,16 @@ func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvi
 	}
 
 	dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
-		Reader:        dp.reader,
-		Cache:         dp.cache,
-		KeyGenerator:  queryBuilder.NewKeyGenerator(),
-		FluxInterval:  dp.fluxInterval,
-		FeatureLookup: dp.ff,
+		Reader:       dp.reader,
+		Cache:        dp.cache,
+		KeyGenerator: queryBuilder.NewKeyGenerator(),
+		FluxInterval: dp.fluxInterval,
 	})
 
 	return dp
 }
 
-func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	req.Seasonality = SeasonalityDaily
-	return p.getAnomalies(ctx, req)
+	return p.getAnomalies(ctx, orgID, req)
 }
```
```diff
@@ -3,8 +3,9 @@ package anomaly
 import (
 	"context"
 
-	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
-	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
+	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
+	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )
 
 type HourlyProvider struct {
@@ -28,17 +29,16 @@ func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyPr
 	}
 
 	hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
-		Reader:        hp.reader,
-		Cache:         hp.cache,
-		KeyGenerator:  queryBuilder.NewKeyGenerator(),
-		FluxInterval:  hp.fluxInterval,
-		FeatureLookup: hp.ff,
+		Reader:       hp.reader,
+		Cache:        hp.cache,
+		KeyGenerator: queryBuilder.NewKeyGenerator(),
+		FluxInterval: hp.fluxInterval,
 	})
 
 	return hp
 }
 
-func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	req.Seasonality = SeasonalityHourly
-	return p.getAnomalies(ctx, req)
+	return p.getAnomalies(ctx, orgID, req)
 }
```
```diff
@@ -4,8 +4,8 @@ import (
 	"math"
 	"time"
 
-	"go.signoz.io/signoz/pkg/query-service/common"
-	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+	"github.com/SigNoz/signoz/pkg/query-service/common"
+	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 )
 
 type Seasonality string
```
```diff
@@ -2,8 +2,10 @@ package anomaly
 
 import (
 	"context"
+
+	"github.com/SigNoz/signoz/pkg/valuer"
 )
 
 type Provider interface {
-	GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
+	GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
 }
```
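Every seasonality provider must now accept and forward the `orgID` so downstream range queries are org-scoped. A compact sketch of an implementation satisfying the updated interface (the types below are simplified stand-ins for `valuer.UUID` and the request/response structs, invented for illustration):

```go
package main

import (
	"context"
	"fmt"
)

type UUID string
type Request struct{ Seasonality string }
type Response struct{ Info string }

type Provider interface {
	GetAnomalies(ctx context.Context, orgID UUID, req *Request) (*Response, error)
}

// dailyProvider mirrors DailyProvider: it stamps the seasonality, then
// delegates, forwarding orgID so every downstream query is org-scoped.
type dailyProvider struct{}

func (p dailyProvider) GetAnomalies(ctx context.Context, orgID UUID, req *Request) (*Response, error) {
	req.Seasonality = "daily"
	return &Response{Info: fmt.Sprintf("org=%s seasonality=%s", orgID, req.Seasonality)}, nil
}

func main() {
	var p Provider = dailyProvider{}
	resp, _ := p.GetAnomalies(context.Background(), "org-123", &Request{})
	fmt.Println(resp.Info)
}
```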
```diff
@@ -5,11 +5,12 @@ import (
 	"math"
 	"time"
 
-	"go.signoz.io/signoz/pkg/query-service/cache"
-	"go.signoz.io/signoz/pkg/query-service/interfaces"
-	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
-	"go.signoz.io/signoz/pkg/query-service/postprocess"
-	"go.signoz.io/signoz/pkg/query-service/utils/labels"
+	"github.com/SigNoz/signoz/pkg/cache"
+	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
+	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
+	"github.com/SigNoz/signoz/pkg/query-service/postprocess"
+	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"go.uber.org/zap"
 )
 
@@ -38,12 +39,6 @@ func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericPr
 	}
 }
 
-func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
-	return func(p T) {
-		p.GetBaseSeasonalProvider().ff = ff
-	}
-}
-
 func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
 	return func(p T) {
 		p.GetBaseSeasonalProvider().reader = reader
@@ -56,7 +51,6 @@ type BaseSeasonalProvider struct {
 	fluxInterval time.Duration
 	cache        cache.Cache
 	keyGenerator cache.KeyGenerator
-	ff           interfaces.FeatureLookup
 }
 
 func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
@@ -66,9 +60,9 @@ func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomaly
 	return prepareAnomalyQueryParams(req.Params, req.Seasonality)
 }
 
-func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
+func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
 	zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
-	currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
+	currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentPeriodQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -79,7 +73,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
-	pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
+	pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastPeriodQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -90,7 +84,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
-	currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
+	currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentSeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -101,7 +95,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
-	pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
+	pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastSeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -112,7 +106,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
-	past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
+	past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past2SeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -123,7 +117,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
-	past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
+	past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past3SeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -313,6 +307,9 @@ func (p *BaseSeasonalProvider) getScore(
 	series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
 ) float64 {
 	expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
+	if expectedValue < 0 {
+		expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
+	}
 	return (value - expectedValue) / p.getStdDev(weekSeries)
 }
 
@@ -339,9 +336,9 @@ func (p *BaseSeasonalProvider) getAnomalyScores(
 	return anomalyScoreSeries
 }
 
-func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	anomalyParams := p.getQueryParams(req)
-	anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
+	anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
 	if err != nil {
 		return nil, err
 	}
```
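The scoring in `getScore` is essentially a z-score: the observed value minus the expected value, normalized by the standard deviation of the reference (current-season) series, with a moving-average fallback when the expected value goes negative. The core arithmetic in isolation (a simplified sketch; the real code derives the expected value from several seasonal series):

```go
package main

import (
	"fmt"
	"math"
)

// zScore mirrors getScore's shape: deviation from the expected value,
// normalized by the spread of the reference series.
func zScore(value, expected float64, reference []float64) float64 {
	var sum, sqSum float64
	for _, v := range reference {
		sum += v
	}
	mean := sum / float64(len(reference))
	for _, v := range reference {
		sqSum += (v - mean) * (v - mean)
	}
	std := math.Sqrt(sqSum / float64(len(reference)))
	return (value - expected) / std
}

func main() {
	ref := []float64{10, 12, 11, 13, 12}
	fmt.Printf("score=%.2f\n", zScore(25, 12, ref)) // large positive score flags a spike
}
```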
```diff
@@ -3,8 +3,9 @@ package anomaly
 import (
 	"context"
 
-	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
-	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
+	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
+	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )
 
 type WeeklyProvider struct {
@@ -27,17 +28,16 @@ func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyPr
 	}
 
 	wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
-		Reader:        wp.reader,
-		Cache:         wp.cache,
-		KeyGenerator:  queryBuilder.NewKeyGenerator(),
-		FluxInterval:  wp.fluxInterval,
-		FeatureLookup: wp.ff,
+		Reader:       wp.reader,
+		Cache:        wp.cache,
+		KeyGenerator: queryBuilder.NewKeyGenerator(),
+		FluxInterval: wp.fluxInterval,
 	})
 
 	return wp
 }
 
-func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	req.Seasonality = SeasonalityWeekly
-	return p.getAnomalies(ctx, req)
+	return p.getAnomalies(ctx, orgID, req)
 }
```
```diff
@@ -1,33 +1,41 @@
 package api
 
 import (
 	"context"
 	"net/http"
 	"net/http/httputil"
 	"time"
 
+	"github.com/SigNoz/signoz/ee/query-service/dao"
+	"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
+	"github.com/SigNoz/signoz/ee/query-service/interfaces"
+	"github.com/SigNoz/signoz/ee/query-service/license"
+	"github.com/SigNoz/signoz/ee/query-service/model"
+	"github.com/SigNoz/signoz/ee/query-service/usage"
+	"github.com/SigNoz/signoz/pkg/alertmanager"
+	"github.com/SigNoz/signoz/pkg/apis/fields"
+	"github.com/SigNoz/signoz/pkg/errors"
+	"github.com/SigNoz/signoz/pkg/http/middleware"
+	"github.com/SigNoz/signoz/pkg/http/render"
+	"github.com/SigNoz/signoz/pkg/modules/quickfilter"
+	quickfilterscore "github.com/SigNoz/signoz/pkg/modules/quickfilter/core"
+	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
+	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
+	"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
+	"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
+	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
+	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
+	rules "github.com/SigNoz/signoz/pkg/query-service/rules"
+	"github.com/SigNoz/signoz/pkg/signoz"
+	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/version"
 	"github.com/gorilla/mux"
-	"go.signoz.io/signoz/ee/query-service/dao"
-	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
-	"go.signoz.io/signoz/ee/query-service/interfaces"
-	"go.signoz.io/signoz/ee/query-service/license"
-	"go.signoz.io/signoz/ee/query-service/usage"
-	"go.signoz.io/signoz/pkg/alertmanager"
-	baseapp "go.signoz.io/signoz/pkg/query-service/app"
-	"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
-	"go.signoz.io/signoz/pkg/query-service/app/integrations"
-	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
-	"go.signoz.io/signoz/pkg/query-service/cache"
-	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	rules "go.signoz.io/signoz/pkg/query-service/rules"
-	"go.signoz.io/signoz/pkg/query-service/version"
-	"go.signoz.io/signoz/pkg/signoz"
-	"go.signoz.io/signoz/pkg/types/authtypes"
 	"go.uber.org/zap"
 )
 
 type APIHandlerOptions struct {
 	DataConnector     interfaces.DataConnector
 	SkipConfig        *basemodel.SkipConfig
 	PreferSpanMetrics bool
 	AppDao            dao.ModelDao
 	RulesManager      *rules.Manager
@@ -37,7 +45,6 @@ type APIHandlerOptions struct {
 	IntegrationsController        *integrations.Controller
 	CloudIntegrationsController   *cloudintegrations.Controller
 	LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
-	Cache                         cache.Cache
 	Gateway                       *httputil.ReverseProxy
 	GatewayUrl                    string
 	// Querier Influx Interval
@@ -54,23 +61,22 @@ type APIHandler struct {
 
 // NewAPIHandler returns an APIHandler
 func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
+	quickfiltermodule := quickfilterscore.NewQuickFilters(quickfilterscore.NewStore(signoz.SQLStore))
+	quickFilter := quickfilter.NewAPI(quickfiltermodule)
 	baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
 		Reader:                        opts.DataConnector,
 		SkipConfig:                    opts.SkipConfig,
 		PreferSpanMetrics:             opts.PreferSpanMetrics,
 		AppDao:                        opts.AppDao,
 		RuleManager:                   opts.RulesManager,
 		FeatureFlags:                  opts.FeatureFlags,
 		IntegrationsController:        opts.IntegrationsController,
 		CloudIntegrationsController:   opts.CloudIntegrationsController,
 		LogsParsingPipelineController: opts.LogsParsingPipelineController,
-		Cache:                         opts.Cache,
 		FluxInterval:                  opts.FluxInterval,
 		UseLogsNewSchema:              opts.UseLogsNewSchema,
 		UseTraceNewSchema:             opts.UseTraceNewSchema,
 		AlertmanagerAPI:               alertmanager.NewAPI(signoz.Alertmanager),
 		FieldsAPI:                     fields.NewAPI(signoz.TelemetryStore),
 		Signoz:                        signoz,
 		QuickFilters:                  quickFilter,
 		QuickFilterModule:             quickfiltermodule,
 	})
 
 	if err != nil {
@@ -114,50 +120,30 @@ func (ah *APIHandler) CheckFeature(f string) bool {
 }
 
 // RegisterRoutes registers routes for this handler on the given router
-func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
+func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
 	// note: add ee override methods first
 
 	// routes available only in ee version
-	router.HandleFunc("/api/v1/featureFlags",
-		am.OpenAccess(ah.getFeatureFlags)).
-		Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/featureFlags", am.OpenAccess(ah.getFeatureFlags)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/loginPrecheck", am.OpenAccess(ah.loginPrecheck)).Methods(http.MethodGet)
 
-	router.HandleFunc("/api/v1/loginPrecheck",
-		am.OpenAccess(ah.precheckLogin)).
-		Methods(http.MethodGet)
+	// invite
+	router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/invite/accept", am.OpenAccess(ah.acceptInvite)).Methods(http.MethodPost)
 
 	// paid plans specific routes
-	router.HandleFunc("/api/v1/complete/saml",
-		am.OpenAccess(ah.receiveSAML)).
-		Methods(http.MethodPost)
-
-	router.HandleFunc("/api/v1/complete/google",
-		am.OpenAccess(ah.receiveGoogleAuth)).
-		Methods(http.MethodGet)
-
-	router.HandleFunc("/api/v1/orgs/{orgId}/domains",
-		am.AdminAccess(ah.listDomainsByOrg)).
-		Methods(http.MethodGet)
-
-	router.HandleFunc("/api/v1/domains",
-		am.AdminAccess(ah.postDomain)).
-		Methods(http.MethodPost)
-
-	router.HandleFunc("/api/v1/domains/{id}",
-		am.AdminAccess(ah.putDomain)).
-		Methods(http.MethodPut)
-
-	router.HandleFunc("/api/v1/domains/{id}",
-		am.AdminAccess(ah.deleteDomain)).
-		Methods(http.MethodDelete)
+	router.HandleFunc("/api/v1/complete/saml", am.OpenAccess(ah.receiveSAML)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/complete/google", am.OpenAccess(ah.receiveGoogleAuth)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/orgs/{orgId}/domains", am.AdminAccess(ah.listDomainsByOrg)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/domains", am.AdminAccess(ah.postDomain)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/domains/{id}", am.AdminAccess(ah.putDomain)).Methods(http.MethodPut)
+	router.HandleFunc("/api/v1/domains/{id}", am.AdminAccess(ah.deleteDomain)).Methods(http.MethodDelete)
 
 	// base overrides
 	router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
 
 	// PAT APIs
 	router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.createPAT)).Methods(http.MethodPost)
@@ -188,7 +174,55 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
 
 }
 
-func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
+// TODO(nitya): remove this once we know how to get the FF's
+func (ah *APIHandler) updateRequestContext(w http.ResponseWriter, r *http.Request) (*http.Request, error) {
+	ssoAvailable := true
+	err := ah.FF().CheckFeature(model.SSO)
+	if err != nil {
+		switch err.(type) {
+		case basemodel.ErrFeatureUnavailable:
+			// do nothing, just skip sso
+			ssoAvailable = false
+		default:
+			zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
+			return r, errors.New(errors.TypeInternal, errors.CodeInternal, "error checking SSO feature")
+		}
+	}
+	ctx := context.WithValue(r.Context(), types.SSOAvailable, ssoAvailable)
+	return r.WithContext(ctx), nil
+}
+
+func (ah *APIHandler) loginPrecheck(w http.ResponseWriter, r *http.Request) {
+	r, err := ah.updateRequestContext(w, r)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	ah.Signoz.Handlers.User.LoginPrecheck(w, r)
+}
+
+func (ah *APIHandler) acceptInvite(w http.ResponseWriter, r *http.Request) {
+	r, err := ah.updateRequestContext(w, r)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	ah.Signoz.Handlers.User.AcceptInvite(w, r)
+}
+
+func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
+	r, err := ah.updateRequestContext(w, r)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	ah.Signoz.Handlers.User.GetInvite(w, r)
+}
+
+func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
 
 	ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
 
@@ -200,9 +234,8 @@ func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *ba
 }
 
 func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
-	version := version.GetVersion()
 	versionResponse := basemodel.GetVersionResponse{
-		Version:        version,
+		Version:        version.Info.Version(),
 		EE:             "Y",
 		SetupCompleted: ah.SetupCompleted,
 	}
```
```diff
@@ -9,13 +9,11 @@ import (
 	"net/http"
 	"net/url"
 
 	"github.com/gorilla/mux"
 	"go.uber.org/zap"
 
-	"go.signoz.io/signoz/ee/query-service/constants"
-	"go.signoz.io/signoz/ee/query-service/model"
-	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
+	"github.com/SigNoz/signoz/ee/query-service/constants"
+	"github.com/SigNoz/signoz/ee/query-service/model"
+	"github.com/SigNoz/signoz/pkg/http/render"
 )
 
 func parseRequest(r *http.Request, req interface{}) error {
@@ -31,162 +29,13 @@ func parseRequest(r *http.Request, req interface{}) error {
 
 // loginUser overrides base handler and considers SSO case.
 func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
-
-	req := basemodel.LoginRequest{}
-	err := parseRequest(r, &req)
+	r, err := ah.updateRequestContext(w, r)
 	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		render.Error(w, err)
 		return
 	}
 
-	ctx := context.Background()
-
-	if req.Email != "" && ah.CheckFeature(model.SSO) {
-		var apierr basemodel.BaseApiError
-		_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
-		if apierr != nil && !apierr.IsNil() {
-			RespondError(w, apierr, nil)
-		}
-	}
-
-	// if all looks good, call auth
-	resp, err := baseauth.Login(ctx, &req, ah.opts.JWT)
-	if ah.HandleError(w, err, http.StatusUnauthorized) {
-		return
-	}
-
-	ah.WriteJSON(w, r, resp)
-}
-
-// registerUser registers a user and responds with a precheck
-// so the front-end can decide the login method
-func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
-
-	if !ah.CheckFeature(model.SSO) {
-		ah.APIHandler.Register(w, r)
-		return
-	}
-
-	ctx := context.Background()
-	var req *baseauth.RegisterRequest
-
-	defer r.Body.Close()
-	requestBody, err := io.ReadAll(r.Body)
-	if err != nil {
-		zap.L().Error("received no input in api", zap.Error(err))
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	err = json.Unmarshal(requestBody, &req)
-
-	if err != nil {
-		zap.L().Error("received invalid user registration request", zap.Error(err))
-		RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
-		return
-	}
-
-	// get invite object
-	invite, err := baseauth.ValidateInvite(ctx, req)
-	if err != nil {
-		zap.L().Error("failed to validate invite token", zap.Error(err))
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	if invite == nil {
-		zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
-		RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
-		return
-	}
-
-	// get auth domain from email domain
-	domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
-	if apierr != nil {
-		zap.L().Error("failed to get domain from email", zap.Error(apierr))
-		RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
-	}
-
-	precheckResp := &basemodel.PrecheckResponse{
-		SSO:    false,
-		IsUser: false,
-	}
-
-	if domain != nil && domain.SsoEnabled {
-		// sso is enabled, create user and respond precheck data
-		user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
-		if apierr != nil {
-			RespondError(w, apierr, nil)
-			return
-		}
-
-		var precheckError basemodel.BaseApiError
-
-		precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
-		if precheckError != nil {
-			RespondError(w, precheckError, precheckResp)
-		}
-
-	} else {
-		// no-sso, validate password
-		if err := baseauth.ValidatePassword(req.Password); err != nil {
-			RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
-			return
-		}
-
-		_, registerError := baseauth.Register(ctx, req, ah.Signoz.Alertmanager)
-		if !registerError.IsNil() {
-			RespondError(w, apierr, nil)
-			return
-		}
-
-		precheckResp.IsUser = true
-	}
-
-	ah.Respond(w, precheckResp)
-}
-
-// getInvite returns the invite object details for the given invite token. We do not need to
-// protect this API because invite token itself is meant to be private.
-func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
-	token := mux.Vars(r)["token"]
-	sourceUrl := r.URL.Query().Get("ref")
-	ctx := context.Background()
-
-	inviteObject, err := baseauth.GetInvite(context.Background(), token)
-	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	resp := model.GettableInvitation{
-		InvitationResponseObject: inviteObject,
-	}
-
-	precheck, apierr := ah.AppDao().PrecheckLogin(ctx, inviteObject.Email, sourceUrl)
-	resp.Precheck = precheck
-
-	if apierr != nil {
-		RespondError(w, apierr, resp)
-	}
-
-	ah.WriteJSON(w, r, resp)
-}
-
-// PrecheckLogin enables browser login page to display appropriate
-// login methods
-func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-
-	email := r.URL.Query().Get("email")
-	sourceUrl := r.URL.Query().Get("ref")
-
-	resp, apierr := ah.AppDao().PrecheckLogin(ctx, email, sourceUrl)
-	if apierr != nil {
-		RespondError(w, apierr, resp)
-	}
-
-	ah.Respond(w, resp)
+	ah.Signoz.Handlers.User.Login(w, r)
 }
 
 func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
@@ -253,7 +102,7 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email, ah.opts.JWT)
+	nextPage, err := ah.Signoz.Modules.User.PrepareSsoRedirect(ctx, redirectUri, identity.Email, ah.opts.JWT)
 	if err != nil {
 		zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
 		handleSsoError(w, r, redirectUri)
@@ -331,7 +180,7 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email, ah.opts.JWT)
+	nextPage, err := ah.Signoz.Modules.User.PrepareSsoRedirect(ctx, redirectUri, email, ah.opts.JWT)
 	if err != nil {
 		zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
 		handleSsoError(w, r, redirectUri)
```
```diff
@@ -10,15 +10,15 @@ import (
 	"strings"
 	"time"
 
+	"github.com/SigNoz/signoz/ee/query-service/constants"
+	eeTypes "github.com/SigNoz/signoz/ee/types"
+	"github.com/SigNoz/signoz/pkg/errors"
+	"github.com/SigNoz/signoz/pkg/http/render"
+	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
+	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/google/uuid"
 	"github.com/gorilla/mux"
-	"go.signoz.io/signoz/ee/query-service/constants"
-	"go.signoz.io/signoz/ee/query-service/model"
-	"go.signoz.io/signoz/pkg/query-service/auth"
-	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
-	"go.signoz.io/signoz/pkg/query-service/dao"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	"go.signoz.io/signoz/pkg/types"
 	"go.uber.org/zap"
 )
 
@@ -30,6 +30,12 @@ type CloudIntegrationConnectionParamsResponse struct {
 }
 
 func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+
 	cloudProvider := mux.Vars(r)["cloudProvider"]
 	if cloudProvider != "aws" {
 		RespondError(w, basemodel.BadRequest(fmt.Errorf(
@@ -38,15 +44,7 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
 		return
 	}
 
-	currentUser, err := auth.GetUserFromReqContext(r.Context())
-	if err != nil {
-		RespondError(w, basemodel.UnauthorizedError(fmt.Errorf(
-			"couldn't deduce current user: %w", err,
-		)), nil)
-		return
-	}
-
-	apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), currentUser.OrgID, cloudProvider)
+	apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
 	if apiErr != nil {
 		RespondError(w, basemodel.WrapApiError(
 			apiErr, "couldn't provision PAT for cloud integration:",
@@ -118,14 +116,14 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
 		return "", apiErr
 	}
 
-	allPats, err := ah.AppDao().ListPATs(ctx)
+	allPats, err := ah.AppDao().ListPATs(ctx, orgId)
 	if err != nil {
 		return "", basemodel.InternalError(fmt.Errorf(
 			"couldn't list PATs: %w", err,
 		))
 	}
 	for _, p := range allPats {
-		if p.UserID == integrationUser.ID && p.Name == integrationPATName {
+		if p.UserID == integrationUser.ID.String() && p.Name == integrationPATName {
 			return p.Token, nil
 		}
 	}
@@ -135,16 +133,13 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
 		zap.String("cloudProvider", cloudProvider),
 	)
 
-	newPAT := model.PAT{
-		Token:     generatePATToken(),
-		UserID:    integrationUser.ID,
-		Name:      integrationPATName,
-		Role:      baseconstants.ViewerGroup,
-		ExpiresAt: 0,
-		CreatedAt: time.Now().Unix(),
-		UpdatedAt: time.Now().Unix(),
-	}
-	integrationPAT, err := ah.AppDao().CreatePAT(ctx, newPAT)
+	newPAT := eeTypes.NewGettablePAT(
+		integrationPATName,
+		types.RoleViewer.String(),
+		integrationUser.ID.String(),
+		0,
+	)
+	integrationPAT, err := ah.AppDao().CreatePAT(ctx, orgId, newPAT)
 	if err != nil {
 		return "", basemodel.InternalError(fmt.Errorf(
 			"couldn't create cloud integration PAT: %w", err,
@@ -156,11 +151,12 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
 func (ah *APIHandler) getOrCreateCloudIntegrationUser(
 	ctx context.Context, orgId string, cloudProvider string,
 ) (*types.User, *basemodel.ApiError) {
-	cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)
+	cloudIntegrationUser := fmt.Sprintf("%s-integration", cloudProvider)
+	email := fmt.Sprintf("%s@signoz.io", cloudIntegrationUser)
 
-	integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
-	if apiErr != nil {
-		return nil, basemodel.WrapApiError(apiErr, "couldn't look for integration user")
+	integrationUserResult, err := ah.Signoz.Modules.User.GetUserByEmailInOrg(ctx, orgId, email)
+	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
+		return nil, basemodel.NotFoundError(fmt.Errorf("couldn't look for integration user: %w", err))
 	}
 
 	if integrationUserResult != nil {
@@ -172,33 +168,18 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
 		zap.String("cloudProvider", cloudProvider),
 	)
 
-	newUser := &types.User{
-		ID:    cloudIntegrationUserId,
-		Name:  fmt.Sprintf("%s integration", cloudProvider),
-		Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
-		TimeAuditable: types.TimeAuditable{
-			CreatedAt: time.Now(),
-		},
-		OrgID: orgId,
-	}
-
-	viewerGroup, apiErr := dao.DB().GetGroupByName(ctx, baseconstants.ViewerGroup)
-	if apiErr != nil {
-		return nil, basemodel.WrapApiError(apiErr, "couldn't get viewer group for creating integration user")
-	}
-	newUser.GroupID = viewerGroup.ID
-
-	passwordHash, err := auth.PasswordHash(uuid.NewString())
+	newUser, err := types.NewUser(cloudIntegrationUser, email, types.RoleViewer.String(), orgId)
 	if err != nil {
-		return nil, basemodel.InternalError(fmt.Errorf(
-			"couldn't hash random password for cloud integration user: %w", err,
-		))
+		return nil, basemodel.InternalError(fmt.Errorf(
+			"couldn't create cloud integration user: %w", err,
+		))
 	}
-	newUser.Password = passwordHash
 
-	integrationUser, apiErr := ah.AppDao().CreateUser(ctx, newUser, false)
-	if apiErr != nil {
-		return nil, basemodel.WrapApiError(apiErr, "couldn't create cloud integration user")
+	password, err := types.NewFactorPassword(uuid.NewString())
+	if err != nil {
+		return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
+	}
+
+	integrationUser, err := ah.Signoz.Modules.User.CreateUserWithPassword(ctx, newUser, password)
+	if err != nil {
+		return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
 	}
 
 	return integrationUser, nil
```
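`getOrCreateCloudIntegrationPAT` is a get-or-create: list the org's PATs, return the one matching the integration user and name, and only mint a new token when none exists, which keeps repeated provisioning calls idempotent. The same shape in isolation (the types and mint function below are invented for illustration):

```go
package main

import "fmt"

type pat struct{ UserID, Name, Token string }

// getOrCreate returns an existing token for (userID, name) or mints one,
// so repeated provisioning calls hand back the same credential.
func getOrCreate(existing []pat, userID, name string, mint func() string) string {
	for _, p := range existing {
		if p.UserID == userID && p.Name == name {
			return p.Token
		}
	}
	return mint()
}

func main() {
	pats := []pat{{UserID: "u1", Name: "aws integration", Token: "tok-1"}}
	fmt.Println(getOrCreate(pats, "u1", "aws integration", func() string { return "tok-new" })) // tok-1
	fmt.Println(getOrCreate(pats, "u2", "aws integration", func() string { return "tok-new" })) // tok-new
}
```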
```diff
@@ -4,12 +4,10 @@ import (
 	"net/http"
 	"strings"
 
+	"github.com/SigNoz/signoz/pkg/errors"
+	"github.com/SigNoz/signoz/pkg/http/render"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/gorilla/mux"
-	"go.signoz.io/signoz/pkg/errors"
-	"go.signoz.io/signoz/pkg/http/render"
-	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
-	"go.signoz.io/signoz/pkg/query-service/auth"
-	"go.signoz.io/signoz/pkg/types/authtypes"
 )
 
 func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
@@ -36,26 +34,27 @@ func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request
 		return
 	}
 
-	claims, ok := authtypes.ClaimsFromContext(r.Context())
-	if !ok {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
 		render.Error(w, errors.Newf(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "unauthenticated"))
 		return
 	}
-	dashboard, err := dashboards.GetDashboard(r.Context(), claims.OrgID, uuid)
+
+	dashboard, err := ah.Signoz.Modules.Dashboard.Get(r.Context(), claims.OrgID, uuid)
 	if err != nil {
-		render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to get dashboard"))
+		render.Error(w, err)
 		return
 	}
 
-	if !auth.IsAdminV2(claims) && (dashboard.CreatedBy != claims.Email) {
+	if err := claims.IsAdmin(); err != nil && (dashboard.CreatedBy != claims.Email) {
 		render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "You are not authorized to lock/unlock this dashboard"))
 		return
 	}
 
 	// Lock/Unlock the dashboard
-	err = dashboards.LockUnlockDashboard(r.Context(), claims.OrgID, uuid, lock)
+	err = ah.Signoz.Modules.Dashboard.LockUnlock(r.Context(), claims.OrgID, uuid, lock)
 	if err != nil {
-		render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to lock/unlock dashboard"))
+		render.Error(w, err)
 		return
 	}
```
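The lock/unlock gate now reads as: admins may always mutate, and otherwise only the dashboard's creator may. The check reproduced in isolation (the `claims` type below is a stand-in for `authtypes.Claims`, invented for the example):

```go
package main

import (
	"errors"
	"fmt"
)

type claims struct {
	Role  string
	Email string
}

// isAdmin mirrors claims.IsAdmin from the diff: a nil error means admin.
func (c claims) isAdmin() error {
	if c.Role != "ADMIN" {
		return errors.New("not an admin")
	}
	return nil
}

// canMutate reproduces the gate: non-admins are rejected unless they
// created the dashboard.
func canMutate(c claims, createdBy string) bool {
	if err := c.isAdmin(); err != nil && c.Email != createdBy {
		return false
	}
	return true
}

func main() {
	fmt.Println(canMutate(claims{Role: "VIEWER", Email: "a@x.io"}, "a@x.io")) // true: owner
	fmt.Println(canMutate(claims{Role: "ADMIN", Email: "b@x.io"}, "a@x.io"))  // true: admin
	fmt.Println(canMutate(claims{Role: "VIEWER", Email: "b@x.io"}, "a@x.io")) // false
}
```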
@@ -6,9 +6,10 @@ import (
"fmt"
"net/http"

"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
)

func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
@@ -24,7 +25,7 @@ func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()

req := model.OrgDomain{}
req := types.GettableOrgDomain{}

if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
@@ -54,12 +55,12 @@ func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
return
}

req := model.OrgDomain{Id: domainId}
req := types.GettableOrgDomain{StorableOrgDomain: types.StorableOrgDomain{ID: domainId}}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
req.Id = domainId
req.ID = domainId
if err := req.Valid(nil); err != nil {
RespondError(w, model.BadRequest(err), nil)
}
@@ -8,8 +8,8 @@ import (
"net/http"
"time"

"go.signoz.io/signoz/ee/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/ee/query-service/constants"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
@@ -3,8 +3,8 @@ package api
import (
"testing"

basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/stretchr/testify/assert"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func TestMergeFeatureSets(t *testing.T) {
@@ -4,7 +4,7 @@ import (
"net/http"
"strings"

"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
)

func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
@@ -5,10 +5,12 @@ import (
"fmt"
"net/http"

"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/integrations/signozio"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/http/render"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/integrations/signozio"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/query-service/telemetry"
"github.com/SigNoz/signoz/pkg/types/authtypes"
)

type DayWiseBreakdown struct {
@@ -90,8 +92,13 @@ func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request)

// this function is called by zeus when inserting licenses in the query-service
func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
var licenseKey ApplyLicenseRequest
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}

var licenseKey ApplyLicenseRequest
if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
@@ -102,9 +109,10 @@ func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
return
}

_, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
if apiError != nil {
RespondError(w, apiError, nil)
_, err = ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
if err != nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED, map[string]interface{}{"err": err.Error()}, claims.Email, true, false)
render.Error(w, err)
return
}
@@ -112,10 +120,9 @@ func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
}

func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) {

apiError := ah.LM().RefreshLicense(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
err := ah.LM().RefreshLicense(r.Context())
if err != nil {
render.Error(w, err)
return
}
@@ -127,7 +134,6 @@ func getCheckoutPortalResponse(redirectURL string) *Redirect {
}

func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {

checkoutRequest := &model.CheckoutRequest{}
if err := json.NewDecoder(r.Body).Decode(checkoutRequest); err != nil {
RespondError(w, model.BadRequest(err), nil)
@@ -140,9 +146,9 @@ func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
return
}

redirectUrl, err := signozio.CheckoutSession(r.Context(), checkoutRequest, license.Key)
redirectUrl, err := signozio.CheckoutSession(r.Context(), checkoutRequest, license.Key, ah.Signoz.Zeus)
if err != nil {
RespondError(w, err, nil)
render.Error(w, err)
return
}
@@ -230,7 +236,6 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
}

func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {

portalRequest := &model.PortalRequest{}
if err := json.NewDecoder(r.Body).Decode(portalRequest); err != nil {
RespondError(w, model.BadRequest(err), nil)
@@ -243,9 +248,9 @@ func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
return
}

redirectUrl, err := signozio.PortalSession(r.Context(), portalRequest, license.Key)
redirectUrl, err := signozio.PortalSession(r.Context(), portalRequest, license.Key, ah.Signoz.Zeus)
if err != nil {
RespondError(w, err, nil)
render.Error(w, err)
return
}
@@ -1,73 +1,53 @@
package api

import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"slices"
"time"

"github.com/SigNoz/signoz/ee/query-service/model"
eeTypes "github.com/SigNoz/signoz/ee/types"
"github.com/SigNoz/signoz/pkg/errors"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

func generatePATToken() string {
// Generate a 32-byte random token.
token := make([]byte, 32)
rand.Read(token)
// Encode the token in base64.
encodedToken := base64.StdEncoding.EncodeToString(token)
return encodedToken
}
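`generatePATToken` survives the rewrite unchanged and still discards the error from `rand.Read`. A stricter variant (an assumption on my part, not part of this change) would surface it:

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// generatePATToken returns 32 random bytes, base64-encoded, and propagates
// any failure from the system's entropy source instead of ignoring it.
func generatePATToken() (string, error) {
	token := make([]byte, 32)
	if _, err := rand.Read(token); err != nil {
		return "", fmt.Errorf("couldn't read random bytes: %w", err)
	}
	return base64.StdEncoding.EncodeToString(token), nil
}

func main() {
	t, err := generatePATToken()
	if err != nil {
		panic(err)
	}
	fmt.Println(t)
}
```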
func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}

req := model.CreatePATRequestBody{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
user, err := auth.GetUserFromReqContext(r.Context())
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
pat := model.PAT{
Name: req.Name,
Role: req.Role,
ExpiresAt: req.ExpiresInDays,
}

pat := eeTypes.NewGettablePAT(
req.Name,
req.Role,
claims.UserID,
req.ExpiresInDays,
)
err = validatePATRequest(pat)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}

// All the PATs are associated with the user creating the PAT.
pat.UserID = user.ID
pat.CreatedAt = time.Now().Unix()
pat.UpdatedAt = time.Now().Unix()
pat.LastUsed = 0
pat.Token = generatePATToken()

if pat.ExpiresAt != 0 {
// convert expiresAt to unix timestamp from days
pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60)
}

zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
var apierr basemodel.BaseApiError
if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
if pat, apierr = ah.AppDao().CreatePAT(r.Context(), claims.OrgID, pat); apierr != nil {
RespondError(w, apierr, nil)
return
}
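`ExpiresInDays` arrives as a day count and is persisted as a unix timestamp, with zero meaning "never expires". The same arithmetic, isolated as a sketch:

```go
package main

import (
	"fmt"
	"time"
)

// expiryUnix converts a day count into an absolute unix timestamp,
// preserving the zero-means-never convention from the handler above.
func expiryUnix(expiresInDays int64) int64 {
	if expiresInDays == 0 {
		return 0 // never expires
	}
	return time.Now().Unix() + expiresInDays*24*60*60
}

func main() {
	fmt.Println(expiryUnix(30)) // 30 days from now, in unix seconds
}
```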
@@ -75,34 +55,59 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, &pat)
}

func validatePATRequest(req model.PAT) error {
if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
return fmt.Errorf("valid role is required")
func validatePATRequest(req eeTypes.GettablePAT) error {
_, err := types.NewRole(req.Role)
if err != nil {
return err
}

if req.ExpiresAt < 0 {
return fmt.Errorf("valid expiresAt is required")
}

if req.Name == "" {
return fmt.Errorf("valid name is required")
}

return nil
}
func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}

req := model.PAT{}
req := eeTypes.GettablePAT{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}

user, err := auth.GetUserFromReqContext(r.Context())
idStr := mux.Vars(r)["id"]
id, err := valuer.NewUUID(idStr)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is not a valid uuid-v7"))
return
}

//get the pat
existingPAT, err := ah.AppDao().GetPATByID(r.Context(), claims.OrgID, id)
if err != nil {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, err.Error()))
return
}

// get the user
createdByUser, err := ah.Signoz.Modules.User.GetUserByID(r.Context(), claims.OrgID, existingPAT.UserID)
if err != nil {
render.Error(w, err)
return
}

if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email)) {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user pat cannot be updated"))
return
}

@@ -112,12 +117,10 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
return
}

req.UpdatedByUserID = user.ID
id := mux.Vars(r)["id"]
req.UpdatedAt = time.Now().Unix()
zap.L().Info("Got Update PAT request", zap.Any("pat", req))
req.UpdatedByUserID = claims.UserID
req.UpdatedAt = time.Now()
var apierr basemodel.BaseApiError
if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
if apierr = ah.AppDao().UpdatePAT(r.Context(), claims.OrgID, req, id); apierr != nil {
RespondError(w, apierr, nil)
return
}
@@ -126,38 +129,56 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
}

func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
user, err := auth.GetUserFromReqContext(r.Context())
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
render.Error(w, err)
return
}
zap.L().Info("Get PATs for user", zap.String("user_id", user.ID))
pats, apierr := ah.AppDao().ListPATs(ctx)

pats, apierr := ah.AppDao().ListPATs(r.Context(), claims.OrgID)
if apierr != nil {
RespondError(w, apierr, nil)
return
}

ah.Respond(w, pats)
}

func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
id := mux.Vars(r)["id"]
user, err := auth.GetUserFromReqContext(r.Context())
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
render.Error(w, err)
return
}

zap.L().Info("Revoke PAT with id", zap.String("id", id))
if apierr := ah.AppDao().RevokePAT(ctx, id, user.ID); apierr != nil {
idStr := mux.Vars(r)["id"]
id, err := valuer.NewUUID(idStr)
if err != nil {
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is not a valid uuid-v7"))
return
}

//get the pat
existingPAT, paterr := ah.AppDao().GetPATByID(r.Context(), claims.OrgID, id)
if paterr != nil {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, paterr.Error()))
return
}

// get the user
createdByUser, err := ah.Signoz.Modules.User.GetUserByID(r.Context(), claims.OrgID, existingPAT.UserID)
if err != nil {
render.Error(w, err)
return
}

if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email)) {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user pat cannot be updated"))
return
}

zap.L().Info("Revoke PAT with id", zap.String("id", id.StringValue()))
if apierr := ah.AppDao().RevokePAT(r.Context(), claims.OrgID, id, claims.UserID); apierr != nil {
RespondError(w, apierr, nil)
return
}
@@ -6,15 +6,28 @@ import (
"io"
"net/http"

"go.signoz.io/signoz/ee/query-service/anomaly"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/ee/query-service/anomaly"
"github.com/SigNoz/signoz/pkg/http/render"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
"github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"go.uber.org/zap"
)

func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(w, err)
return
}

bodyBytes, _ := io.ReadAll(r.Body)
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
@@ -29,7 +42,7 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams.Version = "v4"

// add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
if temporalityErr != nil {
zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
@@ -85,34 +98,30 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
switch seasonality {
case anomaly.SeasonalityWeekly:
provider = anomaly.NewWeeklyProvider(
anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
anomaly.WithCache[*anomaly.WeeklyProvider](aH.Signoz.Cache),
anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
)
case anomaly.SeasonalityDaily:
provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
)
case anomaly.SeasonalityHourly:
provider = anomaly.NewHourlyProvider(
anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
anomaly.WithCache[*anomaly.HourlyProvider](aH.Signoz.Cache),
anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
)
default:
provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
)
}
anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
anomalies, err := provider.GetAnomalies(r.Context(), orgID, &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
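The anomaly providers are assembled through type-parameterized functional options, which is why each option call repeats the concrete provider type (`anomaly.WithCache[*anomaly.WeeklyProvider](...)` and friends). A generic sketch of that shape, with stand-in types rather than the real anomaly package:

```go
package main

import "fmt"

type Cache interface{ Name() string }

type memCache struct{}

func (memCache) Name() string { return "memory" }

type WeeklyProvider struct{ cache Cache }

func (p *WeeklyProvider) setCache(c Cache) { p.cache = c }

// Option is generic over the concrete provider type, so a single WithCache
// helper serves every provider without an interface-typed field.
type Option[T interface{ setCache(Cache) }] func(T)

func WithCache[T interface{ setCache(Cache) }](c Cache) Option[T] {
	return func(p T) { p.setCache(c) }
}

func NewWeeklyProvider(opts ...Option[*WeeklyProvider]) *WeeklyProvider {
	p := &WeeklyProvider{}
	for _, opt := range opts {
		opt(p)
	}
	return p
}

func main() {
	p := NewWeeklyProvider(WithCache[*WeeklyProvider](memCache{}))
	fmt.Println(p.cache.Name())
}
```

The explicit type argument at each call site is the cost of keeping the options strongly typed per provider.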
@@ -3,8 +3,8 @@ package api
import (
"net/http"

baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)

func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {
@@ -1,33 +0,0 @@
package api

import (
"net/http"

"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/model"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {

if !ah.CheckFeature(basemodel.SmartTraceDetail) {
zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
ah.APIHandler.SearchTraces(w, r)
return
}
searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
return
}

result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
if ah.HandleError(w, err, http.StatusBadRequest) {
return
}

ah.WriteJSON(w, r, result)

}
@@ -5,38 +5,35 @@ import (

"github.com/ClickHouse/clickhouse-go/v2"

"github.com/jmoiron/sqlx"

"go.signoz.io/signoz/pkg/cache"
basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/prometheus"
basechr "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
)

type ClickhouseReader struct {
conn clickhouse.Conn
appdb *sqlx.DB
appdb sqlstore.SQLStore
*basechr.ClickHouseReader
}

func NewDataConnector(
localDB *sqlx.DB,
ch clickhouse.Conn,
promConfigPath string,
lm interfaces.FeatureLookup,
sqlDB sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
prometheus prometheus.Prometheus,
cluster string,
useLogsNewSchema bool,
useTraceNewSchema bool,
fluxIntervalForTraceDetail time.Duration,
cache cache.Cache,
) *ClickhouseReader {
chReader := basechr.NewReader(localDB, ch, promConfigPath, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
chReader := basechr.NewReader(sqlDB, telemetryStore, prometheus, cluster, fluxIntervalForTraceDetail, cache)
return &ClickhouseReader{
conn: ch,
appdb: localDB,
conn: telemetryStore.ClickhouseDB(),
appdb: sqlDB,
ClickHouseReader: chReader,
}
}

func (r *ClickhouseReader) Start(readerReady chan bool) {
r.ClickHouseReader.Start(readerReady)
func (r *ClickhouseReader) GetSQLStore() sqlstore.SQLStore {
return r.appdb
}
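The EE `ClickhouseReader` keeps its decorator shape across the refactor: it embeds the base reader and adds handles of its own, now `sqlstore.SQLStore` instead of `*sqlx.DB`. A stripped-down sketch of that embedding pattern; every name below is a stand-in:

```go
package main

import "fmt"

type BaseReader struct{}

func (BaseReader) Query(q string) string { return "base: " + q }

type SQLStore struct{}

// EEReader decorates BaseReader with an extra handle, mirroring the shape
// of ClickhouseReader{conn, appdb, *basechr.ClickHouseReader}.
type EEReader struct {
	appdb SQLStore
	BaseReader
}

func (r EEReader) GetSQLStore() SQLStore { return r.appdb }

func main() {
	r := EEReader{appdb: SQLStore{}}
	fmt.Println(r.Query("SELECT 1")) // promoted method from the embedded reader
	_ = r.GetSQLStore()
}
```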
@@ -2,7 +2,6 @@ package app

import (
"context"
"errors"
"fmt"
"net"
"net/http"
@@ -12,70 +11,54 @@ import (
"github.com/gorilla/handlers"
"github.com/jmoiron/sqlx"

eemiddleware "github.com/SigNoz/signoz/ee/http/middleware"
"github.com/SigNoz/signoz/ee/query-service/app/api"
"github.com/SigNoz/signoz/ee/query-service/app/db"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/dao/sqlite"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/rules"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/http/middleware"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/web"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
eemiddleware "go.signoz.io/signoz/ee/http/middleware"
"go.signoz.io/signoz/ee/query-service/app/api"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/auth"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/rules"
"go.signoz.io/signoz/pkg/alertmanager"
"go.signoz.io/signoz/pkg/http/middleware"
"go.signoz.io/signoz/pkg/signoz"
"go.signoz.io/signoz/pkg/sqlstore"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
"go.signoz.io/signoz/pkg/web"

licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
licensepkg "github.com/SigNoz/signoz/ee/query-service/license"
"github.com/SigNoz/signoz/ee/query-service/usage"

"go.signoz.io/signoz/pkg/query-service/agentConf"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/app/opamp"
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
"go.signoz.io/signoz/pkg/query-service/app/preferences"
"go.signoz.io/signoz/pkg/query-service/cache"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
baserules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
opAmpModel "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/telemetry"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)

const AppDbEngine = "sqlite"

type ServerOptions struct {
Config signoz.Config
SigNoz *signoz.SigNoz
PromConfigPath string
SkipTopLvlOpsPath string
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
Config signoz.Config
SigNoz *signoz.SigNoz
HTTPHostPort string
PrivateHostPort string
PreferSpanMetrics bool
CacheConfigPath string
FluxInterval string
FluxIntervalForTraceDetail string
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
Jwt *authtypes.JWT
}
@@ -107,88 +90,40 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {

// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {
modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore)
if err != nil {
return nil, err
}

if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
return nil, err
}

if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}

if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
return nil, err
}

modelDao := sqlite.NewModelDao(serverOptions.SigNoz.SQLStore)
gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
if err != nil {
return nil, err
}

// initiate license manager
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB(), serverOptions.SigNoz.SQLStore.BunDB())
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB(), serverOptions.SigNoz.SQLStore, serverOptions.SigNoz.Zeus)
if err != nil {
return nil, err
}

// set license manager as feature flag provider in dao
modelDao.SetFlagProvider(lm)
readerReady := make(chan bool)

fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
if err != nil {
return nil, err
}

var reader interfaces.DataConnector
qb := db.NewDataConnector(
serverOptions.SigNoz.SQLStore.SQLxDB(),
serverOptions.SigNoz.TelemetryStore.ClickHouseDB(),
serverOptions.PromConfigPath,
lm,
reader := db.NewDataConnector(
serverOptions.SigNoz.SQLStore,
serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
fluxIntervalForTraceDetail,
serverOptions.SigNoz.Cache,
)
go qb.Start(readerReady)
reader = qb

skipConfig := &basemodel.SkipConfig{}
if serverOptions.SkipTopLvlOpsPath != "" {
// read skip config
skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
if err != nil {
return nil, err
}
}
var c cache.Cache
if serverOptions.CacheConfigPath != "" {
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
if err != nil {
return nil, err
}
c = cache.NewCache(cacheOpts)
}

<-readerReady
rm, err := makeRulesManager(
serverOptions.PromConfigPath,
serverOptions.RuleRepoURL,
serverOptions.SigNoz.SQLStore.SQLxDB(),
reader,
c,
serverOptions.DisableRules,
lm,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
serverOptions.SigNoz.Cache,
serverOptions.SigNoz.Alertmanager,
serverOptions.SigNoz.SQLStore,
serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus,
)

if err != nil {
@@ -217,7 +152,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

// ingestion pipelines manager
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
serverOptions.SigNoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
serverOptions.SigNoz.SQLStore, integrationsController.GetPipelinesForInstalledIntegrations,
)
if err != nil {
return nil, err
@@ -233,7 +168,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}

// start the usagemanager
usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickHouseDB(), serverOptions.Config.TelemetryStore.ClickHouse.DSN)
usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickhouseDB(), serverOptions.SigNoz.Zeus)
if err != nil {
return nil, err
}
@@ -243,7 +178,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}

telemetry.GetInstance().SetReader(reader)
telemetry.GetInstance().SetSqlStore(serverOptions.SigNoz.SQLStore)
telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
telemetry.GetInstance().SetSavedViewsInfoCallback(telemetry.GetSavedViewsInfo)
telemetry.GetInstance().SetAlertsInfoCallback(telemetry.GetAlertsInfo)
telemetry.GetInstance().SetGetUsersCallback(telemetry.GetUsers)
telemetry.GetInstance().SetUserCountCallback(telemetry.GetUserCount)
telemetry.GetInstance().SetDashboardsInfoCallback(telemetry.GetDashboardsInfo)

fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
if err != nil {
@@ -252,7 +193,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

apiOpts := api.APIHandlerOptions{
DataConnector: reader,
SkipConfig: skipConfig,
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
AppDao: modelDao,
RulesManager: rm,
@@ -262,12 +202,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController,
Cache: c,
FluxInterval: fluxInterval,
Gateway: gatewayProxy,
GatewayUrl: serverOptions.GatewayUrl,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
JWT: serverOptions.Jwt,
}

@@ -277,8 +214,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}

s := &Server{
// logger: logger,
// tracer: tracer,
ruleManager: rm,
serverOptions: serverOptions,
unavailableChannel: make(chan healthcheck.Status),
@@ -304,9 +239,15 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
&opAmpModel.AllAgents, agentConfMgr,
)

errorList := qb.PreloadMetricsMetadata(context.Background())
for _, er := range errorList {
zap.L().Error("failed to preload metrics metadata", zap.Error(er))
orgs, err := apiHandler.Signoz.Modules.Organization.GetAll(context.Background())
if err != nil {
return nil, err
}
for _, org := range orgs {
errorList := reader.PreloadMetricsMetadata(context.Background(), org.ID)
for _, er := range errorList {
zap.L().Error("failed to preload metrics metadata", zap.Error(er))
}
}

return s, nil
@@ -317,7 +258,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
r := baseapp.NewRouter()

r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(eemiddleware.NewPat(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
@@ -345,27 +286,11 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
}

func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {

r := baseapp.NewRouter()

// add auth middleware
getUserFromRequest := func(ctx context.Context) (*types.GettableUser, error) {
user, err := auth.GetUserFromRequestContext(ctx, apiHandler)

if err != nil {
return nil, err
}

if user.User.OrgID == "" {
return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
}

return user, nil
}
am := baseapp.NewAuthMiddleware(getUserFromRequest)
am := middleware.NewAuthZ(s.serverOptions.SigNoz.Instrumentation.Logger())

r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(eemiddleware.NewPat(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
@@ -378,6 +303,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
apiHandler.RegisterLogsRoutes(r, am)
apiHandler.RegisterIntegrationRoutes(r, am)
apiHandler.RegisterCloudIntegrationsRoutes(r, am)
apiHandler.RegisterFieldsRoutes(r, am)
apiHandler.RegisterQueryRangeV3Routes(r, am)
apiHandler.RegisterInfraMetricsRoutes(r, am)
apiHandler.RegisterQueryRangeV4Routes(r, am)
@@ -439,14 +365,8 @@ func (s *Server) initListeners() error {
}

// Start listening on http and private http port concurrently
func (s *Server) Start() error {

// initiate rule manager first
if !s.serverOptions.DisableRules {
s.ruleManager.Start()
} else {
zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
}
func (s *Server) Start(ctx context.Context) error {
s.ruleManager.Start(ctx)

err := s.initListeners()
if err != nil {
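`Start` and `Stop` now thread a `context.Context` through the rule manager and the `DisableRules` branch is gone. A minimal sketch of the resulting lifecycle shape; the types below are stand-ins, not the real server:

```go
package main

import (
	"context"
	"fmt"
)

type ruleManager struct{}

func (ruleManager) Start(ctx context.Context) { fmt.Println("rules started") }
func (ruleManager) Stop(ctx context.Context)  { fmt.Println("rules stopped") }

type Server struct{ rm ruleManager }

// Start accepts the caller's context so startup can be cancelled upstream.
func (s *Server) Start(ctx context.Context) error {
	s.rm.Start(ctx)
	return nil
}

// Stop has no caller context in this refactor, so it supplies its own.
func (s *Server) Stop() error {
	s.rm.Stop(context.Background())
	return nil
}

func main() {
	s := &Server{}
	_ = s.Start(context.Background())
	_ = s.Stop()
}
```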
@@ -527,7 +447,7 @@ func (s *Server) Stop() error {
s.opampServer.Stop()

if s.ruleManager != nil {
s.ruleManager.Stop()
s.ruleManager.Stop(context.Background())
}

// stop usage manager
@@ -537,39 +457,25 @@ func (s *Server) Stop() error {
}

func makeRulesManager(
promConfigPath,
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
cache cache.Cache,
disableRules bool,
fm baseint.FeatureLookup,
useLogsNewSchema bool,
useTraceNewSchema bool,
alertmanager alertmanager.Alertmanager,
sqlstore sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
prometheus prometheus.Prometheus,
) (*baserules.Manager, error) {
// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
if err != nil {
return nil, fmt.Errorf("failed to create pql engine : %v", err)
}

// create manager opts
managerOpts := &baserules.ManagerOptions{
PqlEngine: pqle,
RepoURL: ruleRepoURL,
TelemetryStore: telemetryStore,
Prometheus: prometheus,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
Alertmanager: alertmanager,
SQLStore: sqlstore,
@@ -1,56 +0,0 @@
package auth

import (
"context"
"fmt"
"time"

"go.signoz.io/signoz/ee/query-service/app/api"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"

"go.uber.org/zap"
)

func GetUserFromRequestContext(ctx context.Context, apiHandler *api.APIHandler) (*types.GettableUser, error) {
patToken, ok := authtypes.UUIDFromContext(ctx)
if ok && patToken != "" {
zap.L().Debug("Received a non-zero length PAT token")
ctx := context.Background()
dao := apiHandler.AppDao()

pat, err := dao.GetPAT(ctx, patToken)
if err == nil && pat != nil {
zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
return nil, fmt.Errorf("PAT has expired")
}
group, apiErr := dao.GetGroupByName(ctx, pat.Role)
if apiErr != nil {
zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
return nil, apiErr
}
user, err := dao.GetUser(ctx, pat.UserID)
if err != nil {
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
telemetry.GetInstance().SetPatTokenUser()
dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
user.User.GroupID = group.ID
user.User.ID = pat.Id
return &types.GettableUser{
User: user.User,
Role: pat.Role,
}, nil
}
if err != nil {
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
}
return baseauth.GetUserFromReqContext(ctx)
}
@@ -1,10 +0,0 @@
package dao

import (
"go.signoz.io/signoz/ee/query-service/dao/sqlite"
"go.signoz.io/signoz/pkg/sqlstore"
)

func InitDao(sqlStore sqlstore.SQLStore) (ModelDao, error) {
return sqlite.InitDB(sqlStore)
}
@@ -4,43 +4,29 @@ import (
"context"
"net/url"

eeTypes "github.com/SigNoz/signoz/ee/types"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/model"
basedao "go.signoz.io/signoz/pkg/query-service/dao"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
)

type ModelDao interface {
basedao.ModelDao

// SetFlagProvider sets the feature lookup provider
SetFlagProvider(flags baseint.FeatureLookup)

DB() *sqlx.DB

// auth methods
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*types.GettableOrgDomain, error)

// org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
ListDomains(ctx context.Context, orgId string) ([]types.GettableOrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*types.GettableOrgDomain, basemodel.BaseApiError)
CreateDomain(ctx context.Context, d *types.GettableOrgDomain) basemodel.BaseApiError
UpdateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
GetDomainByEmail(ctx context.Context, email string) (*types.GettableOrgDomain, basemodel.BaseApiError)

CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError)
UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, token string) (*types.GettableUser, basemodel.BaseApiError)
ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError)
RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError
CreatePAT(ctx context.Context, orgID string, p eeTypes.GettablePAT) (eeTypes.GettablePAT, basemodel.BaseApiError)
UpdatePAT(ctx context.Context, orgID string, p eeTypes.GettablePAT, id valuer.UUID) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*eeTypes.GettablePAT, basemodel.BaseApiError)
GetPATByID(ctx context.Context, orgID string, id valuer.UUID) (*eeTypes.GettablePAT, basemodel.BaseApiError)
ListPATs(ctx context.Context, orgID string) ([]eeTypes.GettablePAT, basemodel.BaseApiError)
RevokePAT(ctx context.Context, orgID string, id valuer.UUID, userID string) basemodel.BaseApiError
}
@@ -1,204 +0,0 @@
package sqlite

import (
"context"
"fmt"
"net/url"
"strings"
"time"

"github.com/google/uuid"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
"go.uber.org/zap"
)

func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*types.User, basemodel.BaseApiError) {
// get auth domain from email domain
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
zap.L().Error("failed to get domain from email", zap.Error(apierr))
return nil, model.InternalErrorStr("failed to get domain from email")
}
if domain == nil {
zap.L().Error("email domain does not match any authenticated domain", zap.String("email", email))
return nil, model.InternalErrorStr("email domain does not match any authenticated domain")
}

hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
if err != nil {
zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
return nil, model.InternalErrorStr("failed to generate password hash")
}

group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
if apiErr != nil {
zap.L().Error("GetGroupByName failed", zap.Error(apiErr))
return nil, apiErr
}

user := &types.User{
ID: uuid.NewString(),
Name: "",
Email: email,
Password: hash,
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
},
ProfilePictureURL: "", // Currently unused
GroupID: group.ID,
OrgID: domain.OrgId,
}

user, apiErr = m.CreateUser(ctx, user, false)
if apiErr != nil {
zap.L().Error("CreateUser failed", zap.Error(apiErr))
return nil, apiErr
}

return user, nil

}

// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError) {

userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
return "", model.BadRequestStr("invalid user email received from the auth provider")
}

user := &types.User{}

if userPayload == nil {
newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
user = newUser
if apiErr != nil {
zap.L().Error("failed to create user with email received from auth provider", zap.Error(apiErr))
return "", apiErr
}
} else {
user = &userPayload.User
}

tokenStore, err := baseauth.GenerateJWTForUser(user, jwt)
if err != nil {
zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
return "", model.InternalErrorStr("failed to generate token for the user")
}

return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
user.ID,
tokenStore.RefreshJwt), nil
}

func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
return false, apierr
}

if domain != nil && domain.SsoEnabled {
// sso is enabled, check if the user has admin role
userPayload, baseapierr := m.GetUserByEmail(ctx, email)

if baseapierr != nil || userPayload == nil {
return false, baseapierr
}

if userPayload.Role != baseconst.AdminGroup {
return false, model.BadRequest(fmt.Errorf("auth method not supported"))
}

}

return true, nil
}

// PrecheckLogin is called when the login or signup page is loaded
// to check sso login is to be prompted
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) {

// assume user is valid unless proven otherwise
resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}

// check if email is a valid user
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
if baseApiErr != nil {
return resp, baseApiErr
}

if userPayload == nil {
resp.IsUser = false
}

ssoAvailable := true
err := m.checkFeature(model.SSO)
if err != nil {
switch err.(type) {
case basemodel.ErrFeatureUnavailable:
// do nothing, just skip sso
ssoAvailable = false
default:
zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
return resp, model.BadRequestStr(err.Error())
}
}

if ssoAvailable {

resp.IsUser = true

// find domain from email
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
var emailDomain string
emailComponents := strings.Split(email, "@")
if len(emailComponents) > 0 {
emailDomain = emailComponents[1]
}
zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
return resp, apierr
}

if orgDomain != nil && orgDomain.SsoEnabled {
// saml is enabled for this domain, let's prepare the sso url

if sourceUrl == "" {
sourceUrl = constants.GetDefaultSiteURL()
}

// parse source url that generated the login request
var err error
escapedUrl, _ := url.QueryUnescape(sourceUrl)
siteUrl, err := url.Parse(escapedUrl)
if err != nil {
zap.L().Error("failed to parse referer", zap.Error(err))
return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
}

// build IdP URL that will authenticate the user;
// the front-end will redirect the user to this url
resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)

if err != nil {
zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
return resp, model.InternalError(err)
}

// set SSO to true, as the url is generated correctly
resp.SSO = true
}
}
return resp, nil
}
@@ -9,32 +9,23 @@ import (
"strings"
"time"

"github.com/SigNoz/signoz/ee/query-service/model"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
ossTypes "github.com/SigNoz/signoz/pkg/types"
"github.com/google/uuid"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

// StoredDomain represents stored database record for org domain

type StoredDomain struct {
Id uuid.UUID `db:"id"`
Name string `db:"name"`
OrgId string `db:"org_id"`
Data string `db:"data"`
CreatedAt int64 `db:"created_at"`
UpdatedAt int64 `db:"updated_at"`
}

// GetDomainFromSsoResponse uses relay state received from IdP to fetch
// user domain. The domain is further used to process validity of the response.
// when sending login request to IdP we send relay state as URL (site url)
// with domainId or domainName as query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*types.GettableOrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
var domainNameStr string
var domain *model.OrgDomain
var domain *types.GettableOrgDomain

for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
@@ -53,7 +44,7 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
}

domain, err = m.GetDomain(ctx, domainId)
if (err != nil) || domain == nil {
if err != nil {
zap.L().Error("failed to find domain from domainId received in IdP response", zap.Error(err))
return nil, fmt.Errorf("invalid credentials")
}
@@ -63,7 +54,7 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url

domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
domain = domainFromDB
if (err != nil) || domain == nil {
if err != nil {
zap.L().Error("failed to find domain from domainName received in IdP response", zap.Error(err))
return nil, fmt.Errorf("invalid credentials")
}
@@ -76,10 +67,14 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
}

// GetDomainByName returns org domain for a given domain name
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*types.GettableOrgDomain, basemodel.BaseApiError) {

stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)
stored := types.StorableOrgDomain{}
err := m.sqlStore.BunDB().NewSelect().
Model(&stored).
Where("name = ?", name).
Limit(1).
Scan(ctx)

if err != nil {
if err == sql.ErrNoRows {
@@ -88,7 +83,7 @@ func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.Org
return nil, model.InternalError(err)
}

domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
@@ -96,10 +91,14 @@ func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.Org
}
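The domain queries move from raw SQL through sqlx to Bun's query builder against the same SQL store. A self-contained sketch of the equivalent Bun select against an in-memory SQLite database; the table and model here are illustrative, not the real `org_domains` schema:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/sqlitedialect"
	_ "modernc.org/sqlite"
)

type OrgDomain struct {
	bun.BaseModel `bun:"table:org_domains"`

	ID   string `bun:"id,pk"`
	Name string `bun:"name"`
}

func main() {
	sqldb, err := sql.Open("sqlite", "file::memory:?cache=shared")
	if err != nil {
		panic(err)
	}
	db := bun.NewDB(sqldb, sqlitedialect.New())
	ctx := context.Background()

	if _, err := db.NewCreateTable().Model((*OrgDomain)(nil)).Exec(ctx); err != nil {
		panic(err)
	}
	if _, err := db.NewInsert().Model(&OrgDomain{ID: "1", Name: "example.com"}).Exec(ctx); err != nil {
		panic(err)
	}

	// Equivalent of: SELECT * FROM org_domains WHERE name = ? LIMIT 1
	stored := OrgDomain{}
	if err := db.NewSelect().Model(&stored).Where("name = ?", "example.com").Limit(1).Scan(ctx); err != nil {
		panic(err)
	}
	fmt.Println(stored.ID, stored.Name)
}
```

The builder keeps the query typed against the model struct, which is what lets the diff drop the hand-written `SELECT * FROM org_domains ...` strings.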
// GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*types.GettableOrgDomain, basemodel.BaseApiError) {

stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)
stored := types.StorableOrgDomain{}
err := m.sqlStore.BunDB().NewSelect().
Model(&stored).
Where("id = ?", id).
Limit(1).
Scan(ctx)

if err != nil {
if err == sql.ErrNoRows {
@@ -108,7 +107,7 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomai
return nil, model.InternalError(err)
}

domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
@@ -116,21 +115,24 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomai
}

// ListDomains gets the list of auth domains by org id
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
domains := []model.OrgDomain{}
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]types.GettableOrgDomain, basemodel.BaseApiError) {
domains := []types.GettableOrgDomain{}

stored := []StoredDomain{}
err := m.DB().SelectContext(ctx, &stored, `SELECT * FROM org_domains WHERE org_id=$1`, orgId)
stored := []types.StorableOrgDomain{}
err := m.sqlStore.BunDB().NewSelect().
Model(&stored).
Where("org_id = ?", orgId).
Scan(ctx)

if err != nil {
if err == sql.ErrNoRows {
return []model.OrgDomain{}, nil
return domains, nil
}
return nil, model.InternalError(err)
}

for _, s := range stored {
domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
domain := types.GettableOrgDomain{StorableOrgDomain: s}
if err := domain.LoadConfig(s.Data); err != nil {
zap.L().Error("ListDomains() failed", zap.Error(err))
}
@@ -141,14 +143,14 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
}

// CreateDomain creates a new auth domain
func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
func (m *modelDao) CreateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError {

if domain.Id == uuid.Nil {
domain.Id = uuid.New()
if domain.ID == uuid.Nil {
domain.ID = uuid.New()
}

if domain.OrgId == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
if domain.OrgID == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgID, Name "))
}

configJson, err := json.Marshal(domain)
@@ -157,14 +159,17 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
return model.InternalError(fmt.Errorf("domain creation failed"))
}

_, err = m.DB().ExecContext(ctx,
"INSERT INTO org_domains (id, name, org_id, data, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6)",
|
||||
domain.Id,
|
||||
domain.Name,
|
||||
domain.OrgId,
|
||||
configJson,
|
||||
time.Now().Unix(),
|
||||
time.Now().Unix())
|
||||
storableDomain := types.StorableOrgDomain{
|
||||
ID: domain.ID,
|
||||
Name: domain.Name,
|
||||
OrgID: domain.OrgID,
|
||||
Data: string(configJson),
|
||||
TimeAuditable: ossTypes.TimeAuditable{CreatedAt: time.Now(), UpdatedAt: time.Now()},
|
||||
}
|
||||
|
||||
_, err = m.sqlStore.BunDB().NewInsert().
|
||||
Model(&storableDomain).
|
||||
Exec(ctx)
|
||||
|
||||
if err != nil {
|
||||
zap.L().Error("failed to insert domain in db", zap.Error(err))
|
||||
@@ -175,9 +180,9 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
|
||||
}
|
||||
|
||||
// UpdateDomain updates stored config params for a domain
|
||||
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
|
||||
func (m *modelDao) UpdateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError {
|
||||
|
||||
if domain.Id == uuid.Nil {
|
||||
if domain.ID == uuid.Nil {
|
||||
zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
|
||||
return model.InternalError(fmt.Errorf("domain update failed"))
|
||||
}
|
||||
@@ -188,11 +193,19 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
|
||||
return model.InternalError(fmt.Errorf("domain update failed"))
|
||||
}
|
||||
|
||||
_, err = m.DB().ExecContext(ctx,
|
||||
"UPDATE org_domains SET data = $1, updated_at = $2 WHERE id = $3",
|
||||
configJson,
|
||||
time.Now().Unix(),
|
||||
domain.Id)
|
||||
storableDomain := &types.StorableOrgDomain{
|
||||
ID: domain.ID,
|
||||
Name: domain.Name,
|
||||
OrgID: domain.OrgID,
|
||||
Data: string(configJson),
|
||||
TimeAuditable: ossTypes.TimeAuditable{UpdatedAt: time.Now()},
|
||||
}
|
||||
|
||||
_, err = m.sqlStore.BunDB().NewUpdate().
|
||||
Model(storableDomain).
|
||||
Column("data", "updated_at").
|
||||
WherePK().
|
||||
Exec(ctx)
|
||||
|
||||
if err != nil {
|
||||
zap.L().Error("domain update failed", zap.Error(err))
|
||||
@@ -210,9 +223,11 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
|
||||
return model.InternalError(fmt.Errorf("domain delete failed"))
|
||||
}
|
||||
|
||||
_, err := m.DB().ExecContext(ctx,
|
||||
"DELETE FROM org_domains WHERE id = $1",
|
||||
id)
|
||||
storableDomain := &types.StorableOrgDomain{ID: id}
|
||||
_, err := m.sqlStore.BunDB().NewDelete().
|
||||
Model(storableDomain).
|
||||
WherePK().
|
||||
Exec(ctx)
|
||||
|
||||
if err != nil {
|
||||
zap.L().Error("domain delete failed", zap.Error(err))
|
||||
@@ -222,7 +237,7 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {
|
||||
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*types.GettableOrgDomain, basemodel.BaseApiError) {
|
||||
|
||||
if email == "" {
|
||||
return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
|
||||
@@ -235,8 +250,12 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
|
||||
|
||||
parsedDomain := components[1]
|
||||
|
||||
stored := StoredDomain{}
|
||||
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, parsedDomain)
|
||||
stored := types.StorableOrgDomain{}
|
||||
err := m.sqlStore.BunDB().NewSelect().
|
||||
Model(&stored).
|
||||
Where("name = ?", parsedDomain).
|
||||
Limit(1).
|
||||
Scan(ctx)
|
||||
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
@@ -245,7 +264,7 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
|
||||
return nil, model.InternalError(err)
|
||||
}
|
||||
|
||||
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
||||
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
|
||||
if err := domain.LoadConfig(stored.Data); err != nil {
|
||||
return nil, model.InternalError(err)
|
||||
}
|
||||
|
||||
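The hunks above replace hand-written SQL (`m.DB().Get(...)`) with bun's fluent query builder. A minimal, self-contained sketch of the same select-by-name pattern, assuming an illustrative `StorableOrgDomain` model and a `*bun.DB` handle (the names here are stand-ins, not the repository's actual types):

```go
package main

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/uptrace/bun"
)

// StorableOrgDomain mirrors the stored row; field set trimmed for illustration.
type StorableOrgDomain struct {
	bun.BaseModel `bun:"table:org_domains"`

	ID   string `bun:"id,pk"`
	Name string `bun:"name"`
	Data string `bun:"data"`
}

// getDomainByName shows the select pattern from the diff: build the query
// fluently, scan into the storable struct, and map sql.ErrNoRows to a
// "not found" result instead of an internal error.
func getDomainByName(ctx context.Context, db *bun.DB, name string) (*StorableOrgDomain, error) {
	stored := StorableOrgDomain{}
	err := db.NewSelect().
		Model(&stored).
		Where("name = ?", name).
		Limit(1).
		Scan(ctx)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, nil // no domain registered under this name
		}
		return nil, fmt.Errorf("query org_domains: %w", err)
	}
	return &stored, nil
}
```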
@@ -1,46 +1,18 @@
package sqlite

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	basedao "go.signoz.io/signoz/pkg/query-service/dao"
	basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
	"go.signoz.io/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/modules/user"
	"github.com/SigNoz/signoz/pkg/modules/user/impluser"
	"github.com/SigNoz/signoz/pkg/sqlstore"
)

type modelDao struct {
	*basedsql.ModelDaoSqlite
	flags baseint.FeatureLookup
}

// SetFlagProvider sets the feature lookup provider
func (m *modelDao) SetFlagProvider(flags baseint.FeatureLookup) {
	m.flags = flags
}

// CheckFeature confirms if a feature is available
func (m *modelDao) checkFeature(key string) error {
	if m.flags == nil {
		return fmt.Errorf("flag provider not set")
	}

	return m.flags.CheckFeature(key)
	userModule user.Module
	sqlStore   sqlstore.SQLStore
}

// InitDB creates and extends base model DB repository
func InitDB(sqlStore sqlstore.SQLStore) (*modelDao, error) {
	dao, err := basedsql.InitDB(sqlStore)
	if err != nil {
		return nil, err
	}
	// set package variable so dependent base methods (e.g. AuthCache) will work
	basedao.SetDB(dao)
	m := &modelDao{ModelDaoSqlite: dao}
	return m, nil
}

func (m *modelDao) DB() *sqlx.DB {
	return m.ModelDaoSqlite.DB()
func NewModelDao(sqlStore sqlstore.SQLStore) *modelDao {
	userModule := impluser.NewModule(impluser.NewStore(sqlStore))
	return &modelDao{userModule: userModule, sqlStore: sqlStore}
}
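The rewrite above drops the package-level `InitDB`/`SetDB` wiring in favor of a plain constructor. A rough sketch of the dependency-injection shape this moves toward, with hypothetical interface stand-ins for the real `sqlstore.SQLStore` and `user.Module`:

```go
package sqlite

// SQLStore and UserModule are illustrative stand-ins for the real interfaces;
// the point is that dependencies arrive through the constructor instead of
// being set on package globals, so tests can pass in fakes.
type SQLStore interface{ /* e.g. BunDB() *bun.DB */ }
type UserModule interface{ /* e.g. GetUserByID(...) */ }

type modelDao struct {
	userModule UserModule
	sqlStore   SQLStore
}

// NewModelDao wires the DAO explicitly; no init-order or global state to manage.
func NewModelDao(store SQLStore, users UserModule) *modelDao {
	return &modelDao{userModule: users, sqlStore: store}
}
```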
@@ -3,65 +3,60 @@ package sqlite
import (
	"context"
	"fmt"
	"strconv"
	"time"

	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/types"
	"github.com/SigNoz/signoz/ee/query-service/model"
	"github.com/SigNoz/signoz/ee/types"
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	ossTypes "github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"

	"go.uber.org/zap"
)

func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) {
	result, err := m.DB().ExecContext(ctx,
		"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
		p.UserID,
		p.Token,
		p.Role,
		p.Name,
		p.CreatedAt,
		p.ExpiresAt,
		p.UpdatedAt,
		p.UpdatedByUserID,
		p.LastUsed,
		p.Revoked,
	)
func (m *modelDao) CreatePAT(ctx context.Context, orgID string, p types.GettablePAT) (types.GettablePAT, basemodel.BaseApiError) {
	p.StorablePersonalAccessToken.OrgID = orgID
	p.StorablePersonalAccessToken.ID = valuer.GenerateUUID()
	_, err := m.sqlStore.BunDB().NewInsert().
		Model(&p.StorablePersonalAccessToken).
		Exec(ctx)
	if err != nil {
		zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
		return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
		return types.GettablePAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
	}
	id, err := result.LastInsertId()
	if err != nil {
		zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
		return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
	}
	p.Id = strconv.Itoa(int(id))
	createdByUser, _ := m.GetUser(ctx, p.UserID)

	createdByUser, _ := m.userModule.GetUserByID(ctx, orgID, p.UserID)
	if createdByUser == nil {
		p.CreatedByUser = model.User{
		p.CreatedByUser = types.PatUser{
			NotFound: true,
		}
	} else {
		p.CreatedByUser = model.User{
			Id:                createdByUser.ID,
			Name:              createdByUser.Name,
			Email:             createdByUser.Email,
			CreatedAt:         createdByUser.CreatedAt.Unix(),
			ProfilePictureURL: createdByUser.ProfilePictureURL,
			NotFound:          false,
		p.CreatedByUser = types.PatUser{
			User: ossTypes.User{
				Identifiable: ossTypes.Identifiable{
					ID: createdByUser.ID,
				},
				DisplayName: createdByUser.DisplayName,
				Email:       createdByUser.Email,
				TimeAuditable: ossTypes.TimeAuditable{
					CreatedAt: createdByUser.CreatedAt,
					UpdatedAt: createdByUser.UpdatedAt,
				},
			},
			NotFound: false,
		}
	}
	return p, nil
}

func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError {
	_, err := m.DB().ExecContext(ctx,
		"UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;",
		p.Role,
		p.Name,
		p.UpdatedAt,
		p.UpdatedByUserID,
		id)
func (m *modelDao) UpdatePAT(ctx context.Context, orgID string, p types.GettablePAT, id valuer.UUID) basemodel.BaseApiError {
	_, err := m.sqlStore.BunDB().NewUpdate().
		Model(&p.StorablePersonalAccessToken).
		Column("role", "name", "updated_at", "updated_by_user_id").
		Where("id = ?", id.StringValue()).
		Where("org_id = ?", orgID).
		Where("revoked = false").
		Exec(ctx)
	if err != nil {
		zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
		return model.InternalError(fmt.Errorf("PAT update failed"))
@@ -69,66 +64,84 @@ func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemo
	return nil
}

func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) basemodel.BaseApiError {
	_, err := m.DB().ExecContext(ctx,
		"UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;",
		lastUsed,
		token)
	if err != nil {
		zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
		return model.InternalError(fmt.Errorf("PAT last used update failed"))
	}
	return nil
}
func (m *modelDao) ListPATs(ctx context.Context, orgID string) ([]types.GettablePAT, basemodel.BaseApiError) {
	pats := []types.StorablePersonalAccessToken{}

func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) {
	pats := []model.PAT{}

	if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
	if err := m.sqlStore.BunDB().NewSelect().
		Model(&pats).
		Where("revoked = false").
		Where("org_id = ?", orgID).
		Order("updated_at DESC").
		Scan(ctx); err != nil {
		zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
		return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
	}

	patsWithUsers := []types.GettablePAT{}
	for i := range pats {
		createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
		patWithUser := types.GettablePAT{
			StorablePersonalAccessToken: pats[i],
		}

		createdByUser, _ := m.userModule.GetUserByID(ctx, orgID, pats[i].UserID)
		if createdByUser == nil {
			pats[i].CreatedByUser = model.User{
			patWithUser.CreatedByUser = types.PatUser{
				NotFound: true,
			}
		} else {
			pats[i].CreatedByUser = model.User{
				Id:                createdByUser.ID,
				Name:              createdByUser.Name,
				Email:             createdByUser.Email,
				CreatedAt:         createdByUser.CreatedAt.Unix(),
				ProfilePictureURL: createdByUser.ProfilePictureURL,
				NotFound:          false,
			patWithUser.CreatedByUser = types.PatUser{
				User: ossTypes.User{
					Identifiable: ossTypes.Identifiable{
						ID: createdByUser.ID,
					},
					DisplayName: createdByUser.DisplayName,
					Email:       createdByUser.Email,
					TimeAuditable: ossTypes.TimeAuditable{
						CreatedAt: createdByUser.CreatedAt,
						UpdatedAt: createdByUser.UpdatedAt,
					},
				},
				NotFound: false,
			}
		}

		updatedByUser, _ := m.GetUser(ctx, pats[i].UpdatedByUserID)
		updatedByUser, _ := m.userModule.GetUserByID(ctx, orgID, pats[i].UpdatedByUserID)
		if updatedByUser == nil {
			pats[i].UpdatedByUser = model.User{
			patWithUser.UpdatedByUser = types.PatUser{
				NotFound: true,
			}
		} else {
			pats[i].UpdatedByUser = model.User{
				Id:                updatedByUser.ID,
				Name:              updatedByUser.Name,
				Email:             updatedByUser.Email,
				CreatedAt:         updatedByUser.CreatedAt.Unix(),
				ProfilePictureURL: updatedByUser.ProfilePictureURL,
				NotFound:          false,
			patWithUser.UpdatedByUser = types.PatUser{
				User: ossTypes.User{
					Identifiable: ossTypes.Identifiable{
						ID: updatedByUser.ID,
					},
					DisplayName: updatedByUser.DisplayName,
					Email:       updatedByUser.Email,
					TimeAuditable: ossTypes.TimeAuditable{
						CreatedAt: updatedByUser.CreatedAt,
						UpdatedAt: updatedByUser.UpdatedAt,
					},
				},
				NotFound: false,
			}
		}

		patsWithUsers = append(patsWithUsers, patWithUser)
	}
	return pats, nil
	return patsWithUsers, nil
}

func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError {
func (m *modelDao) RevokePAT(ctx context.Context, orgID string, id valuer.UUID, userID string) basemodel.BaseApiError {
	updatedAt := time.Now().Unix()
	_, err := m.DB().ExecContext(ctx,
		"UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
		userID, updatedAt, id)
	_, err := m.sqlStore.BunDB().NewUpdate().
		Model(&types.StorablePersonalAccessToken{}).
		Set("revoked = ?", true).
		Set("updated_by_user_id = ?", userID).
		Set("updated_at = ?", updatedAt).
		Where("id = ?", id.StringValue()).
		Where("org_id = ?", orgID).
		Exec(ctx)
	if err != nil {
		zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
		return model.InternalError(fmt.Errorf("PAT revoke failed"))
@@ -136,10 +149,14 @@ func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) base
	return nil
}

func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
	pats := []model.PAT{}
func (m *modelDao) GetPAT(ctx context.Context, token string) (*types.GettablePAT, basemodel.BaseApiError) {
	pats := []types.StorablePersonalAccessToken{}

	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil {
	if err := m.sqlStore.BunDB().NewSelect().
		Model(&pats).
		Where("token = ?", token).
		Where("revoked = false").
		Scan(ctx); err != nil {
		return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
	}

@@ -150,13 +167,22 @@ func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemo
		}
	}

	return &pats[0], nil
	patWithUser := types.GettablePAT{
		StorablePersonalAccessToken: pats[0],
	}

	return &patWithUser, nil
}

func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
	pats := []model.PAT{}
func (m *modelDao) GetPATByID(ctx context.Context, orgID string, id valuer.UUID) (*types.GettablePAT, basemodel.BaseApiError) {
	pats := []types.StorablePersonalAccessToken{}

	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil {
	if err := m.sqlStore.BunDB().NewSelect().
		Model(&pats).
		Where("id = ?", id.StringValue()).
		Where("org_id = ?", orgID).
		Where("revoked = false").
		Scan(ctx); err != nil {
		return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
	}

@@ -167,34 +193,9 @@ func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basem
		}
	}

	return &pats[0], nil
}

// deprecated
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*types.GettableUser, basemodel.BaseApiError) {
	users := []types.GettableUser{}

	query := `SELECT
		u.id,
		u.name,
		u.email,
		u.password,
		u.created_at,
		u.profile_picture_url,
		u.org_id,
		u.group_id
		FROM users u, personal_access_tokens p
		WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`

	if err := m.DB().Select(&users, query, token); err != nil {
		return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
	patWithUser := types.GettablePAT{
		StorablePersonalAccessToken: pats[0],
	}

	if len(users) != 1 {
		return nil, &model.ApiError{
			Typ: model.ErrorInternal,
			Err: fmt.Errorf("found zero or multiple users with same PAT token"),
		}
	}
	return &users[0], nil
	return &patWithUser, nil
}
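The PAT hunks show two bun update styles: `Column(...)` applied to a populated model, and `Set(...)` for targeted column writes. A small sketch of the revoke-style update under illustrative assumptions (the model struct below is trimmed and the names are stand-ins):

```go
package main

import (
	"context"
	"time"

	"github.com/uptrace/bun"
)

// StorablePersonalAccessToken is an illustrative model; only the columns
// needed by the sketch are declared.
type StorablePersonalAccessToken struct {
	bun.BaseModel `bun:"table:personal_access_tokens"`

	ID      string `bun:"id,pk"`
	OrgID   string `bun:"org_id"`
	Revoked bool   `bun:"revoked"`
}

// revokePAT writes only the touched columns via Set(), and the org_id filter
// keeps the update scoped to the caller's organization, mirroring the diff.
func revokePAT(ctx context.Context, db *bun.DB, orgID, id, userID string) error {
	_, err := db.NewUpdate().
		Model(&StorablePersonalAccessToken{}).
		Set("revoked = ?", true).
		Set("updated_by_user_id = ?", userID).
		Set("updated_at = ?", time.Now().Unix()).
		Where("id = ?", id).
		Where("org_id = ?", orgID).
		Exec(ctx)
	return err
}
```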
@@ -1,16 +0,0 @@
package signozio

type status string

type ValidateLicenseResponse struct {
	Status status                 `json:"status"`
	Data   map[string]interface{} `json:"data"`
}

type CheckoutSessionRedirect struct {
	RedirectURL string `json:"url"`
}
type CheckoutResponse struct {
	Status status                  `json:"status"`
	Data   CheckoutSessionRedirect `json:"data"`
}
@@ -1,223 +1,67 @@
package signozio

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/pkg/errors"

	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/model"
	"github.com/SigNoz/signoz/ee/query-service/model"
	"github.com/SigNoz/signoz/pkg/zeus"
	"github.com/tidwall/gjson"
)

var C *Client

const (
	POST             = "POST"
	APPLICATION_JSON = "application/json"
)

type Client struct {
	Prefix     string
	GatewayUrl string
}

func New() *Client {
	return &Client{
		Prefix:     constants.LicenseSignozIo,
		GatewayUrl: constants.ZeusURL,
	}
}

func init() {
	C = New()
}

func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {

	// Creating an HTTP client with a timeout for better control
	client := &http.Client{
		Timeout: 10 * time.Second,
	}

	req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
	if err != nil {
		return nil, model.BadRequest(errors.Wrap(err, "failed to create request"))
	}

	// Setting the custom header
	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)

	response, err := client.Do(req)
	if err != nil {
		return nil, model.BadRequest(errors.Wrap(err, "failed to make post request"))
	}

	body, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read validation response from %v", C.GatewayUrl)))
	}

	defer response.Body.Close()

	switch response.StatusCode {
	case 200:
		a := ValidateLicenseResponse{}
		err = json.Unmarshal(body, &a)
		if err != nil {
			return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
		}

		license, err := model.NewLicenseV3(a.Data)
		if err != nil {
			return nil, model.BadRequest(errors.Wrap(err, "failed to generate new license v3"))
		}

		return license, nil
	case 400:
		return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
			fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
	case 401:
		return nil, model.Unauthorized(errors.Wrap(fmt.Errorf(string(body)),
			fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
	default:
		return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
			fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
	}

}

func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, POST, url, body)
func ValidateLicenseV3(ctx context.Context, licenseKey string, zeus zeus.Zeus) (*model.LicenseV3, error) {
	data, err := zeus.GetLicense(ctx, licenseKey)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", contentType)
	return req, err

	var m map[string]any
	if err = json.Unmarshal(data, &m); err != nil {
		return nil, err
	}

	license, err := model.NewLicenseV3(m)
	if err != nil {
		return nil, err
	}

	return license, nil
}

// SendUsage reports the usage of signoz to license server
func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
	reqString, _ := json.Marshal(usage)
	req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
func SendUsage(ctx context.Context, usage model.UsagePayload, zeus zeus.Zeus) error {
	body, err := json.Marshal(usage)
	if err != nil {
		return model.BadRequest(errors.Wrap(err, "unable to create http request"))
		return err
	}

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
	}

	body, err := io.ReadAll(res.Body)
	if err != nil {
		return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
	}

	defer res.Body.Close()

	switch res.StatusCode {
	case 200, 201:
		return nil
	case 400, 401:
		return model.BadRequest(errors.Wrap(errors.New(string(body)),
			"bad request error received from license.signoz.io"))
	default:
		return model.InternalError(errors.Wrap(errors.New(string(body)),
			"internal error received from license.signoz.io"))
	}
	return zeus.PutMeters(ctx, usage.LicenseKey.String(), body)
}

func CheckoutSession(ctx context.Context, checkoutRequest *model.CheckoutRequest, licenseKey string) (string, *model.ApiError) {
	hClient := &http.Client{}

	reqString, err := json.Marshal(checkoutRequest)
func CheckoutSession(ctx context.Context, checkoutRequest *model.CheckoutRequest, licenseKey string, zeus zeus.Zeus) (string, error) {
	body, err := json.Marshal(checkoutRequest)
	if err != nil {
		return "", model.BadRequest(err)
		return "", err
	}

	req, err := http.NewRequestWithContext(ctx, "POST", C.GatewayUrl+"/v2/subscriptions/me/sessions/checkout", bytes.NewBuffer(reqString))
	response, err := zeus.GetCheckoutURL(ctx, licenseKey, body)
	if err != nil {
		return "", model.BadRequest(err)
		return "", err
	}
	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)

	response, err := hClient.Do(req)
	if err != nil {
		return "", model.BadRequest(err)
	}
	body, err := io.ReadAll(response.Body)
	if err != nil {
		return "", model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read checkout response from %v", C.GatewayUrl)))
	}
	defer response.Body.Close()

	switch response.StatusCode {
	case 201:
		a := CheckoutResponse{}
		err = json.Unmarshal(body, &a)
		if err != nil {
			return "", model.BadRequest(errors.Wrap(err, "failed to unmarshal zeus checkout response"))
		}
		return a.Data.RedirectURL, nil
	case 400:
		return "", model.BadRequest(errors.Wrap(errors.New(string(body)),
			fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
	case 401:
		return "", model.Unauthorized(errors.Wrap(errors.New(string(body)),
			fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
	default:
		return "", model.InternalError(errors.Wrap(errors.New(string(body)),
			fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
	}
	return gjson.GetBytes(response, "url").String(), nil
}

func PortalSession(ctx context.Context, checkoutRequest *model.PortalRequest, licenseKey string) (string, *model.ApiError) {
	hClient := &http.Client{}

	reqString, err := json.Marshal(checkoutRequest)
func PortalSession(ctx context.Context, portalRequest *model.PortalRequest, licenseKey string, zeus zeus.Zeus) (string, error) {
	body, err := json.Marshal(portalRequest)
	if err != nil {
		return "", model.BadRequest(err)
		return "", err
	}

	req, err := http.NewRequestWithContext(ctx, "POST", C.GatewayUrl+"/v2/subscriptions/me/sessions/portal", bytes.NewBuffer(reqString))
	response, err := zeus.GetPortalURL(ctx, licenseKey, body)
	if err != nil {
		return "", model.BadRequest(err)
		return "", err
	}
	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)

	response, err := hClient.Do(req)
	if err != nil {
		return "", model.BadRequest(err)
	}
	body, err := io.ReadAll(response.Body)
	if err != nil {
		return "", model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read portal response from %v", C.GatewayUrl)))
	}
	defer response.Body.Close()

	switch response.StatusCode {
	case 201:
		a := CheckoutResponse{}
		err = json.Unmarshal(body, &a)
		if err != nil {
			return "", model.BadRequest(errors.Wrap(err, "failed to unmarshal zeus portal response"))
		}
		return a.Data.RedirectURL, nil
	case 400:
		return "", model.BadRequest(errors.Wrap(errors.New(string(body)),
			fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
	case 401:
		return "", model.Unauthorized(errors.Wrap(errors.New(string(body)),
			fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
	default:
		return "", model.InternalError(errors.Wrap(errors.New(string(body)),
			fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
	}
	return gjson.GetBytes(response, "url").String(), nil
}
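Instead of unmarshalling the gateway response into the deleted `CheckoutResponse` struct, the new code pulls the single `url` field out with gjson. A toy example of that extraction (the payload and URL below are made up):

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// Stand-in for the raw bytes returned by the zeus checkout/portal call.
	response := []byte(`{"url": "https://billing.example.com/session/abc123"}`)

	// gjson reads one field without declaring a response struct; a missing
	// key yields an empty string rather than an error.
	fmt.Println(gjson.GetBytes(response, "url").String())
}
```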
@@ -1,12 +1,11 @@
package interfaces

import (
	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
)

// Connector defines methods for interaction
// with o11y data. for example - clickhouse
type DataConnector interface {
	Start(readerReady chan bool)
	baseint.Reader
}
@@ -9,25 +9,25 @@ import (

	"github.com/jmoiron/sqlx"
	"github.com/mattn/go-sqlite3"
	"github.com/uptrace/bun"

	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/types"
	"github.com/SigNoz/signoz/ee/query-service/model"
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"go.uber.org/zap"
)

// Repo is license repo. stores license keys in a secured DB
type Repo struct {
	db    *sqlx.DB
	bundb *bun.DB
	store sqlstore.SQLStore
}

// NewLicenseRepo initiates a new license repo
func NewLicenseRepo(db *sqlx.DB, bundb *bun.DB) Repo {
func NewLicenseRepo(db *sqlx.DB, store sqlstore.SQLStore) Repo {
	return Repo{
		db:    db,
		bundb: bundb,
		store: store,
	}
}

@@ -171,7 +171,7 @@ func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {

func (r *Repo) CreateFeature(req *types.FeatureStatus) *basemodel.ApiError {

	_, err := r.bundb.NewInsert().
	_, err := r.store.BunDB().NewInsert().
		Model(req).
		Exec(context.Background())
	if err != nil {
@@ -183,7 +183,7 @@ func (r *Repo) CreateFeature(req *types.FeatureStatus) *basemodel.ApiError {
func (r *Repo) GetFeature(featureName string) (types.FeatureStatus, error) {
	var feature types.FeatureStatus

	err := r.bundb.NewSelect().
	err := r.store.BunDB().NewSelect().
		Model(&feature).
		Where("name = ?", featureName).
		Scan(context.Background())
@@ -212,7 +212,7 @@ func (r *Repo) GetAllFeatures() ([]basemodel.Feature, error) {

func (r *Repo) UpdateFeature(req types.FeatureStatus) error {

	_, err := r.bundb.NewUpdate().
	_, err := r.store.BunDB().NewUpdate().
		Model(&req).
		Where("name = ?", req.Name).
		Exec(context.Background())
@@ -6,19 +6,18 @@ import (
	"time"

	"github.com/jmoiron/sqlx"
	"github.com/pkg/errors"
	"github.com/uptrace/bun"

	"sync"

	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/types"
	"go.signoz.io/signoz/pkg/types/authtypes"
	baseconstants "github.com/SigNoz/signoz/pkg/query-service/constants"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/zeus"

	validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.signoz.io/signoz/pkg/query-service/telemetry"
	validate "github.com/SigNoz/signoz/ee/query-service/integrations/signozio"
	"github.com/SigNoz/signoz/ee/query-service/model"
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/query-service/telemetry"
	"go.uber.org/zap"
)

@@ -29,6 +28,7 @@ var validationFrequency = 24 * 60 * time.Minute

type Manager struct {
	repo             *Repo
	zeus             zeus.Zeus
	mutex            sync.Mutex
	validatorRunning bool
	// end the license validation, this is important to gracefully
@@ -45,14 +45,15 @@ type Manager struct {
	activeFeatures basemodel.FeatureSet
}

func StartManager(db *sqlx.DB, bundb *bun.DB, features ...basemodel.Feature) (*Manager, error) {
func StartManager(db *sqlx.DB, store sqlstore.SQLStore, zeus zeus.Zeus, features ...basemodel.Feature) (*Manager, error) {
	if LM != nil {
		return LM, nil
	}

	repo := NewLicenseRepo(db, bundb)
	repo := NewLicenseRepo(db, store)
	m := &Manager{
		repo: &repo,
		zeus: zeus,
	}
	if err := m.start(features...); err != nil {
		return m, err
@@ -172,17 +173,15 @@ func (lm *Manager) ValidatorV3(ctx context.Context) {
	}
}

func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {

	license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
	if apiError != nil {
		zap.L().Error("failed to validate license", zap.Error(apiError.Err))
		return apiError
func (lm *Manager) RefreshLicense(ctx context.Context) error {
	license, err := validate.ValidateLicenseV3(ctx, lm.activeLicenseV3.Key, lm.zeus)
	if err != nil {
		return err
	}

	err := lm.repo.UpdateLicenseV3(ctx, license)
	err = lm.repo.UpdateLicenseV3(ctx, license)
	if err != nil {
		return model.BadRequest(errors.Wrap(err, "failed to update the new license"))
		return err
	}
	lm.SetActiveV3(license)

@@ -190,7 +189,6 @@ func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {
}

func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
	zap.L().Info("License validation started")
	if lm.activeLicenseV3 == nil {
		return nil
	}
@@ -236,28 +234,17 @@ func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
	return nil
}

func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
	defer func() {
		if errResponse != nil {
			claims, ok := authtypes.ClaimsFromContext(ctx)
			if ok {
				telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
					map[string]interface{}{"err": errResponse.Err.Error()}, claims.Email, true, false)
			}
		}
	}()

	license, apiError := validate.ValidateLicenseV3(licenseKey)
	if apiError != nil {
		zap.L().Error("failed to get the license", zap.Error(apiError.Err))
		return nil, apiError
func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (*model.LicenseV3, error) {
	license, err := validate.ValidateLicenseV3(ctx, licenseKey, lm.zeus)
	if err != nil {
		return nil, err
	}

	// insert the new license to the sqlite db
	err := lm.repo.InsertLicenseV3(ctx, license)
	if err != nil {
		zap.L().Error("failed to activate license", zap.Error(err))
		return nil, err
	modelErr := lm.repo.InsertLicenseV3(ctx, license)
	if modelErr != nil {
		zap.L().Error("failed to activate license", zap.Error(modelErr))
		return nil, modelErr
	}

	// license is valid, activate it
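`RefreshLicense` now returns a plain `error` and is driven by the manager's validator loop on `validationFrequency`. A minimal sketch of such a ticker-driven refresh loop; the 24h interval and the refresh callback are stand-ins, not the manager's exact wiring:

```go
package main

import (
	"context"
	"log"
	"time"
)

// runValidator re-validates on a fixed interval and exits cleanly when the
// context is cancelled, which is what makes graceful shutdown possible.
func runValidator(ctx context.Context, refresh func(context.Context) error) {
	ticker := time.NewTicker(24 * time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// A failed refresh is logged and retried on the next tick
			// rather than tearing the loop down.
			if err := refresh(ctx); err != nil {
				log.Printf("license refresh failed: %v", err)
			}
		}
	}
}
```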
@@ -3,89 +3,38 @@ package main
import (
	"context"
	"flag"
	"log"
	"os"
	"os/signal"
	"strconv"
	"time"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
	"go.signoz.io/signoz/ee/query-service/app"
	"go.signoz.io/signoz/pkg/config"
	"go.signoz.io/signoz/pkg/config/envprovider"
	"go.signoz.io/signoz/pkg/config/fileprovider"
	"go.signoz.io/signoz/pkg/query-service/auth"
	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/version"
	"go.signoz.io/signoz/pkg/signoz"
	"go.signoz.io/signoz/pkg/types/authtypes"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	prommodel "github.com/prometheus/common/model"

	zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
	zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"
	eeuserimpl "github.com/SigNoz/signoz/ee/modules/user/impluser"
	"github.com/SigNoz/signoz/ee/query-service/app"
	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
	"github.com/SigNoz/signoz/ee/zeus"
	"github.com/SigNoz/signoz/ee/zeus/httpzeus"
	"github.com/SigNoz/signoz/pkg/config"
	"github.com/SigNoz/signoz/pkg/config/envprovider"
	"github.com/SigNoz/signoz/pkg/config/fileprovider"
	"github.com/SigNoz/signoz/pkg/modules/user"
	baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/version"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
// Deprecated: Please use the logger from pkg/instrumentation.
func initZapLog() *zap.Logger {
	config := zap.NewProductionConfig()
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
	config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
	consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig)
	defaultLogLevel := zapcore.InfoLevel

	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNameKey.String("query-service"),
	)

	core := zapcore.NewTee(
		zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
	)

	if enableQueryServiceLogOTLPExport {
		ctx, cancel := context.WithTimeout(ctx, time.Second*30)
		defer cancel()
		conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
		if err != nil {
			log.Fatalf("failed to establish connection: %v", err)
		} else {
			logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
			if err != nil {
				logExportBatchSizeInt = 512
			}
			ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
				BatchSize:      logExportBatchSizeInt,
				ResourceSchema: semconv.SchemaURL,
				Resource:       res,
			}))
			core = zapcore.NewTee(
				zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
				zapcore.NewCore(otlpEncoder, zapcore.NewMultiWriteSyncer(ws), defaultLogLevel),
			)
		}
	}
	logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))

	logger, _ := config.Build()
	return logger
}

func init() {
	prommodel.NameValidationScheme = prommodel.UTF8Validation
}

func main() {
	var promConfigPath, skipTopLvlOpsPath string

@@ -99,7 +48,6 @@ func main() {
	var useLogsNewSchema bool
	var useTraceNewSchema bool
	var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string
	var enableQueryServiceLogOTLPExport bool
	var preferSpanMetrics bool

	var maxIdleConns int
@@ -108,32 +56,39 @@ func main() {
	var gatewayUrl string
	var useLicensesV3 bool

	// Deprecated
	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
	// Deprecated
	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
	// Deprecated
	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
	// Deprecated
	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
	// Deprecated
	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
	flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
	// Deprecated
	flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
	// Deprecated
	flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
	// Deprecated
	flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
	// Deprecated
	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
	// Deprecated
	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
	flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
	flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
	// Deprecated
	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
	flag.Parse()

	loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)

	loggerMgr := initZapLog()
	zap.ReplaceGlobals(loggerMgr)
	defer loggerMgr.Sync() // flushes buffer, if any

	version.PrintVersion()

	config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
		Uris: []string{"env:"},
		ProviderFactories: []config.ProviderFactory{
@@ -144,21 +99,37 @@ func main() {
		MaxIdleConns: maxIdleConns,
		MaxOpenConns: maxOpenConns,
		DialTimeout:  dialTimeout,
		Config:       promConfigPath,
	})
	if err != nil {
		zap.L().Fatal("Failed to create config", zap.Error(err))
	}

	version.Info.PrettyPrint(config.Version)

	sqlStoreFactories := signoz.NewSQLStoreProviderFactories()
	if err := sqlStoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
		zap.L().Fatal("Failed to add postgressqlstore factory", zap.Error(err))
	}

	signoz, err := signoz.New(
		context.Background(),
		config,
		zeus.Config(),
		httpzeus.NewProviderFactory(),
		signoz.NewCacheProviderFactories(),
		signoz.NewWebProviderFactories(),
		signoz.NewSQLStoreProviderFactories(),
		sqlStoreFactories,
		signoz.NewTelemetryStoreProviderFactories(),
		func(sqlstore sqlstore.SQLStore) user.Module {
			return eeuserimpl.NewModule(eeuserimpl.NewStore(sqlstore))
		},
		func(userModule user.Module) user.Handler {
			return eeuserimpl.NewHandler(userModule)
		},
	)
	if err != nil {
		zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
		zap.L().Fatal("Failed to create signoz", zap.Error(err))
	}

	jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
@@ -175,19 +146,12 @@ func main() {
		Config:                     config,
		SigNoz:                     signoz,
		HTTPHostPort:               baseconst.HTTPHostPort,
		PromConfigPath:             promConfigPath,
		SkipTopLvlOpsPath:          skipTopLvlOpsPath,
		PreferSpanMetrics:          preferSpanMetrics,
		PrivateHostPort:            baseconst.PrivateHostPort,
		DisableRules:               disableRules,
		RuleRepoURL:                ruleRepoURL,
		CacheConfigPath:            cacheConfigPath,
		FluxInterval:               fluxInterval,
		FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
		Cluster:                    cluster,
		GatewayUrl:                 gatewayUrl,
		UseLogsNewSchema:           useLogsNewSchema,
		UseTraceNewSchema:          useTraceNewSchema,
		Jwt:                        jwt,
	}

@@ -196,14 +160,10 @@ func main() {
		zap.L().Fatal("Failed to create server", zap.Error(err))
	}

	if err := server.Start(); err != nil {
	if err := server.Start(context.Background()); err != nil {
		zap.L().Fatal("Could not start server", zap.Error(err))
	}

	if err := auth.InitAuthCache(context.Background()); err != nil {
		zap.L().Fatal("Failed to initialize auth cache", zap.Error(err))
	}

	signoz.Start(context.Background())

	if err := signoz.Wait(context.Background()); err != nil {
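The replacement `initZapLog` delegates everything to zap's stock production config rather than assembling cores and an OTLP syncer by hand. The same idea in isolation, as a minimal sketch:

```go
package main

import "go.uber.org/zap"

// newLogger builds a JSON production logger from zap's default config.
// Customizations (time format, level encoding) would be set on cfg before
// Build; the diff's version simply discards the Build error with `_`.
func newLogger() *zap.Logger {
	cfg := zap.NewProductionConfig()
	logger, err := cfg.Build()
	if err != nil {
		panic(err) // logger construction failing at startup is fatal anyway
	}
	return logger
}
```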
@@ -1,12 +0,0 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
// GettableInvitation overrides base object and adds precheck into
|
||||
// response
|
||||
type GettableInvitation struct {
|
||||
*basemodel.InvitationResponseObject
|
||||
Precheck *basemodel.PrecheckResponse `json:"precheck"`
|
||||
}
|
||||
@@ -3,7 +3,7 @@ package model
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
type ApiError struct {
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/pkg/errors"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
type License struct {
|
||||
@@ -157,8 +157,6 @@ func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
|
||||
}
|
||||
|
||||
switch planName {
|
||||
case PlanNameTeams:
|
||||
features = append(features, ProPlan...)
|
||||
case PlanNameEnterprise:
|
||||
features = append(features, EnterprisePlan...)
|
||||
case PlanNameBasic:
|
||||
|
||||
@@ -4,10 +4,10 @@ import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
func TestNewLicenseV3(t *testing.T) {
|
||||
@@ -74,21 +74,21 @@ func TestNewLicenseV3(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "Parse the entire license properly",
|
||||
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
|
||||
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"ENTERPRISE"},"valid_from": 1730899309,"valid_until": -1}`),
|
||||
pass: true,
|
||||
expected: &LicenseV3{
|
||||
ID: "does-not-matter",
|
||||
Key: "does-not-matter-key",
|
||||
Data: map[string]interface{}{
|
||||
"plan": map[string]interface{}{
|
||||
"name": "TEAMS",
|
||||
"name": "ENTERPRISE",
|
||||
},
|
||||
"category": "FREE",
|
||||
"status": "ACTIVE",
|
||||
"valid_from": float64(1730899309),
|
||||
"valid_until": float64(-1),
|
||||
},
|
||||
PlanName: PlanNameTeams,
|
||||
PlanName: PlanNameEnterprise,
|
||||
ValidFrom: 1730899309,
|
||||
ValidUntil: -1,
|
||||
Status: "ACTIVE",
|
||||
@@ -98,14 +98,14 @@ func TestNewLicenseV3(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "Fallback to basic plan if license status is invalid",
|
||||
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
|
||||
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"ENTERPRISE"},"valid_from": 1730899309,"valid_until": -1}`),
|
||||
pass: true,
|
||||
expected: &LicenseV3{
|
||||
ID: "does-not-matter",
|
||||
Key: "does-not-matter-key",
|
||||
Data: map[string]interface{}{
|
||||
"plan": map[string]interface{}{
|
||||
"name": "TEAMS",
|
||||
"name": "ENTERPRISE",
|
||||
},
|
||||
"category": "FREE",
|
||||
"status": "INVALID",
|
||||
@@ -122,21 +122,21 @@ func TestNewLicenseV3(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "fallback states for validFrom and validUntil",
|
||||
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`),
|
||||
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"ENTERPRISE"},"valid_from":1234.456,"valid_until":5678.567}`),
|
||||
pass: true,
|
||||
expected: &LicenseV3{
|
||||
ID: "does-not-matter",
|
||||
Key: "does-not-matter-key",
|
||||
Data: map[string]interface{}{
|
||||
"plan": map[string]interface{}{
|
||||
"name": "TEAMS",
|
||||
"name": "ENTERPRISE",
|
||||
},
|
||||
"valid_from": 1234.456,
|
||||
"valid_until": 5678.567,
|
||||
"category": "FREE",
|
||||
"status": "ACTIVE",
|
||||
},
|
||||
PlanName: PlanNameTeams,
|
||||
PlanName: PlanNameEnterprise,
|
||||
ValidFrom: 1234,
|
||||
ValidUntil: 5678,
|
||||
Status: "ACTIVE",
|
||||
|
||||
@@ -1,32 +1,7 @@
|
||||
package model
|
||||
|
||||
type User struct {
|
||||
Id string `json:"id" db:"id"`
|
||||
Name string `json:"name" db:"name"`
|
||||
Email string `json:"email" db:"email"`
|
||||
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
||||
ProfilePictureURL string `json:"profilePictureURL" db:"profile_picture_url"`
|
||||
NotFound bool `json:"notFound"`
|
||||
}
|
||||
|
||||
type CreatePATRequestBody struct {
|
||||
Name string `json:"name"`
|
||||
Role string `json:"role"`
|
||||
ExpiresInDays int64 `json:"expiresInDays"`
|
||||
}
|
||||
|
||||
type PAT struct {
|
||||
Id string `json:"id" db:"id"`
|
||||
UserID string `json:"userId" db:"user_id"`
|
||||
CreatedByUser User `json:"createdByUser"`
|
||||
UpdatedByUser User `json:"updatedByUser"`
|
||||
Token string `json:"token" db:"token"`
|
||||
Role string `json:"role" db:"role"`
|
||||
Name string `json:"name" db:"name"`
|
||||
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
||||
ExpiresAt int64 `json:"expiresAt" db:"expires_at"`
|
||||
UpdatedAt int64 `json:"updatedAt" db:"updated_at"`
|
||||
LastUsed int64 `json:"lastUsed" db:"last_used"`
|
||||
Revoked bool `json:"revoked" db:"revoked"`
|
||||
UpdatedByUserID string `json:"updatedByUserId" db:"updated_by_user_id"`
|
||||
}
|
||||
|
||||
@@ -1,30 +1,26 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"go.signoz.io/signoz/pkg/query-service/constants"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
const SSO = "SSO"
|
||||
const Basic = "BASIC_PLAN"
|
||||
const Pro = "PRO_PLAN"
|
||||
const Enterprise = "ENTERPRISE_PLAN"
|
||||
|
||||
var (
|
||||
PlanNameEnterprise = "ENTERPRISE"
|
||||
PlanNameTeams = "TEAMS"
|
||||
PlanNameBasic = "BASIC"
|
||||
)
|
||||
|
||||
var (
|
||||
MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
|
||||
MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameEnterprise: Enterprise}
|
||||
)
|
||||
|
||||
var (
|
||||
LicenseStatusInvalid = "INVALID"
|
||||
)
|
||||
|
||||
const DisableUpsell = "DISABLE_UPSELL"
|
||||
const Onboarding = "ONBOARDING"
|
||||
const ChatSupport = "CHAT_SUPPORT"
|
||||
const Gateway = "GATEWAY"
|
||||
@@ -38,90 +34,6 @@ var BasicPlan = basemodel.FeatureSet{
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.OSS,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       DisableUpsell,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.SmartTraceDetail,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.CustomMetricsFunction,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderPanels,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderAlerts,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelSlack,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelWebhook,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelPagerduty,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelOpsgenie,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelEmail,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelMsTeams,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:   basemodel.UseSpanMetrics,
        Active: false,
@@ -151,134 +63,12 @@ var BasicPlan = basemodel.FeatureSet{
        Route: "",
    },
    basemodel.Feature{
        Name:       basemodel.HostsInfraMonitoring,
        Active:     constants.EnableHostsInfraMonitoring(),
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
}

var ProPlan = basemodel.FeatureSet{
    basemodel.Feature{
        Name:       SSO,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
-       Name:       basemodel.OSS,
+       Name:       basemodel.TraceFunnels,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.SmartTraceDetail,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.CustomMetricsFunction,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderPanels,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderAlerts,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelSlack,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelWebhook,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelPagerduty,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelOpsgenie,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelEmail,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelMsTeams,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.UseSpanMetrics,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       Gateway,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       PremiumSupport,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AnomalyDetection,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.HostsInfraMonitoring,
        Active:     constants.EnableHostsInfraMonitoring(),
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
}

var EnterprisePlan = basemodel.FeatureSet{
@@ -289,83 +79,6 @@ var EnterprisePlan = basemodel.FeatureSet{
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.OSS,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.SmartTraceDetail,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.CustomMetricsFunction,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderPanels,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.QueryBuilderAlerts,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelSlack,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelWebhook,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelPagerduty,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelOpsgenie,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelEmail,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.AlertChannelMsTeams,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:   basemodel.UseSpanMetrics,
        Active: false,
@@ -409,8 +122,8 @@ var EnterprisePlan = basemodel.FeatureSet{
        Route: "",
    },
    basemodel.Feature{
-       Name:       basemodel.HostsInfraMonitoring,
-       Active:     constants.EnableHostsInfraMonitoring(),
+       Name:       basemodel.TraceFunnels,
+       Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
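The plan files above define each tier as a flat basemodel.FeatureSet, a list of Feature values with a name, an active flag, and usage limits. A minimal standalone sketch of how such a set can be queried for gating; the Feature fields mirror the ones in the diff, but the IsActive helper is hypothetical and not the SigNoz API.

package main

import "fmt"

type Feature struct {
    Name       string
    Active     bool
    Usage      int64
    UsageLimit int64
    Route      string
}

type FeatureSet []Feature

// IsActive reports whether a named feature exists in the set and is enabled.
func (fs FeatureSet) IsActive(name string) bool {
    for _, f := range fs {
        if f.Name == name {
            return f.Active
        }
    }
    return false
}

func main() {
    basic := FeatureSet{
        {Name: "QUERY_BUILDER_ALERTS", Active: true, UsageLimit: -1},
        {Name: "SSO", Active: false, UsageLimit: -1},
    }
    fmt.Println(basic.IsActive("SSO")) // false: SSO stays off in the basic plan
}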
@@ -11,22 +11,24 @@ import (

    "go.uber.org/zap"

-   "go.signoz.io/signoz/ee/query-service/anomaly"
-   "go.signoz.io/signoz/pkg/query-service/cache"
-   "go.signoz.io/signoz/pkg/query-service/common"
-   "go.signoz.io/signoz/pkg/query-service/model"
+   "github.com/SigNoz/signoz/ee/query-service/anomaly"
+   "github.com/SigNoz/signoz/pkg/cache"
+   "github.com/SigNoz/signoz/pkg/query-service/common"
+   "github.com/SigNoz/signoz/pkg/query-service/model"
+   ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+   "github.com/SigNoz/signoz/pkg/valuer"

-   querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
-   "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
-   "go.signoz.io/signoz/pkg/query-service/interfaces"
-   v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
-   "go.signoz.io/signoz/pkg/query-service/utils/labels"
-   "go.signoz.io/signoz/pkg/query-service/utils/times"
-   "go.signoz.io/signoz/pkg/query-service/utils/timestamp"
+   querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
+   "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+   "github.com/SigNoz/signoz/pkg/query-service/interfaces"
+   v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
+   "github.com/SigNoz/signoz/pkg/query-service/utils/labels"
+   "github.com/SigNoz/signoz/pkg/query-service/utils/times"
+   "github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"

-   "go.signoz.io/signoz/pkg/query-service/formatter"
+   "github.com/SigNoz/signoz/pkg/query-service/formatter"

-   baserules "go.signoz.io/signoz/pkg/query-service/rules"
+   baserules "github.com/SigNoz/signoz/pkg/query-service/rules"

    yaml "gopkg.in/yaml.v2"
)
@@ -52,8 +54,8 @@ type AnomalyRule struct {

func NewAnomalyRule(
    id string,
-   p *baserules.PostableRule,
-   featureFlags interfaces.FeatureLookup,
+   orgID valuer.UUID,
+   p *ruletypes.PostableRule,
    reader interfaces.Reader,
    cache cache.Cache,
    opts ...baserules.RuleOption,
@@ -61,12 +63,12 @@ func NewAnomalyRule(

    zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))

-   if p.RuleCondition.CompareOp == baserules.ValueIsBelow {
+   if p.RuleCondition.CompareOp == ruletypes.ValueIsBelow {
        target := -1 * *p.RuleCondition.Target
        p.RuleCondition.Target = &target
    }

-   baseRule, err := baserules.NewBaseRule(id, p, reader, opts...)
+   baseRule, err := baserules.NewBaseRule(id, orgID, p, reader, opts...)
    if err != nil {
        return nil, err
    }
@@ -89,10 +91,9 @@ func NewAnomalyRule(
    zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String()))

    querierOptsV2 := querierV2.QuerierOptions{
-       Reader:        reader,
-       Cache:         cache,
-       KeyGenerator:  queryBuilder.NewKeyGenerator(),
-       FeatureLookup: featureFlags,
+       Reader:       reader,
+       Cache:        cache,
+       KeyGenerator: queryBuilder.NewKeyGenerator(),
    }

    t.querierV2 = querierV2.NewQuerier(querierOptsV2)
@@ -102,27 +103,24 @@ func NewAnomalyRule(
            anomaly.WithCache[*anomaly.HourlyProvider](cache),
            anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
            anomaly.WithReader[*anomaly.HourlyProvider](reader),
-           anomaly.WithFeatureLookup[*anomaly.HourlyProvider](featureFlags),
        )
    } else if t.seasonality == anomaly.SeasonalityDaily {
        t.provider = anomaly.NewDailyProvider(
            anomaly.WithCache[*anomaly.DailyProvider](cache),
            anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
            anomaly.WithReader[*anomaly.DailyProvider](reader),
-           anomaly.WithFeatureLookup[*anomaly.DailyProvider](featureFlags),
        )
    } else if t.seasonality == anomaly.SeasonalityWeekly {
        t.provider = anomaly.NewWeeklyProvider(
            anomaly.WithCache[*anomaly.WeeklyProvider](cache),
            anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
            anomaly.WithReader[*anomaly.WeeklyProvider](reader),
-           anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](featureFlags),
        )
    }
    return &t, nil
}

-func (r *AnomalyRule) Type() baserules.RuleType {
+func (r *AnomalyRule) Type() ruletypes.RuleType {
    return RuleTypeAnomaly
}
@@ -162,18 +160,18 @@ func (r *AnomalyRule) GetSelectedQuery() string {
    return r.Condition().GetSelectedQueryName()
}

-func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, ts time.Time) (baserules.Vector, error) {
+func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {

    params, err := r.prepareQueryRange(ts)
    if err != nil {
        return nil, err
    }
-   err = r.PopulateTemporality(ctx, params)
+   err = r.PopulateTemporality(ctx, orgID, params)
    if err != nil {
        return nil, fmt.Errorf("internal error while setting temporality")
    }

-   anomalies, err := r.provider.GetAnomalies(ctx, &anomaly.GetAnomaliesRequest{
+   anomalies, err := r.provider.GetAnomalies(ctx, orgID, &anomaly.GetAnomaliesRequest{
        Params:      params,
        Seasonality: r.seasonality,
    })
@@ -189,7 +187,7 @@ func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, ts time.Time) (baser
        }
    }

-   var resultVector baserules.Vector
+   var resultVector ruletypes.Vector

    scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
    zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON)))
@@ -208,7 +206,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
    prevState := r.State()

    valueFormatter := formatter.FromUnit(r.Unit())
-   res, err := r.buildAndRunQuery(ctx, ts)
+   res, err := r.buildAndRunQuery(ctx, r.OrgID(), ts)

    if err != nil {
        return nil, err
@@ -218,7 +216,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
    defer r.mtx.Unlock()

    resultFPs := map[uint64]struct{}{}
-   var alerts = make(map[uint64]*baserules.Alert, len(res))
+   var alerts = make(map[uint64]*ruletypes.Alert, len(res))

    for _, smpl := range res {
        l := make(map[string]string, len(smpl.Metric))
@@ -230,7 +228,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
        threshold := valueFormatter.Format(r.TargetVal(), r.Unit())
        zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))

-       tmplData := baserules.AlertTemplateData(l, value, threshold)
+       tmplData := ruletypes.AlertTemplateData(l, value, threshold)
        // Inject some convenience variables that are easier to remember for users
        // who are not used to Go's templating system.
        defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}"
@@ -238,7 +236,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
        // utility function to apply go template on labels and annotations
        expand := func(text string) string {

-           tmpl := baserules.NewTemplateExpander(
+           tmpl := ruletypes.NewTemplateExpander(
                ctx,
                defs+text,
                "__alert_"+r.Name(),
@@ -283,7 +281,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
            return nil, err
        }

-       alerts[h] = &baserules.Alert{
+       alerts[h] = &ruletypes.Alert{
            Labels:            lbs,
            QueryResultLables: resultLabels,
            Annotations:       annotations,
@@ -324,7 +322,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
        if _, ok := resultFPs[fp]; !ok {
            // If the alert was previously firing, keep it around for a given
            // retention time so it is reported as resolved to the AlertManager.
-           if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > baserules.ResolvedRetention) {
+           if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > ruletypes.ResolvedRetention) {
                delete(r.Active, fp)
            }
            if a.State != model.StateInactive {
@@ -380,10 +378,10 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro

func (r *AnomalyRule) String() string {

-   ar := baserules.PostableRule{
+   ar := ruletypes.PostableRule{
        AlertName:         r.Name(),
        RuleCondition:     r.Condition(),
-       EvalWindow:        baserules.Duration(r.EvalWindow()),
+       EvalWindow:        ruletypes.Duration(r.EvalWindow()),
        Labels:            r.Labels().Map(),
        Annotations:       r.Annotations().Map(),
        PreferredChannels: r.PreferredChannels(),
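The Eval path above prepends a defs string so alert annotations can use $labels, $value, and $threshold instead of Go template dot-notation. A self-contained sketch of that exact trick; only the defs string is taken from the diff, the surrounding types here are illustrative.

package main

import (
    "fmt"
    "os"
    "text/template"
)

type alertData struct {
    Labels    map[string]string
    Value     string
    Threshold string
}

func main() {
    // Same convenience-variable definitions as in AnomalyRule.Eval.
    defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}"
    text := "service {{$labels.service}} is anomalous: value {{$value}} crossed {{$threshold}}"

    tmpl := template.Must(template.New("alert").Parse(defs + text))
    data := alertData{
        Labels:    map[string]string{"service": "checkout"},
        Value:     "42.1",
        Threshold: "30",
    }
    if err := tmpl.Execute(os.Stdout, data); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}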
@@ -5,10 +5,12 @@ import (
    "fmt"
    "time"

+   basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
+   baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
+   "github.com/SigNoz/signoz/pkg/query-service/utils/labels"
+   ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/google/uuid"
-   basemodel "go.signoz.io/signoz/pkg/query-service/model"
-   baserules "go.signoz.io/signoz/pkg/query-service/rules"
-   "go.signoz.io/signoz/pkg/query-service/utils/labels"
    "go.uber.org/zap"
)
@@ -18,15 +20,13 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
    var task baserules.Task

    ruleId := baserules.RuleIdFromTaskName(opts.TaskName)
-   if opts.Rule.RuleType == baserules.RuleTypeThreshold {
+   if opts.Rule.RuleType == ruletypes.RuleTypeThreshold {
        // create a threshold rule
        tr, err := baserules.NewThresholdRule(
            ruleId,
+           opts.OrgID,
            opts.Rule,
-           opts.FF,
            opts.Reader,
-           opts.UseLogsNewSchema,
-           opts.UseTraceNewSchema,
            baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
            baserules.WithSQLStore(opts.SQLStore),
        )
@@ -38,17 +38,18 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
        rules = append(rules, tr)

        // create ch rule task for evaluation
-       task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)
+       task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)

-   } else if opts.Rule.RuleType == baserules.RuleTypeProm {
+   } else if opts.Rule.RuleType == ruletypes.RuleTypeProm {

        // create promql rule
        pr, err := baserules.NewPromRule(
            ruleId,
+           opts.OrgID,
            opts.Rule,
            opts.Logger,
            opts.Reader,
            opts.ManagerOpts.PqlEngine,
            opts.ManagerOpts.Prometheus,
            baserules.WithSQLStore(opts.SQLStore),
        )
@@ -59,14 +60,14 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
        rules = append(rules, pr)

        // create promql rule task for evaluation
-       task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)
+       task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)

-   } else if opts.Rule.RuleType == baserules.RuleTypeAnomaly {
+   } else if opts.Rule.RuleType == ruletypes.RuleTypeAnomaly {
        // create anomaly rule
        ar, err := NewAnomalyRule(
            ruleId,
+           opts.OrgID,
            opts.Rule,
-           opts.FF,
            opts.Reader,
            opts.Cache,
            baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
@@ -79,10 +80,10 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
        rules = append(rules, ar)

        // create anomaly rule task for evaluation
-       task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)
+       task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)

    } else {
-       return nil, fmt.Errorf("unsupported rule type %s. Supported types: %s, %s", opts.Rule.RuleType, baserules.RuleTypeProm, baserules.RuleTypeThreshold)
+       return nil, fmt.Errorf("unsupported rule type %s. Supported types: %s, %s", opts.Rule.RuleType, ruletypes.RuleTypeProm, ruletypes.RuleTypeThreshold)
    }

    return task, nil
@@ -107,12 +108,12 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
    }

    // append name to indicate this is test alert
-   parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, baserules.TestAlertPostFix)
+   parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, ruletypes.TestAlertPostFix)

    var rule baserules.Rule
    var err error

-   if parsedRule.RuleType == baserules.RuleTypeThreshold {
+   if parsedRule.RuleType == ruletypes.RuleTypeThreshold {

        // add special labels for test alerts
        parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
@@ -122,45 +123,44 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
        // create a threshold rule
        rule, err = baserules.NewThresholdRule(
            alertname,
+           opts.OrgID,
            parsedRule,
-           opts.FF,
            opts.Reader,
-           opts.UseLogsNewSchema,
-           opts.UseTraceNewSchema,
            baserules.WithSendAlways(),
            baserules.WithSendUnmatched(),
            baserules.WithSQLStore(opts.SQLStore),
        )

        if err != nil {
-           zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
+           zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", alertname), zap.Error(err))
            return 0, basemodel.BadRequest(err)
        }

-   } else if parsedRule.RuleType == baserules.RuleTypeProm {
+   } else if parsedRule.RuleType == ruletypes.RuleTypeProm {

        // create promql rule
        rule, err = baserules.NewPromRule(
            alertname,
+           opts.OrgID,
            parsedRule,
            opts.Logger,
            opts.Reader,
            opts.ManagerOpts.PqlEngine,
            opts.ManagerOpts.Prometheus,
            baserules.WithSendAlways(),
            baserules.WithSendUnmatched(),
            baserules.WithSQLStore(opts.SQLStore),
        )

        if err != nil {
-           zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
+           zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", alertname), zap.Error(err))
            return 0, basemodel.BadRequest(err)
        }
-   } else if parsedRule.RuleType == baserules.RuleTypeAnomaly {
+   } else if parsedRule.RuleType == ruletypes.RuleTypeAnomaly {
        // create anomaly rule
        rule, err = NewAnomalyRule(
            alertname,
+           opts.OrgID,
            parsedRule,
-           opts.FF,
            opts.Reader,
            opts.Cache,
            baserules.WithSendAlways(),
@@ -168,7 +168,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
            baserules.WithSQLStore(opts.SQLStore),
        )
        if err != nil {
-           zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", rule.Name()), zap.Error(err))
+           zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", alertname), zap.Error(err))
            return 0, basemodel.BadRequest(err)
        }
    } else {
@@ -194,9 +194,9 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap

// newTask returns an appropriate group for
// rule type
-func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, ruleDB baserules.RuleDB) baserules.Task {
+func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, maintenanceStore ruletypes.MaintenanceStore, orgID valuer.UUID) baserules.Task {
    if taskType == baserules.TaskTypeCh {
-       return baserules.NewRuleTask(name, "", frequency, rules, opts, notify, ruleDB)
+       return baserules.NewRuleTask(name, "", frequency, rules, opts, notify, maintenanceStore, orgID)
    }
    return baserules.NewPromRuleTask(name, "", frequency, rules, opts, notify, maintenanceStore, orgID)
}
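The trailing baserules.With* arguments passed to the rule constructors above follow Go's functional-options pattern: each With* call returns a closure that mutates the rule while it is being built. A minimal sketch of that pattern with hypothetical names; the real RuleOption type lives in the SigNoz rules package.

package main

import (
    "fmt"
    "time"
)

type rule struct {
    evalDelay  time.Duration
    sendAlways bool
}

// RuleOption mutates a rule during construction.
type RuleOption func(*rule)

func WithEvalDelay(d time.Duration) RuleOption {
    return func(r *rule) { r.evalDelay = d }
}

func WithSendAlways() RuleOption {
    return func(r *rule) { r.sendAlways = true }
}

func newRule(opts ...RuleOption) *rule {
    r := &rule{}
    for _, opt := range opts {
        opt(r)
    }
    return r
}

func main() {
    r := newRule(WithEvalDelay(2*time.Minute), WithSendAlways())
    fmt.Println(r.evalDelay, r.sendAlways) // 2m0s true
}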
@@ -1,31 +0,0 @@
-package sso
-
-import (
-	"net/http"
-)
-
-// SSOIdentity contains details of user received from SSO provider
-type SSOIdentity struct {
-	UserID            string
-	Username          string
-	PreferredUsername string
-	Email             string
-	EmailVerified     bool
-	ConnectorData     []byte
-}
-
-// OAuthCallbackProvider is an interface implemented by connectors which use an OAuth
-// style redirect flow to determine user information.
-type OAuthCallbackProvider interface {
-	// The initial URL user would be redirect to.
-	// OAuth2 implementations support various scopes but we only need profile and user as
-	// the roles are still being managed in SigNoz.
-	BuildAuthURL(state string) (string, error)
-
-	// Handle the callback to the server (after login at oauth provider site)
-	// and return a email identity.
-	// At the moment we dont support auto signup flow (based on domain), so
-	// the full identity (including name, group etc) is not required outside of the
-	// connector
-	HandleCallback(r *http.Request) (identity *SSOIdentity, err error)
-}
@@ -4,7 +4,6 @@ import (
    "context"
    "encoding/json"
    "fmt"
-   "regexp"
    "strings"
    "sync/atomic"
    "time"
@@ -15,11 +14,11 @@ import (

    "go.uber.org/zap"

-   "go.signoz.io/signoz/ee/query-service/dao"
-   licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
-   "go.signoz.io/signoz/ee/query-service/license"
-   "go.signoz.io/signoz/ee/query-service/model"
-   "go.signoz.io/signoz/pkg/query-service/utils/encryption"
+   "github.com/SigNoz/signoz/ee/query-service/dao"
+   "github.com/SigNoz/signoz/ee/query-service/license"
+   "github.com/SigNoz/signoz/ee/query-service/model"
+   "github.com/SigNoz/signoz/pkg/query-service/utils/encryption"
+   "github.com/SigNoz/signoz/pkg/zeus"
)

const (
@@ -42,26 +41,16 @@ type Manager struct {

    modelDao dao.ModelDao

-   tenantID string
+   zeus zeus.Zeus
}

-func New(modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn, chUrl string) (*Manager, error) {
-   hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
-   hostNameRegexMatches := hostNameRegex.FindStringSubmatch(chUrl)
-
-   tenantID := ""
-   if len(hostNameRegexMatches) == 2 {
-       tenantID = hostNameRegexMatches[1]
-       tenantID = strings.TrimSuffix(tenantID, "-clickhouse")
-   }
-
+func New(modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn, zeus zeus.Zeus) (*Manager, error) {
    m := &Manager{
        // repository: repo,
        clickhouseConn: clickhouseConn,
        licenseRepo:    licenseRepo,
        scheduler:      gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every day at 00:00 UTC
        modelDao:       modelDao,
-       tenantID:       tenantID,
+       zeus:           zeus,
    }
    return m, nil
}
@@ -138,15 +127,6 @@ func (lm *Manager) UploadUsage() {

    zap.L().Info("uploading usage data")

-   orgName := ""
-   orgNames, orgError := lm.modelDao.GetOrgs(ctx)
-   if orgError != nil {
-       zap.L().Error("failed to get org data: %v", zap.Error(orgError))
-   }
-   if len(orgNames) == 1 {
-       orgName = orgNames[0].Name
-   }
-
    usagesPayload := []model.Usage{}
    for _, usage := range usages {
        usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
@@ -166,8 +146,8 @@ func (lm *Manager) UploadUsage() {
        usageData.ExporterID = usage.ExporterID
        usageData.Type = usage.Type
        usageData.Tenant = "default"
-       usageData.OrgName = orgName
-       usageData.TenantId = lm.tenantID
+       usageData.OrgName = "default"
+       usageData.TenantId = "default"
        usagesPayload = append(usagesPayload, usageData)
    }
@@ -176,24 +156,18 @@ func (lm *Manager) UploadUsage() {
        LicenseKey: key,
        Usage:      usagesPayload,
    }
-   lm.UploadUsageWithExponentalBackOff(ctx, payload)
-}
-
-func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) {
-   for i := 1; i <= MaxRetries; i++ {
-       apiErr := licenseserver.SendUsage(ctx, payload)
-       if apiErr != nil && i == MaxRetries {
-           zap.L().Error("retries stopped : %v", zap.Error(apiErr))
-           // not returning error here since it is captured in the failed count
-           return
-       } else if apiErr != nil {
-           // sleeping for exponential backoff
-           sleepDuration := RetryInterval * time.Duration(i)
-           zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Error(apiErr.Err))
-           time.Sleep(sleepDuration)
-       } else {
-           break
-       }
+   body, errv2 := json.Marshal(payload)
+   if errv2 != nil {
+       zap.L().Error("error while marshalling usage payload: %v", zap.Error(errv2))
+       return
+   }
+
+   errv2 = lm.zeus.PutMeters(ctx, payload.LicenseKey.String(), body)
+   if errv2 != nil {
+       zap.L().Error("failed to upload usage: %v", zap.Error(errv2))
+       // not returning error here since it is captured in the failed count
+       return
    }
}
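Worth noting: the deleted retry loop slept RetryInterval * i, which grows linearly despite the "exponential backoff" name; the new code drops retries entirely and delegates to zeus.PutMeters (whose HTTP client is built with a retry count elsewhere in this diff). A sketch of what a genuinely exponential variant would look like, under assumed maxRetries/retryInterval values; the upload callback stands in for whatever transport is in use.

package main

import (
    "errors"
    "fmt"
    "time"
)

const (
    maxRetries    = 3
    retryInterval = 100 * time.Millisecond // assumed base delay
)

func uploadWithBackoff(upload func() error) error {
    var err error
    for i := 0; i < maxRetries; i++ {
        if err = upload(); err == nil {
            return nil
        }
        // double the delay on each attempt: 100ms, 200ms, 400ms, ...
        time.Sleep(retryInterval << i)
    }
    return fmt.Errorf("retries exhausted: %w", err)
}

func main() {
    attempts := 0
    err := uploadWithBackoff(func() error {
        attempts++
        if attempts < 3 {
            return errors.New("transient failure")
        }
        return nil
    })
    fmt.Println(attempts, err) // 3 <nil>
}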
ee/sqlstore/postgressqlstore/dialect.go (new file, 448 lines)
@@ -0,0 +1,448 @@
package postgressqlstore

import (
    "context"
    "fmt"
    "reflect"
    "slices"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/uptrace/bun"
)

var (
    Identity = "id"
    Integer  = "bigint"
    Text     = "text"
)

var (
    Org              = "org"
    User             = "user"
    FactorPassword   = "factor_password"
    CloudIntegration = "cloud_integration"
)

var (
    OrgReference              = `("org_id") REFERENCES "organizations" ("id")`
    UserReference             = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
    FactorPasswordReference   = `("password_id") REFERENCES "factor_password" ("id")`
    CloudIntegrationReference = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
)

type dialect struct{}

func (dialect *dialect) IntToTimestamp(ctx context.Context, bun bun.IDB, table string, column string) error {
    columnType, err := dialect.GetColumnType(ctx, bun, table, column)
    if err != nil {
        return err
    }

    // bigint for postgres and INTEGER for sqlite
    if columnType != "bigint" {
        return nil
    }

    // if the column is an integer then do this
    if _, err := bun.
        ExecContext(ctx, `ALTER TABLE `+table+` RENAME COLUMN `+column+` TO `+column+`_old`); err != nil {
        return err
    }

    // add new timestamp column
    if _, err := bun.
        NewAddColumn().
        Table(table).
        ColumnExpr(column + " TIMESTAMP").
        Exec(ctx); err != nil {
        return err
    }

    if _, err := bun.
        NewUpdate().
        Table(table).
        Set(column + " = to_timestamp(cast(" + column + "_old as INTEGER))").
        Where("1=1").
        Exec(ctx); err != nil {
        return err
    }

    // drop old column
    if _, err := bun.
        NewDropColumn().
        Table(table).
        Column(column + "_old").
        Exec(ctx); err != nil {
        return err
    }

    return nil
}

func (dialect *dialect) IntToBoolean(ctx context.Context, bun bun.IDB, table string, column string) error {
    columnExists, err := dialect.ColumnExists(ctx, bun, table, column)
    if err != nil {
        return err
    }
    if !columnExists {
        return nil
    }

    columnType, err := dialect.GetColumnType(ctx, bun, table, column)
    if err != nil {
        return err
    }

    if columnType != "bigint" {
        return nil
    }

    if _, err := bun.
        ExecContext(ctx, `ALTER TABLE `+table+` RENAME COLUMN `+column+` TO `+column+`_old`); err != nil {
        return err
    }

    // add new boolean column
    if _, err := bun.
        NewAddColumn().
        Table(table).
        ColumnExpr(column + " BOOLEAN").
        Exec(ctx); err != nil {
        return err
    }

    // copy data from old column to new column, converting from int to boolean
    if _, err := bun.NewUpdate().
        Table(table).
        Set(column + " = CASE WHEN " + column + "_old = 1 THEN true ELSE false END").
        Where("1=1").
        Exec(ctx); err != nil {
        return err
    }

    // drop old column
    if _, err := bun.NewDropColumn().Table(table).Column(column + "_old").Exec(ctx); err != nil {
        return err
    }

    return nil
}

func (dialect *dialect) GetColumnType(ctx context.Context, bun bun.IDB, table string, column string) (string, error) {
    var columnType string

    err := bun.NewSelect().
        ColumnExpr("data_type").
        TableExpr("information_schema.columns").
        Where("table_name = ?", table).
        Where("column_name = ?", column).
        Scan(ctx, &columnType)
    if err != nil {
        return "", err
    }

    return columnType, nil
}

func (dialect *dialect) ColumnExists(ctx context.Context, bun bun.IDB, table string, column string) (bool, error) {
    var count int
    err := bun.NewSelect().
        ColumnExpr("COUNT(*)").
        TableExpr("information_schema.columns").
        Where("table_name = ?", table).
        Where("column_name = ?", column).
        Scan(ctx, &count)

    if err != nil {
        return false, err
    }

    return count > 0, nil
}

func (dialect *dialect) AddColumn(ctx context.Context, bun bun.IDB, table string, column string, columnExpr string) error {
    exists, err := dialect.ColumnExists(ctx, bun, table, column)
    if err != nil {
        return err
    }
    if !exists {
        _, err = bun.
            NewAddColumn().
            Table(table).
            ColumnExpr(column + " " + columnExpr).
            Exec(ctx)
        if err != nil {
            return err
        }
    }

    return nil
}

func (dialect *dialect) RenameColumn(ctx context.Context, bun bun.IDB, table string, oldColumnName string, newColumnName string) (bool, error) {
    oldColumnExists, err := dialect.ColumnExists(ctx, bun, table, oldColumnName)
    if err != nil {
        return false, err
    }

    newColumnExists, err := dialect.ColumnExists(ctx, bun, table, newColumnName)
    if err != nil {
        return false, err
    }

    if newColumnExists {
        return true, nil
    }

    if !oldColumnExists {
        return false, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "old column: %s doesn't exist", oldColumnName)
    }

    _, err = bun.
        ExecContext(ctx, "ALTER TABLE "+table+" RENAME COLUMN "+oldColumnName+" TO "+newColumnName)
    if err != nil {
        return false, err
    }
    return true, nil
}

func (dialect *dialect) DropColumn(ctx context.Context, bun bun.IDB, table string, column string) error {
    exists, err := dialect.ColumnExists(ctx, bun, table, column)
    if err != nil {
        return err
    }
    if exists {
        _, err = bun.
            NewDropColumn().
            Table(table).
            Column(column).
            Exec(ctx)
        if err != nil {
            return err
        }
    }

    return nil
}

func (dialect *dialect) TableExists(ctx context.Context, bun bun.IDB, table interface{}) (bool, error) {
    count := 0
    err := bun.
        NewSelect().
        ColumnExpr("count(*)").
        Table("pg_catalog.pg_tables").
        Where("tablename = ?", bun.Dialect().Tables().Get(reflect.TypeOf(table)).Name).
        Scan(ctx, &count)

    if err != nil {
        return false, err
    }

    if count == 0 {
        return false, nil
    }

    return true, nil
}

func (dialect *dialect) RenameTableAndModifyModel(ctx context.Context, bun bun.IDB, oldModel interface{}, newModel interface{}, references []string, cb func(context.Context) error) error {
    if len(references) == 0 {
        return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "cannot run migration without reference")
    }
    exists, err := dialect.TableExists(ctx, bun, newModel)
    if err != nil {
        return err
    }
    if exists {
        return nil
    }

    var fkReferences []string
    for _, reference := range references {
        if reference == Org && !slices.Contains(fkReferences, OrgReference) {
            fkReferences = append(fkReferences, OrgReference)
        } else if reference == User && !slices.Contains(fkReferences, UserReference) {
            fkReferences = append(fkReferences, UserReference)
        } else if reference == FactorPassword && !slices.Contains(fkReferences, FactorPasswordReference) {
            fkReferences = append(fkReferences, FactorPasswordReference)
        } else if reference == CloudIntegration && !slices.Contains(fkReferences, CloudIntegrationReference) {
            fkReferences = append(fkReferences, CloudIntegrationReference)
        }
    }

    createTable := bun.
        NewCreateTable().
        IfNotExists().
        Model(newModel)

    for _, fk := range fkReferences {
        createTable = createTable.ForeignKey(fk)
    }

    _, err = createTable.Exec(ctx)
    if err != nil {
        return err
    }

    err = cb(ctx)
    if err != nil {
        return err
    }

    _, err = bun.
        NewDropTable().
        IfExists().
        Model(oldModel).
        Exec(ctx)
    if err != nil {
        return err
    }

    return nil
}

func (dialect *dialect) AddNotNullDefaultToColumn(ctx context.Context, bun bun.IDB, table string, column, columnType, defaultValue string) error {
    query := fmt.Sprintf("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s, ALTER COLUMN %s SET NOT NULL", table, column, defaultValue, column)
    if _, err := bun.ExecContext(ctx, query); err != nil {
        return err
    }
    return nil
}

func (dialect *dialect) UpdatePrimaryKey(ctx context.Context, bun bun.IDB, oldModel interface{}, newModel interface{}, reference string, cb func(context.Context) error) error {
    if reference == "" {
        return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "cannot run migration without reference")
    }
    oldTableName := bun.Dialect().Tables().Get(reflect.TypeOf(oldModel)).Name
    newTableName := bun.Dialect().Tables().Get(reflect.TypeOf(newModel)).Name

    columnType, err := dialect.GetColumnType(ctx, bun, oldTableName, Identity)
    if err != nil {
        return err
    }
    if columnType == Text {
        return nil
    }

    fkReference := ""
    if reference == Org {
        fkReference = OrgReference
    } else if reference == User {
        fkReference = UserReference
    }

    _, err = bun.
        NewCreateTable().
        IfNotExists().
        Model(newModel).
        ForeignKey(fkReference).
        Exec(ctx)

    if err != nil {
        return err
    }

    err = cb(ctx)
    if err != nil {
        return err
    }

    _, err = bun.
        NewDropTable().
        IfExists().
        Model(oldModel).
        Exec(ctx)
    if err != nil {
        return err
    }

    _, err = bun.
        ExecContext(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", newTableName, oldTableName))
    if err != nil {
        return err
    }

    return nil
}

func (dialect *dialect) AddPrimaryKey(ctx context.Context, bun bun.IDB, oldModel interface{}, newModel interface{}, reference string, cb func(context.Context) error) error {
    if reference == "" {
        return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "cannot run migration without reference")
    }
    oldTableName := bun.Dialect().Tables().Get(reflect.TypeOf(oldModel)).Name
    newTableName := bun.Dialect().Tables().Get(reflect.TypeOf(newModel)).Name

    identityExists, err := dialect.ColumnExists(ctx, bun, oldTableName, Identity)
    if err != nil {
        return err
    }
    if identityExists {
        return nil
    }

    fkReference := ""
    if reference == Org {
        fkReference = OrgReference
    } else if reference == User {
        fkReference = UserReference
    }

    _, err = bun.
        NewCreateTable().
        IfNotExists().
        Model(newModel).
        ForeignKey(fkReference).
        Exec(ctx)

    if err != nil {
        return err
    }

    err = cb(ctx)
    if err != nil {
        return err
    }

    _, err = bun.
        NewDropTable().
        IfExists().
        Model(oldModel).
        Exec(ctx)
    if err != nil {
        return err
    }

    _, err = bun.
        ExecContext(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", newTableName, oldTableName))
    if err != nil {
        return err
    }

    return nil
}

func (dialect *dialect) DropColumnWithForeignKeyConstraint(ctx context.Context, bunIDB bun.IDB, model interface{}, column string) error {
    existingTable := bunIDB.Dialect().Tables().Get(reflect.TypeOf(model))
    columnExists, err := dialect.ColumnExists(ctx, bunIDB, existingTable.Name, column)
    if err != nil {
        return err
    }

    if !columnExists {
        return nil
    }

    _, err = bunIDB.
        NewDropColumn().
        Model(model).
        Column(column).
        Exec(ctx)
    if err != nil {
        return err
    }

    return nil
}
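IntToTimestamp and IntToBoolean above share one type-migration recipe: rename the old column to a temporary name, add a new column of the target type, copy the data across with an explicit conversion, then drop the temporary column. A condensed standalone sketch of that sequence using plain SQL; the table name, column name, and DSN are placeholders, and the Postgres driver import is an assumption.

package main

import (
    "context"
    "database/sql"
    "log"

    _ "github.com/jackc/pgx/v5/stdlib" // assumed Postgres driver
)

func intToBoolean(ctx context.Context, db *sql.DB, table, column string) error {
    steps := []string{
        // 1. keep the old data around under a temporary name
        `ALTER TABLE ` + table + ` RENAME COLUMN ` + column + ` TO ` + column + `_old`,
        // 2. create the column with the desired type
        `ALTER TABLE ` + table + ` ADD COLUMN ` + column + ` BOOLEAN`,
        // 3. copy with an explicit conversion
        `UPDATE ` + table + ` SET ` + column + ` = (` + column + `_old = 1)`,
        // 4. drop the temporary column
        `ALTER TABLE ` + table + ` DROP COLUMN ` + column + `_old`,
    }
    for _, stmt := range steps {
        if _, err := db.ExecContext(ctx, stmt); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    db, err := sql.Open("pgx", "postgres://localhost:5432/signoz") // assumed DSN
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    if err := intToBoolean(context.Background(), db, "users", "accepted"); err != nil {
        log.Fatal(err)
    }
}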
@@ -4,13 +4,15 @@ import (
    "context"
    "database/sql"

+   "github.com/SigNoz/signoz/pkg/errors"
+   "github.com/SigNoz/signoz/pkg/factory"
+   "github.com/SigNoz/signoz/pkg/sqlstore"
+   "github.com/jackc/pgx/v5/pgconn"
    "github.com/jackc/pgx/v5/pgxpool"
    "github.com/jackc/pgx/v5/stdlib"
    "github.com/jmoiron/sqlx"
    "github.com/uptrace/bun"
    "github.com/uptrace/bun/dialect/pgdialect"
-   "go.signoz.io/signoz/pkg/factory"
-   "go.signoz.io/signoz/pkg/sqlstore"
)

type provider struct {
@@ -18,7 +20,7 @@ type provider struct {
    sqldb   *sql.DB
    bundb   *sqlstore.BunDB
    sqlxdb  *sqlx.DB
-   dialect *PGDialect
+   dialect *dialect
}

func NewFactory(hookFactories ...factory.ProviderFactory[sqlstore.SQLStoreHook, sqlstore.Config]) factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config] {
@@ -37,7 +39,7 @@ func NewFactory(hookFactories ...factory.ProviderFactory[sqlstore.SQLStoreHook,
}

func New(ctx context.Context, providerSettings factory.ProviderSettings, config sqlstore.Config, hooks ...sqlstore.SQLStoreHook) (sqlstore.SQLStore, error) {
-   settings := factory.NewScopedProviderSettings(providerSettings, "go.signoz.io/signoz/pkg/sqlstore/postgressqlstore")
+   settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/sqlstore/postgressqlstore")

    pgConfig, err := pgxpool.ParseConfig(config.Postgres.DSN)
    if err != nil {
@@ -60,7 +62,7 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
        sqldb:   sqldb,
        bundb:   sqlstore.NewBunDB(settings, sqldb, pgdialect.New(), hooks),
        sqlxdb:  sqlx.NewDb(sqldb, "postgres"),
-       dialect: &PGDialect{},
+       dialect: new(dialect),
    }, nil
}
@@ -87,3 +89,24 @@ func (provider *provider) BunDBCtx(ctx context.Context) bun.IDB {
func (provider *provider) RunInTxCtx(ctx context.Context, opts *sql.TxOptions, cb func(ctx context.Context) error) error {
    return provider.bundb.RunInTxCtx(ctx, opts, cb)
}

+func (provider *provider) WrapNotFoundErrf(err error, code errors.Code, format string, args ...any) error {
+   if err == sql.ErrNoRows {
+       return errors.Wrapf(err, errors.TypeNotFound, code, format, args...)
+   }
+
+   return err
+}
+
+func (provider *provider) WrapAlreadyExistsErrf(err error, code errors.Code, format string, args ...any) error {
+   var pgErr *pgconn.PgError
+   if errors.As(err, &pgErr) && pgErr.Code == "23505" {
+       return errors.Wrapf(err, errors.TypeAlreadyExists, code, format, args...)
+   }
+
+   return err
+}
+
+func (dialect *dialect) ToggleForeignKeyConstraint(ctx context.Context, bun *bun.DB, enable bool) error {
+   return nil
+}
ee/types/personal_access_token.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package types

import (
    "crypto/rand"
    "encoding/base64"
    "time"

    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/uptrace/bun"
)

type GettablePAT struct {
    CreatedByUser PatUser `json:"createdByUser"`
    UpdatedByUser PatUser `json:"updatedByUser"`

    StorablePersonalAccessToken
}

type PatUser struct {
    types.User
    NotFound bool `json:"notFound"`
}

func NewGettablePAT(name, role, userID string, expiresAt int64) GettablePAT {
    return GettablePAT{
        StorablePersonalAccessToken: NewStorablePersonalAccessToken(name, role, userID, expiresAt),
    }
}

type StorablePersonalAccessToken struct {
    bun.BaseModel `bun:"table:personal_access_token"`
    types.Identifiable
    types.TimeAuditable
    OrgID           string `json:"orgId" bun:"org_id,type:text,notnull"`
    Role            string `json:"role" bun:"role,type:text,notnull,default:'ADMIN'"`
    UserID          string `json:"userId" bun:"user_id,type:text,notnull"`
    Token           string `json:"token" bun:"token,type:text,notnull,unique"`
    Name            string `json:"name" bun:"name,type:text,notnull"`
    ExpiresAt       int64  `json:"expiresAt" bun:"expires_at,notnull,default:0"`
    LastUsed        int64  `json:"lastUsed" bun:"last_used,notnull,default:0"`
    Revoked         bool   `json:"revoked" bun:"revoked,notnull,default:false"`
    UpdatedByUserID string `json:"updatedByUserId" bun:"updated_by_user_id,type:text,notnull,default:''"`
}

func NewStorablePersonalAccessToken(name, role, userID string, expiresAt int64) StorablePersonalAccessToken {
    now := time.Now()
    if expiresAt != 0 {
        // convert expiresAt to unix timestamp from days
        expiresAt = now.Unix() + (expiresAt * 24 * 60 * 60)
    }

    // Generate a 32-byte random token.
    token := make([]byte, 32)
    rand.Read(token)
    // Encode the token in base64.
    encodedToken := base64.StdEncoding.EncodeToString(token)

    return StorablePersonalAccessToken{
        Token:           encodedToken,
        Name:            name,
        Role:            role,
        UserID:          userID,
        ExpiresAt:       expiresAt,
        LastUsed:        0,
        Revoked:         false,
        UpdatedByUserID: "",
        TimeAuditable: types.TimeAuditable{
            CreatedAt: now,
            UpdatedAt: now,
        },
        Identifiable: types.Identifiable{
            ID: valuer.GenerateUUID(),
        },
    }
}
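One detail worth flagging in the file above: NewStorablePersonalAccessToken discards the error returned by rand.Read. Reads from crypto/rand can fail if the system entropy source is unavailable, so a hardened variant would check it. A standalone sketch of the same token generation with the error handled; the function name here is hypothetical.

package main

import (
    "crypto/rand"
    "encoding/base64"
    "fmt"
    "log"
)

// newToken returns a base64-encoded 32-byte random token.
func newToken() (string, error) {
    buf := make([]byte, 32)
    if _, err := rand.Read(buf); err != nil {
        return "", fmt.Errorf("generate token: %w", err)
    }
    return base64.StdEncoding.EncodeToString(buf), nil
}

func main() {
    token, err := newToken()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(token)
}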
ee/zeus/config.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package zeus

import (
    "fmt"
    neturl "net/url"
    "sync"

    "github.com/SigNoz/signoz/pkg/zeus"
)

// This will be set via ldflags at build time.
var (
    url           string = "<unset>"
    deprecatedURL string = "<unset>"
)

var (
    config zeus.Config
    once   sync.Once
)

// initializes the Zeus configuration
func Config() zeus.Config {
    once.Do(func() {
        parsedURL, err := neturl.Parse(url)
        if err != nil {
            panic(fmt.Errorf("invalid zeus URL: %w", err))
        }

        deprecatedParsedURL, err := neturl.Parse(deprecatedURL)
        if err != nil {
            panic(fmt.Errorf("invalid zeus deprecated URL: %w", err))
        }

        config = zeus.Config{URL: parsedURL, DeprecatedURL: deprecatedParsedURL}
        if err := config.Validate(); err != nil {
            panic(fmt.Errorf("invalid zeus config: %w", err))
        }
    })

    return config
}
ee/zeus/httpzeus/provider.go (new file, 189 lines)
@@ -0,0 +1,189 @@
package httpzeus

import (
    "bytes"
    "context"
    "io"
    "net/http"
    "net/url"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/factory"
    "github.com/SigNoz/signoz/pkg/http/client"
    "github.com/SigNoz/signoz/pkg/zeus"
    "github.com/tidwall/gjson"
)

type Provider struct {
    settings   factory.ScopedProviderSettings
    config     zeus.Config
    httpClient *client.Client
}

func NewProviderFactory() factory.ProviderFactory[zeus.Zeus, zeus.Config] {
    return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config zeus.Config) (zeus.Zeus, error) {
        return New(ctx, providerSettings, config)
    })
}

func New(ctx context.Context, providerSettings factory.ProviderSettings, config zeus.Config) (zeus.Zeus, error) {
    settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/zeus/httpzeus")

    httpClient, err := client.New(
        settings.Logger(),
        providerSettings.TracerProvider,
        providerSettings.MeterProvider,
        client.WithRequestResponseLog(true),
        client.WithRetryCount(3),
    )
    if err != nil {
        return nil, err
    }

    return &Provider{
        settings:   settings,
        config:     config,
        httpClient: httpClient,
    }, nil
}

func (provider *Provider) GetLicense(ctx context.Context, key string) ([]byte, error) {
    response, err := provider.do(
        ctx,
        provider.config.URL.JoinPath("/v2/licenses/me"),
        http.MethodGet,
        key,
        nil,
    )
    if err != nil {
        return nil, err
    }

    return []byte(gjson.GetBytes(response, "data").String()), nil
}

func (provider *Provider) GetCheckoutURL(ctx context.Context, key string, body []byte) ([]byte, error) {
    response, err := provider.do(
        ctx,
        provider.config.URL.JoinPath("/v2/subscriptions/me/sessions/checkout"),
        http.MethodPost,
        key,
        body,
    )
    if err != nil {
        return nil, err
    }

    return []byte(gjson.GetBytes(response, "data").String()), nil
}

func (provider *Provider) GetPortalURL(ctx context.Context, key string, body []byte) ([]byte, error) {
    response, err := provider.do(
        ctx,
        provider.config.URL.JoinPath("/v2/subscriptions/me/sessions/portal"),
        http.MethodPost,
        key,
        body,
    )
    if err != nil {
        return nil, err
    }

    return []byte(gjson.GetBytes(response, "data").String()), nil
}

func (provider *Provider) GetDeployment(ctx context.Context, key string) ([]byte, error) {
    response, err := provider.do(
        ctx,
        provider.config.URL.JoinPath("/v2/deployments/me"),
        http.MethodGet,
        key,
        nil,
    )
    if err != nil {
        return nil, err
    }

    return []byte(gjson.GetBytes(response, "data").String()), nil
}

func (provider *Provider) PutMeters(ctx context.Context, key string, data []byte) error {
    _, err := provider.do(
        ctx,
        provider.config.DeprecatedURL.JoinPath("/api/v1/usage"),
        http.MethodPost,
        key,
        data,
    )

    return err
}

func (provider *Provider) PutProfile(ctx context.Context, key string, body []byte) error {
    _, err := provider.do(
        ctx,
        provider.config.URL.JoinPath("/v2/profiles/me"),
        http.MethodPut,
        key,
        body,
    )

    return err
}

func (provider *Provider) PutHost(ctx context.Context, key string, body []byte) error {
    _, err := provider.do(
        ctx,
        provider.config.URL.JoinPath("/v2/deployments/me/hosts"),
        http.MethodPut,
        key,
        body,
    )

    return err
}

func (provider *Provider) do(ctx context.Context, url *url.URL, method string, key string, requestBody []byte) ([]byte, error) {
    request, err := http.NewRequestWithContext(ctx, method, url.String(), bytes.NewBuffer(requestBody))
    if err != nil {
        return nil, err
    }
    request.Header.Set("X-Signoz-Cloud-Api-Key", key)
    request.Header.Set("Content-Type", "application/json")

    response, err := provider.httpClient.Do(request)
    if err != nil {
        return nil, err
    }

    defer func() {
        _ = response.Body.Close()
    }()

    body, err := io.ReadAll(response.Body)
    if err != nil {
        return nil, err
    }

    if response.StatusCode/100 == 2 {
        return body, nil
    }

    return nil, provider.errFromStatusCode(response.StatusCode)
}

// This can be taken down to the client package
func (provider *Provider) errFromStatusCode(statusCode int) error {
    switch statusCode {
    case http.StatusBadRequest:
        return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "bad request")
    case http.StatusUnauthorized:
        return errors.Newf(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "unauthenticated")
    case http.StatusForbidden:
        return errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "forbidden")
    case http.StatusNotFound:
        return errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "not found")
    }

    return errors.Newf(errors.TypeInternal, errors.CodeInternal, "internal")
}
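The request/response flow in Provider.do above is compact enough to restate as a runnable sketch: set the API-key header, read the body, treat any 2xx as success, and map other statuses to errors. This standalone version uses the standard library only; the URL and key in main are placeholders, and the typed-error mapping is simplified to a plain fmt error.

package main

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "net/http"
)

func do(ctx context.Context, url, method, key string, body []byte) ([]byte, error) {
    req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewReader(body))
    if err != nil {
        return nil, err
    }
    req.Header.Set("X-Signoz-Cloud-Api-Key", key)
    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    out, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }
    if resp.StatusCode/100 == 2 {
        return out, nil
    }
    return nil, fmt.Errorf("upstream returned %d", resp.StatusCode)
}

func main() {
    data, err := do(context.Background(), "https://api.example.com/v2/licenses/me", http.MethodGet, "dummy-key", nil)
    fmt.Println(len(data), err)
}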
@@ -110,6 +110,8 @@ module.exports = {
        // eslint rules need to remove
        '@typescript-eslint/no-shadow': 'off',
        'import/no-cycle': 'off',
+       // https://typescript-eslint.io/rules/consistent-return/ check the warning for details
+       'consistent-return': 'off',
        'prettier/prettier': [
            'error',
            {},
Some files were not shown because too many files have changed in this diff.