Compare commits
318 Commits
fix/remove...v0.69.0
| Author | SHA1 | Date |
|---|---|---|
|  | 726f2b0fa2 |  |
|  | 4a0b0aafbd |  |
|  | 837f434fe9 |  |
|  | 0baf0e9453 |  |
|  | 403043e076 |  |
|  | 7730f76128 |  |
|  | 6e3ffd555d |  |
|  | c565c2b865 |  |
|  | 4ec1e66c7e |  |
|  | 89541862cc |  |
|  | 610f4d43e7 |  |
|  | 044a124cc1 |  |
|  | 0cf9003e3a |  |
|  | 644135a933 |  |
|  | b465f74e4a |  |
|  | e00e365964 |  |
|  | 5c45e1f7b3 |  |
|  | 16e61b45ac |  |
|  | fdcdbf021a |  |
|  | c92ef53e9c |  |
|  | 268f283785 |  |
|  | c574adc634 |  |
|  | 939ab5270e |  |
|  | 42525b6067 |  |
|  | c66cd3ce4e |  |
|  | e9618d64bc |  |
|  | 8e11a988be |  |
|  | 92299e1b08 |  |
|  | bab8c8274c |  |
|  | 265c67e5bd |  |
|  | efc8c95d59 |  |
|  | 5708079c3c |  |
|  | dbe78e55a9 |  |
|  | a60371fb80 |  |
|  | d5b847c091 |  |
|  | c106f1c9a9 |  |
|  | d6bfd95302 |  |
|  | 68ee677630 |  |
|  | 3ff862b483 |  |
|  | f91badbce9 |  |
|  | 2ead4fbb66 |  |
|  | 56b17bcfef |  |
|  | 5839b65f7a |  |
|  | 3787c5ca24 |  |
|  | 458cd28cc2 |  |
|  | 64a4606275 |  |
|  | 505757b971 |  |
|  | 80740f646c |  |
|  | e92d055c30 |  |
|  | 5c546e8efd |  |
|  | ecd50f7232 |  |
|  | 15f85a645f |  |
|  | 366ca3bb3e |  |
|  | 43b0cdbb6a |  |
|  | 4967696da8 |  |
|  | c5938b6c10 |  |
|  | 9feee6ff46 |  |
|  | d48cdbfc4a |  |
|  | dad72dd295 |  |
|  | 28d27bc5c1 |  |
|  | 3e675bb9a5 |  |
|  | 05c9dd68dd |  |
|  | 03fb388cd1 |  |
|  | 196b17dd1e |  |
|  | 93e9d15004 |  |
|  | f11161ddb8 |  |
|  | 50db3cc39f |  |
|  | 6e27df9dcb |  |
|  | 7f6bad67d5 |  |
|  | 825d2dfcbb |  |
|  | 9f6419c2f8 |  |
|  | 421879cf7a |  |
|  | 00abadd429 |  |
|  | 14096f8d53 |  |
|  | d2aa1cf06e |  |
|  | 838192cf5c |  |
|  | 5dfe245f2d |  |
|  | 53b86e4b5c |  |
|  | 5d9a2571df |  |
|  | bef6cc945a |  |
|  | 2c2e248c95 |  |
|  | 2f62a9d36d |  |
|  | 04778b9641 |  |
|  | 26fe5e49e7 |  |
|  | accafbc3ec |  |
|  | 8e7c78e1b1 |  |
|  | 53ebd39f41 |  |
|  | b36ef944cc |  |
|  | fa90fad373 |  |
|  | 77420b9d3a |  |
|  | cecc57e72d |  |
|  | 512adc6471 |  |
|  | 42fefc65be |  |
|  | dcc659907a |  |
|  | b90ed375c2 |  |
|  | a8a3bd3f7d |  |
|  | 7405bfbbee |  |
|  | 67e822e23e |  |
|  | 60dc479a19 |  |
|  | 85cf4f4e2e |  |
|  | 83aa48c721 |  |
|  | 823f84f857 |  |
|  | 8a4d45084d |  |
|  | 5bc6c33899 |  |
|  | 83f6dea2db |  |
|  | 7031c866e8 |  |
|  | 46bc7c7a21 |  |
|  | 6d9741c3a4 |  |
|  | 610a8ec704 |  |
|  | cd9f27ab08 |  |
|  | 14fbb1fcda |  |
|  | 96da21df05 |  |
|  | 8608f02263 |  |
|  | 2701ae5c34 |  |
|  | 951593b0a3 |  |
|  | e6766023dd |  |
|  | bef5b96c5c |  |
|  | b29359dee0 |  |
|  | 9a1cd65b73 |  |
|  | 8ab0c066d6 |  |
|  | b333aa3775 |  |
|  | 8a3319cdf5 |  |
|  | d09c4d947e |  |
|  | 2508e6f9f1 |  |
|  | 1b8213653a |  |
|  | b499b10333 |  |
|  | b35b975798 |  |
|  | 715f8a2363 |  |
|  | 8d1c4491b7 |  |
|  | e3caa6a8f5 |  |
|  | a1059ed949 |  |
|  | 8c46de8eac |  |
|  | 2b5a0ec496 |  |
|  | a9440c010c |  |
|  | f9e7eff357 |  |
|  | 0fbfb6b22b |  |
|  | b25df66381 |  |
|  | 32fa5a403c |  |
|  | f9d4cf19e9 |  |
|  | 81775c7d55 |  |
|  | 8d2666004b |  |
|  | 51baf7f8d3 |  |
|  | 31a2926375 |  |
|  | 8c6225185d |  |
|  | d4458d65ad |  |
|  | 02d8fdb212 |  |
|  | 47d8c9e3e7 |  |
|  | a383c708e3 |  |
|  | 99367be850 |  |
|  | 73bcc2af46 |  |
|  | 43f856c41b |  |
|  | 6384b25af3 |  |
|  | 507c0600cd |  |
|  | 3d092ec2ae |  |
|  | 2b8a610a07 |  |
|  | f7f8bf1867 |  |
|  | 813cd845f4 |  |
|  | 6aee991633 |  |
|  | 2bfd31841e |  |
|  | a320a16556 |  |
|  | 7cd8442e6e |  |
|  | 486632b64e |  |
|  | 328d955a74 |  |
|  | a3e57a1829 |  |
|  | 24ab18d988 |  |
|  | 2e4956c2f7 |  |
|  | b85f7921f4 |  |
|  | 0c2a15d86f |  |
|  | afbba1ed44 |  |
|  | 20f748f9c4 |  |
|  | 96b5e0920f |  |
|  | 7fe4f8cc56 |  |
|  | ed6abe5a95 |  |
|  | a6968d452c |  |
|  | 0c5db1937e |  |
|  | 67058b2a17 |  |
|  | e46d969143 |  |
|  | e4505693b0 |  |
|  | 2dad9a3093 |  |
|  | 7b6bd83e9a |  |
|  | d43adc24ef |  |
|  | 5044861773 |  |
|  | 71d1e12be7 |  |
|  | 5a70123b06 |  |
|  | f410df846a |  |
|  | d7bd72e2aa |  |
|  | 20e64b5102 |  |
|  | 0b03ff07f1 |  |
|  | c01060ccf7 |  |
|  | 57c2326908 |  |
|  | 649560265e |  |
|  | c8d0f7638e |  |
|  | 25484caa4c |  |
|  | 9ccc686c63 |  |
|  | 3ad6ff73df |  |
|  | c93cf1ce95 |  |
|  | a9ced66258 |  |
|  | 98a350692b |  |
|  | d93f72f18d |  |
|  | a59e7b9dfb |  |
|  | 91bbeaf175 |  |
|  | 22e61e1605 |  |
|  | 656d1c2b1c |  |
|  | 493ae4fd07 |  |
|  | cd1ec561b1 |  |
|  | 0acf39a532 |  |
|  | d859301d30 |  |
|  | 35f4eaa23b |  |
|  | 07c24bcdf3 |  |
|  | 77c5f17dce |  |
|  | a11aadb712 |  |
|  | bc9c7b5f1d |  |
|  | 1bba932d08 |  |
|  | c1478c4e54 |  |
|  | 371224a64a |  |
|  | 504bc0d541 |  |
|  | 2faa0c6d4f |  |
|  | 969ac5028e |  |
|  | 3f7adeb040 |  |
|  | 323da3494b |  |
|  | 01fda51959 |  |
|  | 85ac21f253 |  |
|  | fd9e9f0fb3 |  |
|  | d5523fc092 |  |
|  | 2ec641b99e |  |
|  | d1503f1418 |  |
|  | e974e9d47f |  |
|  | 577a169508 |  |
|  | 939e2a3570 |  |
|  | b64326070c |  |
|  | 63872983c6 |  |
|  | eb6670980a |  |
|  | 831540eaf0 |  |
|  | 48f3b9cacb |  |
|  | eaf8571fe9 |  |
|  | 22c10f9479 |  |
|  | e748fb0655 |  |
|  | fdc54a62a9 |  |
|  | abe0ab69b0 |  |
|  | e623c92615 |  |
|  | dc5917db01 |  |
|  | d6a7f0b6f4 |  |
|  | 471803115e |  |
|  | 8403a3362d |  |
|  | 64d46bc855 |  |
|  | c9fee27604 |  |
|  | f1b6b2d3d8 |  |
|  | 468f056530 |  |
|  | 7086470ce2 |  |
|  | 352296c6cd |  |
|  | 975307a8b8 |  |
|  | 12377be809 |  |
|  | 9d90b8d19c |  |
|  | 5005923ef4 |  |
|  | db4338be42 |  |
|  | c7d0598ec0 |  |
|  | 4978fb9599 |  |
|  | 7b18c3ba06 |  |
|  | 92cdb36879 |  |
|  | 580f0b816e |  |
|  | b770fc2457 |  |
|  | c177230cce |  |
|  | 2112047a02 |  |
|  | 03c193d5a1 |  |
|  | b83b295318 |  |
|  | fbe75cd057 |  |
|  | 860145fb1d |  |
|  | 2fe75e74cd |  |
|  | 8e19c346a4 |  |
|  | 1b33efe4cc |  |
|  | 2642338672 |  |
|  | 845dc00568 |  |
|  | a1090bfdc5 |  |
|  | 44f41c55f9 |  |
|  | 42ac9ab6fe |  |
|  | 5c02250aae |  |
|  | c49a9dac1a |  |
|  | abc2ec2155 |  |
|  | 4dc5615d2f |  |
|  | 6c350f30aa |  |
|  | 6664e1bc02 |  |
|  | 438cbcef87 |  |
|  | 829e1f0920 |  |
|  | 68d25a8989 |  |
|  | cc90321ac0 |  |
|  | b10c22223b |  |
|  | bdcae62bf9 |  |
|  | cdde369748 |  |
|  | 4e26189778 |  |
|  | 523cbcd6fc |  |
|  | eeadc021e1 |  |
|  | 952ab58023 |  |
|  | 3ca2fff5c5 |  |
|  | ef3a9adb48 |  |
|  | 975f141604 |  |
|  | c206f4fa5c |  |
|  | e88e24e434 |  |
|  | 94e0423479 |  |
|  | 5891fbc229 |  |
|  | 8137ec54ba |  |
|  | f7b80524a5 |  |
|  | 4be0508dd2 |  |
|  | a31c4b8339 |  |
|  | d7846338ce |  |
|  | 5dac1ad20a |  |
|  | 8d704c331c |  |
|  | f8e47496fa |  |
|  | 6fef9d9676 |  |
|  | 190767fd0a |  |
|  | 1e78786cae |  |
|  | 6448fb17e7 |  |
|  | f2e33d7ca9 |  |
|  | 6c7167a224 |  |
|  | 00421235b0 |  |
|  | 0e2b67059b |  |
|  | 910c44cefc |  |
|  | 8bad036423 |  |
|  | a21830132f |  |
.github/workflows/build.yaml (1 change)

@@ -3,7 +3,6 @@ name: build-pipeline
 on:
   pull_request:
     branches:
-      - develop
       - main
       - release/v*
 
.github/workflows/docs.yml (new file, 83 lines)

@@ -0,0 +1,83 @@
+name: "Update PR labels and Block PR until related docs are shipped for the feature"
+
+on:
+  pull_request:
+    branches:
+      - main
+    types: [opened, edited, labeled, unlabeled]
+
+permissions:
+  pull-requests: write
+  contents: read
+
+jobs:
+  docs_label_check:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check PR Title and Manage Labels
+        uses: actions/github-script@v6
+        with:
+          script: |
+            const prTitle = context.payload.pull_request.title;
+            const prNumber = context.payload.pull_request.number;
+            const owner = context.repo.owner;
+            const repo = context.repo.repo;
+
+            // Fetch the current PR details to get labels
+            const pr = await github.rest.pulls.get({
+              owner,
+              repo,
+              pull_number: prNumber
+            });
+
+            const labels = pr.data.labels.map(label => label.name);
+
+            if (prTitle.startsWith('feat:')) {
+              const hasDocsRequired = labels.includes('docs required');
+              const hasDocsShipped = labels.includes('docs shipped');
+              const hasDocsNotRequired = labels.includes('docs not required');
+
+              // If "docs not required" is present, skip the checks
+              if (hasDocsNotRequired && !hasDocsRequired) {
+                console.log("Skipping checks due to 'docs not required' label.");
+                return; // Exit the script early
+              }
+
+              // If "docs shipped" is present, remove "docs required" if it exists
+              if (hasDocsShipped && hasDocsRequired) {
+                await github.rest.issues.removeLabel({
+                  owner,
+                  repo,
+                  issue_number: prNumber,
+                  name: 'docs required'
+                });
+                console.log("Removed 'docs required' label.");
+              }
+
+              // Add "docs required" label if neither "docs shipped" nor "docs required" are present
+              if (!hasDocsRequired && !hasDocsShipped) {
+                await github.rest.issues.addLabels({
+                  owner,
+                  repo,
+                  issue_number: prNumber,
+                  labels: ['docs required']
+                });
+                console.log("Added 'docs required' label.");
+              }
+            }
+
+            // Fetch the updated labels after any changes
+            const updatedPr = await github.rest.pulls.get({
+              owner,
+              repo,
+              pull_number: prNumber
+            });
+
+            const updatedLabels = updatedPr.data.labels.map(label => label.name);
+            const updatedHasDocsRequired = updatedLabels.includes('docs required');
+            const updatedHasDocsShipped = updatedLabels.includes('docs shipped');
+
+            // Block PR if "docs required" is still present and "docs shipped" is missing
+            if (updatedHasDocsRequired && !updatedHasDocsShipped) {
+              core.setFailed("This PR requires documentation. Please remove the 'docs required' label and add the 'docs shipped' label to proceed.");
+            }
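Not part of the diff, but for context: a maintainer would typically satisfy this check by swapping the labels on the PR. A minimal sketch with the GitHub CLI, where the PR number 1234 is a placeholder:

```bash
# Mark the docs as shipped on a feature PR so the docs_label_check job passes.
# Requires an authenticated GitHub CLI (`gh`) in the repository.
gh pr edit 1234 --remove-label "docs required" --add-label "docs shipped"

# Or opt out entirely for changes that genuinely need no documentation.
gh pr edit 1234 --add-label "docs not required"
```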
.github/workflows/e2e-k3s.yaml (2 changes)

@@ -42,7 +42,7 @@ jobs:
         kubectl create ns sample-application
 
         # apply hotrod k8s manifest file
-        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
 
         # wait for all deployments in sample-application namespace to be READY
         kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
.github/workflows/jest-coverage-changes.yml (5 changes)

@@ -2,7 +2,8 @@ name: Jest Coverage - changed files
 
 on:
   pull_request:
-    branches: develop
+    branches:
+      - main
 
 jobs:
   build:
@@ -11,7 +12,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          ref: "refs/heads/develop"
+          ref: "refs/heads/main"
           token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
 
       - name: Fetch branch
.github/workflows/prereleaser.yaml (new file, 36 lines)

@@ -0,0 +1,36 @@
+name: prereleaser
+
+on:
+  # schedule every wednesday 9:30 AM UTC (3pm IST)
+  schedule:
+    - cron: '30 9 * * 3'
+
+  # allow manual triggering of the workflow by a maintainer
+  workflow_dispatch:
+    inputs:
+      release_type:
+        description: "Type of the release"
+        type: choice
+        required: true
+        options:
+          - 'patch'
+          - 'minor'
+          - 'major'
+
+jobs:
+  verify:
+    uses: signoz/primus.workflows/.github/workflows/github-verify.yaml@main
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      GITHUB_TEAM_NAME: releaser
+      GITHUB_MEMBER_NAME: ${{ github.actor }}
+  signoz:
+    if: ${{ always() && (needs.verify.result == 'success' || github.event.name == 'schedule') }}
+    uses: signoz/primus.workflows/.github/workflows/releaser.yaml@main
+    secrets: inherit
+    needs: [verify]
+    with:
+      PRIMUS_REF: main
+      PROJECT_NAME: signoz
+      RELEASE_TYPE: ${{ inputs.release_type || 'minor' }}
.github/workflows/push.yaml (12 changes)

@@ -4,7 +4,6 @@ on:
   push:
     branches:
       - main
-      - develop
     tags:
       - v*
 
@@ -58,6 +57,17 @@ jobs:
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
+      - name: Create .env file
+        run: |
+          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
+          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
+          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
+          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
+          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
+          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
+          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
+          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
      - name: Setup golang
        uses: actions/setup-go@v4
        with:
.github/workflows/releaser.yaml (new file, 32 lines)

@@ -0,0 +1,32 @@
+name: releaser
+
+on:
+  # trigger on new latest release
+  release:
+    types: [published]
+
+jobs:
+  detect:
+    runs-on: ubuntu-latest
+    outputs:
+      release_type: ${{ steps.find.outputs.release_type }}
+    steps:
+      - id: find
+        name: find
+        run: |
+          release_tag=${{ github.event.release.tag_name }}
+          patch_number=$(echo $release_tag | awk -F. '{print $3}')
+          release_type="minor"
+          if [[ $patch_number -ne 0 ]]; then
+            release_type="patch"
+          fi
+          echo "release_type=${release_type}" >> "$GITHUB_OUTPUT"
+  charts:
+    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
+    secrets: inherit
+    needs: [detect]
+    with:
+      PRIMUS_REF: main
+      GITHUB_REPOSITORY_NAME: charts
+      GITHUB_EVENT_NAME: prereleaser
+      GITHUB_EVENT_PAYLOAD: "{\"release_type\": \"${{ needs.detect.outputs.release_type }}\"}"
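The detect job derives the release type purely from the tag's patch component. A standalone sketch of the same logic, runnable outside the workflow (the tag value here is illustrative):

```bash
#!/usr/bin/env bash
# Mirror of the `find` step above: a tag like v0.69.0 (patch == 0) means a
# minor release; v0.69.1 (patch != 0) means a patch release.
release_tag="v0.69.0"   # in the workflow this comes from github.event.release.tag_name
patch_number=$(echo "$release_tag" | awk -F. '{print $3}')

release_type="minor"
if [[ $patch_number -ne 0 ]]; then
  release_type="patch"
fi
echo "release_type=${release_type}"   # -> release_type=minor
```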
.github/workflows/sonar.yml (1 change)

@@ -3,7 +3,6 @@ on:
   pull_request:
     branches:
       - main
-      - develop
     paths:
       - 'frontend/**'
 defaults:
.github/workflows/staging-deployment.yaml (7 changes)

@@ -1,12 +1,12 @@
 name: staging-deployment
-# Trigger deployment only on push to develop branch
+# Trigger deployment only on push to main branch
 on:
   push:
     branches:
-      - develop
+      - main
 jobs:
   deploy:
-    name: Deploy latest develop branch to staging
+    name: Deploy latest main branch to staging
     runs-on: ubuntu-latest
     environment: staging
     permissions:
@@ -38,6 +38,7 @@ jobs:
           export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
           export OTELCOL_TAG="main"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
+          export KAFKA_SPAN_EVAL="true"
           docker system prune --force
           docker pull signoz/signoz-otel-collector:main
           docker pull signoz/signoz-schema-migrator:main
.github/workflows/testing-deployment.yaml (2 changes)

@@ -44,7 +44,7 @@ jobs:
           git add .
           git stash push -m "stashed on $(date --iso-8601=seconds)"
           git fetch origin
-          git checkout develop
+          git checkout main
           git pull
           # This is added to include the scenerio when new commit in PR is force-pushed
           git branch -D ${GITHUB_BRANCH}
@@ -339,7 +339,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
 **5.1.1 To install the HotROD sample app:**
 
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
  | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```
 
@@ -362,7 +362,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
 **5.1.4 To delete the HotROD sample app:**
 
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
  | HOTROD_NAMESPACE=sample-application bash
 ```
 
Makefile (10 changes)

@@ -8,6 +8,7 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
 BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
 BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
+ZEUS_URL ?= https://api.signoz.cloud
 DEV_BUILD ?= "" # set to any non-empty value to enable dev build
 
 # Internal variables or constants.
@@ -33,8 +34,9 @@ buildHash=${PACKAGE}/pkg/query-service/version.buildHash
 buildTime=${PACKAGE}/pkg/query-service/version.buildTime
 gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
 licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
+zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL
 
-LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
+LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
 DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
 
 all: build-push-frontend build-push-query-service
@@ -96,12 +98,12 @@ build-query-service-static-arm64:
 
 # Steps to build static binary of query service for all platforms
 .PHONY: build-query-service-static-all
-build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
+build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static
 
 # Steps to build and push docker image of query service
 .PHONY: build-query-service-amd64 build-push-query-service
 # Step to build docker image of query service in amd64 (used in build pipeline)
-build-query-service-amd64: build-query-service-static-amd64
+build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
 	@echo "------------------"
 	@echo "--> Building query-service docker image for amd64"
 	@echo "------------------"
@@ -188,4 +190,4 @@ check-no-ee-references:
 	fi
 
 test:
-	go test ./pkg/query-service/...
+	go test ./pkg/...
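For context on how the new ZEUS_URL value reaches the binary: `go build -ldflags "-X pkg.Var=value"` overwrites a package-level string variable at link time, which is exactly what the LD_FLAGS line does for the other build metadata. A minimal sketch of the equivalent manual invocation; the module path below is a stand-in, since the real prefix comes from the Makefile's PACKAGE variable:

```bash
# Set the ZeusURL constant at link time instead of hard-coding it in source.
# Replace github.com/example/signoz with the module path from ${PACKAGE}.
ZEUS_URL="https://api.signoz.cloud"
go build \
  -ldflags "-X github.com/example/signoz/ee/query-service/constants.ZeusURL=${ZEUS_URL}" \
  -o bin/query-service ./ee/query-service
```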
conf/example.yaml (new file, 70 lines)

@@ -0,0 +1,70 @@
+##################### SigNoz Configuration Example #####################
+#
+# Do not modify this file
+#
+
+##################### Instrumentation #####################
+instrumentation:
+  logs:
+    level: info
+    enabled: false
+    processors:
+      batch:
+        exporter:
+          otlp:
+            endpoint: localhost:4317
+  traces:
+    enabled: false
+    processors:
+      batch:
+        exporter:
+          otlp:
+            endpoint: localhost:4317
+  metrics:
+    enabled: true
+    readers:
+      pull:
+        exporter:
+          prometheus:
+            host: "0.0.0.0"
+            port: 9090
+
+##################### Web #####################
+web:
+  # Whether to enable the web frontend
+  enabled: true
+  # The prefix to serve web on
+  prefix: /
+  # The directory containing the static build files.
+  directory: /etc/signoz/web
+
+##################### Cache #####################
+cache:
+  # specifies the caching provider to use.
+  provider: memory
+  # memory: Uses in-memory caching.
+  memory:
+    # Time-to-live for cache entries in memory. Specify the duration in ns
+    ttl: 60000000000
+    # The interval at which the cache will be cleaned up
+    cleanupInterval: 1m
+  # redis: Uses Redis as the caching backend.
+  redis:
+    # The hostname or IP address of the Redis server.
+    host: localhost
+    # The port on which the Redis server is running. Default is usually 6379.
+    port: 6379
+    # The password for authenticating with the Redis server, if required.
+    password:
+    # The Redis database number to use
+    db: 0
+
+##################### SQLStore #####################
+sqlstore:
+  # specifies the SQLStore provider to use.
+  provider: sqlite
+  # The maximum number of open connections to the database.
+  max_open_conns: 100
+  sqlite:
+    # The path to the SQLite database file.
+    path: /var/lib/signoz/signoz.db
@@ -58,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in Si
 ```sh
 kubectl create ns sample-application
 
-kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
 ```
 
 To generate load:
@@ -1,5 +1,4 @@
 version: "3.9"
-
 x-clickhouse-defaults: &clickhouse-defaults
   image: clickhouse/clickhouse-server:24.1.2-alpine
   tty: true
@@ -16,14 +15,7 @@ x-clickhouse-defaults: &clickhouse-defaults
     max-file: "3"
   healthcheck:
     # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-    test:
-      [
-        "CMD",
-        "wget",
-        "--spider",
-        "-q",
-        "0.0.0.0:8123/ping"
-      ]
+    test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
     interval: 30s
     timeout: 5s
     retries: 3
@@ -32,15 +24,12 @@ x-clickhouse-defaults: &clickhouse-defaults
   nofile:
     soft: 262144
     hard: 262144
-
 x-db-depend: &db-depend
   depends_on:
     - clickhouse
     - otel-collector-migrator
     # - clickhouse-2
     # - clickhouse-3
-
-
 services:
   zookeeper-1:
     image: bitnami/zookeeper:3.7.1
@@ -57,7 +46,6 @@ services:
       # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
       - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
-
   # zookeeper-2:
   #   image: bitnami/zookeeper:3.7.0
   #   hostname: zookeeper-2
@@ -89,9 +77,8 @@ services:
   #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
   #     - ALLOW_ANONYMOUS_LOGIN=yes
   #     - ZOO_AUTOPURGE_INTERVAL=1
-
   clickhouse:
-    <<: *clickhouse-defaults
+    !!merge <<: *clickhouse-defaults
     hostname: clickhouse
     # ports:
     #   - "9000:9000"
@@ -103,7 +90,6 @@ services:
       - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
       # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
-
   # clickhouse-2:
   #   <<: *clickhouse-defaults
   #   hostname: clickhouse-2
@@ -131,7 +117,6 @@ services:
   #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
   #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
   #     - ./data/clickhouse-3/:/var/lib/clickhouse/
-
   alertmanager:
     image: signoz/alertmanager:0.23.7
     volumes:
@@ -144,14 +129,9 @@ services:
     deploy:
       restart_policy:
         condition: on-failure
-
   query-service:
-    image: signoz/query-service:0.56.0
+    image: signoz/query-service:0.69.0
-    command:
-      [
-        "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
-      ]
+    command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
     # ports:
     #   - "6060:6060" # pprof port
     #   - "8080:8080" # query-service port
@@ -169,24 +149,16 @@ services:
       - TELEMETRY_ENABLED=true
       - DEPLOYMENT_TYPE=docker-swarm
     healthcheck:
-      test:
-        [
-          "CMD",
-          "wget",
-          "--spider",
-          "-q",
-          "localhost:8080/api/v1/health"
-        ]
+      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
       interval: 30s
       timeout: 5s
       retries: 3
     deploy:
       restart_policy:
         condition: on-failure
-    <<: *db-depend
+    !!merge <<: *db-depend
-
   frontend:
-    image: signoz/frontend:0.56.0
+    image: signoz/frontend:0.69.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -197,15 +169,9 @@ services:
       - "3301:3301"
     volumes:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
-
   otel-collector:
-    image: signoz/signoz-otel-collector:0.102.12
+    image: signoz/signoz-otel-collector:0.111.24
-    command:
-      [
-        "--config=/etc/otel-collector-config.yaml",
-        "--manager-config=/etc/manager-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
+    command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
     user: root # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -214,7 +180,6 @@ services:
       - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
-      - DOCKER_MULTI_NODE_CLUSTER=false
       - LOW_CARDINAL_EXCEPTION_GROUPING=false
     ports:
       # - "1777:1777" # pprof extension
@@ -236,20 +201,20 @@ services:
       - clickhouse
       - otel-collector-migrator
       - query-service
-
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.102.10
+    image: signoz/signoz-schema-migrator:0.111.24
     deploy:
       restart_policy:
         condition: on-failure
         delay: 5s
     command:
+      - "sync"
       - "--dsn=tcp://clickhouse:9000"
+      - "--up="
     depends_on:
       - clickhouse
       # - clickhouse-2
       # - clickhouse-3
-
   logspout:
     image: "gliderlabs/logspout:v3.2.14"
     volumes:
@@ -262,17 +227,15 @@ services:
       mode: global
       restart_policy:
         condition: on-failure
-
   hotrod:
     image: jaegertracing/example-hotrod:1.30
-    command: [ "all" ]
+    command: ["all"]
     environment:
       - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
     logging:
       options:
         max-size: 50m
         max-file: "3"
-
   load-hotrod:
     image: "signoz/locust:1.2.3"
     hostname: load-hotrod
@@ -66,28 +66,6 @@ processors:
     # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
     detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
     timeout: 2s
-  signozspanmetrics/cumulative:
-    metrics_exporter: clickhousemetricswrite
-    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
-    dimensions_cache_size: 100000
-    dimensions:
-      - name: service.namespace
-        default: default
-      - name: deployment.environment
-        default: default
-      # This is added to ensure the uniqueness of the timeseries
-      # Otherwise, identical timeseries produced by multiple replicas of
-      # collectors result in incorrect APM metrics
-      - name: signoz.collector.id
-      - name: service.version
-      - name: browser.platform
-      - name: browser.mobile
-      - name: k8s.cluster.name
-      - name: k8s.node.name
-      - name: k8s.namespace.name
-      - name: host.name
-      - name: host.type
-      - name: container.name
   # memory_limiter:
   #   # 80% of maximum memory up to 2G
   #   limit_mib: 1500
@@ -131,18 +109,19 @@ processors:
 exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
-    docker_multi_node_cluster: ${env:DOCKER_MULTI_NODE_CLUSTER}
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+    use_new_schema: true
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
       enabled: true
   clickhousemetricswrite/prometheus:
     endpoint: tcp://clickhouse:9000/signoz_metrics
+  clickhousemetricswritev2:
+    dsn: tcp://clickhouse:9000/signoz_metrics
   # logging: {}
   clickhouselogsexporter:
     dsn: tcp://clickhouse:9000/signoz_logs
-    docker_multi_node_cluster: ${env:DOCKER_MULTI_NODE_CLUSTER}
     timeout: 10s
     use_new_schema: true
 extensions:
@@ -163,20 +142,20 @@ service:
   pipelines:
     traces:
       receivers: [jaeger, otlp]
-      processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
+      processors: [signozspanmetrics/delta, batch]
       exporters: [clickhousetraces]
     metrics:
       receivers: [otlp]
       processors: [batch]
-      exporters: [clickhousemetricswrite]
+      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
-    metrics/generic:
+    metrics/hostmetrics:
       receivers: [hostmetrics]
       processors: [resourcedetection, batch]
-      exporters: [clickhousemetricswrite]
+      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
     metrics/prometheus:
       receivers: [prometheus]
       processors: [batch]
-      exporters: [clickhousemetricswrite/prometheus]
+      exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
     logs:
       receivers: [otlp, tcplog/docker]
       processors: [batch]
@@ -1,8 +1,6 @@
 version: "2.4"
-
 include:
   - test-app-docker-compose.yaml
-
 services:
   zookeeper-1:
     image: bitnami/zookeeper:3.7.1
@@ -20,7 +18,6 @@ services:
       # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
       - ALLOW_ANONYMOUS_LOGIN=yes
       - ZOO_AUTOPURGE_INTERVAL=1
-
   clickhouse:
     image: clickhouse/clickhouse-server:24.1.2-alpine
     container_name: signoz-clickhouse
@@ -43,18 +40,10 @@ services:
       max-file: "3"
     healthcheck:
       # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-      test:
-        [
-          "CMD",
-          "wget",
-          "--spider",
-          "-q",
-          "0.0.0.0:8123/ping"
-        ]
+      test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
       interval: 30s
       timeout: 5s
       retries: 3
-
   alertmanager:
     container_name: signoz-alertmanager
     image: signoz/alertmanager:0.23.7
@@ -67,31 +56,25 @@ services:
     command:
       - --queryService.url=http://query-service:8085
       - --storage.path=/data
-
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
     container_name: otel-migrator
     command:
+      - "sync"
       - "--dsn=tcp://clickhouse:9000"
+      - "--up="
     depends_on:
       clickhouse:
         condition: service_healthy
       # clickhouse-2:
       #   condition: service_healthy
       # clickhouse-3:
       #   condition: service_healthy
-
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.102.12
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
-    command:
-      [
-        "--config=/etc/otel-collector-config.yaml",
-        "--manager-config=/etc/manager-config.yaml",
-        "--copy-path=/var/tmp/collector-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
+    command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
     # user: root # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -120,7 +103,6 @@ services:
         condition: service_completed_successfully
       query-service:
         condition: service_healthy
-
   logspout:
     image: "gliderlabs/logspout:v3.2.14"
     container_name: signoz-logspout
@@ -25,7 +25,8 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
+        "--use-logs-new-schema=true",
+        "--use-trace-new-schema=true"
       ]
     ports:
       - "6060:6060"
@@ -13,14 +13,7 @@ x-clickhouse-defaults: &clickhouse-defaults
     max-file: "3"
   healthcheck:
     # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-    test:
-      [
-        "CMD",
-        "wget",
-        "--spider",
-        "-q",
-        "0.0.0.0:8123/ping"
-      ]
+    test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
     interval: 30s
     timeout: 5s
     retries: 3
@@ -29,20 +22,17 @@ x-clickhouse-defaults: &clickhouse-defaults
   nofile:
     soft: 262144
     hard: 262144
-
 x-db-depend: &db-depend
   depends_on:
     clickhouse:
       condition: service_healthy
     otel-collector-migrator-sync:
       condition: service_completed_successfully
     # clickhouse-2:
     #   condition: service_healthy
     # clickhouse-3:
     #   condition: service_healthy
-
 services:
-
   zookeeper-1:
     image: bitnami/zookeeper:3.7.1
     container_name: signoz-zookeeper-1
@@ -59,7 +49,6 @@ services:
       # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
       - ALLOW_ANONYMOUS_LOGIN=yes
       - ZOO_AUTOPURGE_INTERVAL=1
-
   # zookeeper-2:
   #   image: bitnami/zookeeper:3.7.0
   #   container_name: signoz-zookeeper-2
@@ -93,9 +82,8 @@ services:
   #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
   #     - ALLOW_ANONYMOUS_LOGIN=yes
   #     - ZOO_AUTOPURGE_INTERVAL=1
-
   clickhouse:
-    <<: *clickhouse-defaults
+    !!merge <<: *clickhouse-defaults
     container_name: signoz-clickhouse
     hostname: clickhouse
     ports:
@@ -110,7 +98,6 @@ services:
       # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
       - ./user_scripts:/var/lib/clickhouse/user_scripts/
-
   # clickhouse-2:
   #   <<: *clickhouse-defaults
   #   container_name: signoz-clickhouse-2
@@ -128,7 +115,6 @@ services:
   #     - ./data/clickhouse-2/:/var/lib/clickhouse/
   #     - ./user_scripts:/var/lib/clickhouse/user_scripts/
 
-
   # clickhouse-3:
   #   <<: *clickhouse-defaults
   #   container_name: signoz-clickhouse-3
@@ -145,7 +131,6 @@ services:
   #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
   #     - ./data/clickhouse-3/:/var/lib/clickhouse/
   #     - ./user_scripts:/var/lib/clickhouse/user_scripts/
-
   alertmanager:
     image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
     container_name: signoz-alertmanager
@@ -158,17 +143,11 @@ services:
     command:
       - --queryService.url=http://query-service:8085
       - --storage.path=/data
-
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
-
   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.56.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.69.0}
     container_name: signoz-query-service
-    command:
-      [
-        "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true"
-      ]
+    command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
     # ports:
     #   - "6060:6060" # pprof port
     #   - "8080:8080" # query-service port
@@ -187,21 +166,13 @@ services:
       - DEPLOYMENT_TYPE=docker-standalone-amd
     restart: on-failure
     healthcheck:
-      test:
-        [
-          "CMD",
-          "wget",
-          "--spider",
-          "-q",
-          "localhost:8080/api/v1/health"
-        ]
+      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
       interval: 30s
       timeout: 5s
       retries: 3
-    <<: *db-depend
+    !!merge <<: *db-depend
-
   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.56.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.69.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -211,9 +182,8 @@ services:
       - "3301:3301"
     volumes:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
-
   otel-collector-migrator-sync:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
     container_name: otel-migrator-sync
     command:
       - "sync"
@@ -222,13 +192,12 @@ services:
     depends_on:
       clickhouse:
         condition: service_healthy
       # clickhouse-2:
       #   condition: service_healthy
       # clickhouse-3:
       #   condition: service_healthy
-
   otel-collector-migrator-async:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
     container_name: otel-migrator-async
     command:
       - "async"
@@ -239,21 +208,14 @@ services:
         condition: service_healthy
       otel-collector-migrator-sync:
        condition: service_completed_successfully
       # clickhouse-2:
       #   condition: service_healthy
       # clickhouse-3:
       #   condition: service_healthy
-
   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.12}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
     container_name: signoz-otel-collector
-    command:
-      [
-        "--config=/etc/otel-collector-config.yaml",
-        "--manager-config=/etc/manager-config.yaml",
-        "--copy-path=/var/tmp/collector-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
+    command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
     user: root # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -262,7 +224,6 @@ services:
       - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
-      - DOCKER_MULTI_NODE_CLUSTER=false
       - LOW_CARDINAL_EXCEPTION_GROUPING=false
     ports:
       # - "1777:1777" # pprof extension
@@ -284,7 +245,6 @@ services:
         condition: service_completed_successfully
       query-service:
         condition: service_healthy
-
   logspout:
     image: "gliderlabs/logspout:v3.2.14"
     container_name: signoz-logspout
@@ -1,8 +1,6 @@
 version: "2.4"
-
 include:
 - test-app-docker-compose.yaml
-
 x-clickhouse-defaults: &clickhouse-defaults
 restart: on-failure
 # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
@@ -18,14 +16,7 @@ x-clickhouse-defaults: &clickhouse-defaults
 max-file: "3"
 healthcheck:
 # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-test:
-[
-"CMD",
-"wget",
-"--spider",
-"-q",
-"0.0.0.0:8123/ping"
-]
+test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
 interval: 30s
 timeout: 5s
 retries: 3
@@ -34,20 +25,17 @@ x-clickhouse-defaults: &clickhouse-defaults
 nofile:
 soft: 262144
 hard: 262144
-
 x-db-depend: &db-depend
 depends_on:
 clickhouse:
 condition: service_healthy
 otel-collector-migrator:
 condition: service_completed_successfully
 # clickhouse-2:
 # condition: service_healthy
 # clickhouse-3:
 # condition: service_healthy
-
 services:
-
 zookeeper-1:
 image: bitnami/zookeeper:3.7.1
 container_name: signoz-zookeeper-1
@@ -64,7 +52,6 @@ services:
 # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
 - ALLOW_ANONYMOUS_LOGIN=yes
 - ZOO_AUTOPURGE_INTERVAL=1
-
 # zookeeper-2:
 # image: bitnami/zookeeper:3.7.0
 # container_name: signoz-zookeeper-2
@@ -98,9 +85,8 @@ services:
 # - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
 # - ALLOW_ANONYMOUS_LOGIN=yes
 # - ZOO_AUTOPURGE_INTERVAL=1
-
 clickhouse:
-<<: *clickhouse-defaults
+!!merge <<: *clickhouse-defaults
 container_name: signoz-clickhouse
 hostname: clickhouse
 ports:
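A note on the `!!merge` tags introduced in this file: `!!merge <<: *clickhouse-defaults` is the explicitly tagged spelling of the YAML merge key and is semantically identical to the plain `<<: *clickhouse-defaults` it replaces. The tag is typically emitted when a file is round-tripped through a YAML formatter, which would also explain the blank-line deletions throughout these hunks.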
@@ -115,7 +101,6 @@ services:
 # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
 - ./data/clickhouse/:/var/lib/clickhouse/
 - ./user_scripts:/var/lib/clickhouse/user_scripts/
-
 # clickhouse-2:
 # <<: *clickhouse-defaults
 # container_name: signoz-clickhouse-2
@@ -133,7 +118,6 @@ services:
 # - ./data/clickhouse-2/:/var/lib/clickhouse/
 # - ./user_scripts:/var/lib/clickhouse/user_scripts/
 
-
 # clickhouse-3:
 # <<: *clickhouse-defaults
 # container_name: signoz-clickhouse-3
@@ -150,7 +134,6 @@ services:
 # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
 # - ./data/clickhouse-3/:/var/lib/clickhouse/
 # - ./user_scripts:/var/lib/clickhouse/user_scripts/
-
 alertmanager:
 image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
 container_name: signoz-alertmanager
@@ -163,18 +146,11 @@ services:
 command:
 - --queryService.url=http://query-service:8085
 - --storage.path=/data
-
 # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
-
 query-service:
-image: signoz/query-service:${DOCKER_TAG:-0.56.0}
+image: signoz/query-service:${DOCKER_TAG:-0.69.0}
 container_name: signoz-query-service
-command:
-[
-"-config=/root/config/prometheus.yml",
-"-gateway-url=https://api.staging.signoz.cloud",
-"--use-logs-new-schema=true"
-]
+command: ["-config=/root/config/prometheus.yml", "-gateway-url=https://api.staging.signoz.cloud", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
 # ports:
 # - "6060:6060" # pprof port
 # - "8080:8080" # query-service port
@@ -191,23 +167,16 @@ services:
 - GODEBUG=netdns=go
 - TELEMETRY_ENABLED=true
 - DEPLOYMENT_TYPE=docker-standalone-amd
+- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
 restart: on-failure
 healthcheck:
-test:
-[
-"CMD",
-"wget",
-"--spider",
-"-q",
-"localhost:8080/api/v1/health"
-]
+test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
 interval: 30s
 timeout: 5s
 retries: 3
-<<: *db-depend
+!!merge <<: *db-depend
-
 frontend:
-image: signoz/frontend:${DOCKER_TAG:-0.56.0}
+image: signoz/frontend:${DOCKER_TAG:-0.69.0}
 container_name: signoz-frontend
 restart: on-failure
 depends_on:
@@ -217,31 +186,22 @@ services:
 - "3301:3301"
 volumes:
 - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
-
 otel-collector-migrator:
-image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
+image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
 container_name: otel-migrator
 command:
 - "--dsn=tcp://clickhouse:9000"
 depends_on:
 clickhouse:
 condition: service_healthy
 # clickhouse-2:
 # condition: service_healthy
 # clickhouse-3:
 # condition: service_healthy
-
-
 otel-collector:
-image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.12}
+image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
 container_name: signoz-otel-collector
-command:
-[
-"--config=/etc/otel-collector-config.yaml",
-"--manager-config=/etc/manager-config.yaml",
-"--copy-path=/var/tmp/collector-config.yaml",
-"--feature-gates=-pkg.translator.prometheus.NormalizeName"
-]
+command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
 user: root # required for reading docker container logs
 volumes:
 - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -250,7 +210,6 @@ services:
 - /:/hostfs:ro
 environment:
 - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
-- DOCKER_MULTI_NODE_CLUSTER=false
 - LOW_CARDINAL_EXCEPTION_GROUPING=false
 ports:
 # - "1777:1777" # pprof extension
@@ -272,7 +231,6 @@ services:
 condition: service_completed_successfully
 query-service:
 condition: service_healthy
-
 logspout:
 image: "gliderlabs/logspout:v3.2.14"
 container_name: signoz-logspout

@@ -57,35 +57,11 @@ receivers:
 labels:
 job_name: otel-collector
 
-
 processors:
 batch:
 send_batch_size: 10000
 send_batch_max_size: 11000
 timeout: 10s
-signozspanmetrics/cumulative:
-metrics_exporter: clickhousemetricswrite
-metrics_flush_interval: 60s
-latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
-dimensions_cache_size: 100000
-dimensions:
-- name: service.namespace
-default: default
-- name: deployment.environment
-default: default
-# This is added to ensure the uniqueness of the timeseries
-# Otherwise, identical timeseries produced by multiple replicas of
-# collectors result in incorrect APM metrics
-- name: signoz.collector.id
-- name: service.version
-- name: browser.platform
-- name: browser.mobile
-- name: k8s.cluster.name
-- name: k8s.node.name
-- name: k8s.namespace.name
-- name: host.name
-- name: host.type
-- name: container.name
 # memory_limiter:
 # # 80% of maximum memory up to 2G
 # limit_mib: 1500
@@ -142,17 +118,18 @@ extensions:
 exporters:
 clickhousetraces:
 datasource: tcp://clickhouse:9000/signoz_traces
-docker_multi_node_cluster: ${env:DOCKER_MULTI_NODE_CLUSTER}
 low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+use_new_schema: true
 clickhousemetricswrite:
 endpoint: tcp://clickhouse:9000/signoz_metrics
 resource_to_telemetry_conversion:
 enabled: true
 clickhousemetricswrite/prometheus:
 endpoint: tcp://clickhouse:9000/signoz_metrics
+clickhousemetricswritev2:
+dsn: tcp://clickhouse:9000/signoz_metrics
 clickhouselogsexporter:
 dsn: tcp://clickhouse:9000/signoz_logs
-docker_multi_node_cluster: ${env:DOCKER_MULTI_NODE_CLUSTER}
 timeout: 10s
 use_new_schema: true
 # logging: {}
@@ -170,20 +147,20 @@ service:
 pipelines:
 traces:
 receivers: [jaeger, otlp]
-processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
+processors: [signozspanmetrics/delta, batch]
 exporters: [clickhousetraces]
 metrics:
 receivers: [otlp]
 processors: [batch]
-exporters: [clickhousemetricswrite]
+exporters: [clickhousemetricswrite, clickhousemetricswritev2]
-metrics/generic:
+metrics/hostmetrics:
 receivers: [hostmetrics]
 processors: [resourcedetection, batch]
-exporters: [clickhousemetricswrite]
+exporters: [clickhousemetricswrite, clickhousemetricswritev2]
 metrics/prometheus:
 receivers: [prometheus]
 processors: [batch]
-exporters: [clickhousemetricswrite/prometheus]
+exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
 logs:
 receivers: [otlp, tcplog/docker]
 processors: [batch]

@@ -32,6 +32,11 @@ has_cmd() {
 command -v "$1" > /dev/null 2>&1
 }
 
+# Check if docker compose plugin is present
+has_docker_compose_plugin() {
+docker compose version > /dev/null 2>&1
+}
+
 is_mac() {
 [[ $OSTYPE == darwin* ]]
 }
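The `has_docker_compose_plugin` helper added above detects the Compose v2 plugin simply by probing `docker compose version`. For readers who want the same probe outside this script, here is a minimal Go sketch of the identical check (all names are illustrative, not part of the repository):

```go
package main

import (
	"fmt"
	"os/exec"
)

// hasDockerComposePlugin mirrors the shell helper: the plugin counts as
// present when `docker compose version` exits with status 0.
func hasDockerComposePlugin() bool {
	return exec.Command("docker", "compose", "version").Run() == nil
}

func main() {
	if hasDockerComposePlugin() {
		fmt.Println("using: docker compose")
	} else {
		fmt.Println("falling back to: docker-compose")
	}
}
```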
@@ -183,9 +188,7 @@ install_docker() {
 $sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
 echo "Installing docker"
 $yum_cmd install docker-ce docker-ce-cli containerd.io
-
 fi
-
 }
 
 compose_version () {
@@ -227,12 +230,6 @@ start_docker() {
 echo "Starting docker service"
 $sudo_cmd systemctl start docker.service
 fi
-# if [[ -z $sudo_cmd ]]; then
-# docker ps > /dev/null && true
-# if [[ $? -ne 0 ]]; then
-# request_sudo
-# fi
-# fi
 if [[ -z $sudo_cmd ]]; then
 if ! docker ps > /dev/null && true; then
 request_sudo
@@ -265,7 +262,7 @@ bye() { # Prints a friendly good bye message and exits the script.
 
 echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
 echo ""
-echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
+echo -e "$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
 
 echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
 echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
@@ -296,11 +293,6 @@ request_sudo() {
 if (( $EUID != 0 )); then
 sudo_cmd="sudo"
 echo -e "Please enter your sudo password, if prompted."
-# $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
-# if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
-# echo "Need sudo privileges to proceed with the installation."
-# exit 1;
-# fi
 if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
 echo "Need sudo privileges to proceed with the installation."
 exit 1;
@@ -317,6 +309,7 @@ echo -e "👋 Thank you for trying out SigNoz! "
 echo ""
 
 sudo_cmd=""
+docker_compose_cmd=""
 
 # Check sudo permissions
 if (( $EUID != 0 )); then
@@ -362,28 +355,8 @@ else
 SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
 fi
 
-# echo ""
-
-# echo -e "👉 ${RED}Two ways to go forward\n"
-# echo -e "${RED}1) ClickHouse as database (default)\n"
-# read -p "⚙️ Enter your preference (1/2):" choice_setup
-
-# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
-# do
-# # echo $choice_setup
-# echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
-# read -p "⚙️ Enter your preference (1/2): " choice_setup
-# # echo $choice_setup
-# done
-
-# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
-# setup_type='clickhouse'
-# fi
-
 setup_type='clickhouse'
 
-# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
-
 # Run bye if failure happens
 trap bye EXIT
 
@@ -455,8 +428,6 @@ if [[ $desired_os -eq 0 ]]; then
 send_event "os_not_supported"
 fi
 
-# check_ports_occupied
-
 # Check is Docker daemon is installed and available. If not, the install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
 if ! is_command_present docker; then
 
@@ -486,27 +457,39 @@ if ! is_command_present docker; then
 fi
 fi
 
+if has_docker_compose_plugin; then
+echo "docker compose plugin is present, using it"
+docker_compose_cmd="docker compose"
 # Install docker-compose
-if ! is_command_present docker-compose; then
-request_sudo
-install_docker_compose
+else
+docker_compose_cmd="docker-compose"
+if ! is_command_present docker-compose; then
+request_sudo
+install_docker_compose
+fi
 fi
 
 start_docker
 
-# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
+# check for open ports, if signoz is not installed
+if is_command_present docker-compose; then
+if $sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
+echo "SigNoz already installed, skipping the occupied ports check"
+else
+check_ports_occupied
+fi
+fi
+
 echo ""
 echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
-$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
+$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml pull
 
 echo ""
 echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
 echo
-# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
+# The $docker_compose_cmd command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
 # script doesn't exit because this command looks like it failed to do it's thing.
-$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
+$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
 
 wait_for_containers_start 60
 echo ""
@@ -516,7 +499,7 @@ if [[ $status_code -ne 200 ]]; then
 echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
 echo ""
 
-echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
+echo -e "$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
 
 echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
 echo "or reach us on SigNoz for support https://signoz.io/slack"
@@ -537,7 +520,7 @@ else
 echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
 echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
 
-echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
+echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
 
 echo ""
 echo "+++++++++++++++++++++++++++++++++++++++++++++++++"

@@ -1,5 +1,5 @@
 # use a minimal alpine image
-FROM alpine:3.18.6
+FROM alpine:3.20.3
 
 # Add Maintainer Info
 LABEL maintainer="signoz"
@@ -23,6 +23,9 @@ COPY pkg/query-service/templates /root/templates
 # Make query-service executable for non-root users
 RUN chmod 755 /root /root/query-service
 
+# Copy frontend
+COPY frontend/build/ /etc/signoz/web/
+
 # run the binary
 ENTRYPOINT ["./query-service"]
 
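Read together with the `web.AddToRouter` change in `server.go` later in this diff, the added `COPY frontend/build/ /etc/signoz/web/` suggests the query-service image now bundles and serves the frontend assets itself rather than depending solely on the separate frontend container.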
@@ -12,6 +12,7 @@ import (
 "go.signoz.io/signoz/ee/query-service/license"
 "go.signoz.io/signoz/ee/query-service/usage"
 baseapp "go.signoz.io/signoz/pkg/query-service/app"
+"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
 "go.signoz.io/signoz/pkg/query-service/app/integrations"
 "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
 "go.signoz.io/signoz/pkg/query-service/cache"
@@ -34,12 +35,14 @@ type APIHandlerOptions struct {
 FeatureFlags baseint.FeatureLookup
 LicenseManager *license.Manager
 IntegrationsController *integrations.Controller
+CloudIntegrationsController *cloudintegrations.Controller
 LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
 Cache cache.Cache
 Gateway *httputil.ReverseProxy
 // Querier Influx Interval
 FluxInterval time.Duration
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 }
 
 type APIHandler struct {
@@ -61,10 +64,12 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
 RuleManager: opts.RulesManager,
 FeatureFlags: opts.FeatureFlags,
 IntegrationsController: opts.IntegrationsController,
+CloudIntegrationsController: opts.CloudIntegrationsController,
 LogsParsingPipelineController: opts.LogsParsingPipelineController,
 Cache: opts.Cache,
 FluxInterval: opts.FluxInterval,
 UseLogsNewSchema: opts.UseLogsNewSchema,
+UseTraceNewSchema: opts.UseTraceNewSchema,
 })
 
 if err != nil {
@@ -173,14 +178,22 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
 router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
 router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
 
+// v2
 router.HandleFunc("/api/v2/licenses",
 am.ViewAccess(ah.listLicensesV2)).
 Methods(http.MethodGet)
 
+// v3
+router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet)
+router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost)
+router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.refreshLicensesV3)).Methods(http.MethodPut)
+router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.getActiveLicenseV3)).Methods(http.MethodGet)
+
+// v4
 router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
 
 // Gateway
-router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))
+router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP))
 
 ah.APIHandler.RegisterRoutes(router, am)
 
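The `// v3` block above registers four license routes whose handlers appear in `license.go` further down. A sketch of how a client might exercise them; the base URL and JWT are assumptions, not part of this diff:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func call(method, url string, body []byte, token string) {
	req, err := http.NewRequest(method, url, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token) // hypothetical JWT
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(method, url, "->", resp.StatusCode, string(b))
}

func main() {
	base := "http://localhost:3301" // assumed address of a SigNoz instance
	token := "<jwt>"

	// ViewAccess: list licenses / fetch the active one (404 when none is set).
	call(http.MethodGet, base+"/api/v3/licenses", nil, token)
	call(http.MethodGet, base+"/api/v3/licenses/active", nil, token)

	// AdminAccess: apply a key (202 Accepted) or refresh (204 No Content).
	call(http.MethodPost, base+"/api/v3/licenses", []byte(`{"key":"<license-key>"}`), token)
	call(http.MethodPut, base+"/api/v3/licenses", nil, token)
}
```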
@@ -9,6 +9,7 @@ import (
 
 "go.signoz.io/signoz/ee/query-service/constants"
 "go.signoz.io/signoz/ee/query-service/model"
+"go.signoz.io/signoz/pkg/http/render"
 "go.uber.org/zap"
 )
 
@@ -59,6 +60,21 @@ type billingDetails struct {
 } `json:"data"`
 }
 
+type ApplyLicenseRequest struct {
+LicenseKey string `json:"key"`
+}
+
+type ListLicenseResponse map[string]interface{}
+
+func convertLicenseV3ToListLicenseResponse(licensesV3 []*model.LicenseV3) []ListLicenseResponse {
+listLicenses := []ListLicenseResponse{}
+
+for _, license := range licensesV3 {
+listLicenses = append(listLicenses, license.Data)
+}
+return listLicenses
+}
+
 func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
 licenses, apiError := ah.LM().GetLicenses(context.Background())
 if apiError != nil {
@@ -79,7 +95,7 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
 RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
 return
 }
-license, apiError := ah.LM().Activate(r.Context(), l.Key)
+license, apiError := ah.LM().ActivateV3(r.Context(), l.Key)
 if apiError != nil {
 RespondError(w, apiError, nil)
 return
@@ -88,6 +104,68 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
 ah.Respond(w, license)
 }
 
+func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
+licenses, apiError := ah.LM().GetLicensesV3(r.Context())
+
+if apiError != nil {
+RespondError(w, apiError, nil)
+return
+}
+
+ah.Respond(w, convertLicenseV3ToListLicenseResponse(licenses))
+}
+
+func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
+activeLicense, err := ah.LM().GetRepo().GetActiveLicenseV3(r.Context())
+if err != nil {
+RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
+return
+}
+// return 404 not found if there is no active license
+if activeLicense == nil {
+RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)
+return
+}
+
+// TODO deprecate this when we move away from key for stripe
+activeLicense.Data["key"] = activeLicense.Key
+render.Success(w, http.StatusOK, activeLicense.Data)
+}
+
+// this function is called by zeus when inserting licenses in the query-service
+func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
+var licenseKey ApplyLicenseRequest
+
+if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil {
+RespondError(w, model.BadRequest(err), nil)
+return
+}
+
+if licenseKey.LicenseKey == "" {
+RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
+return
+}
+
+_, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
+if apiError != nil {
+RespondError(w, apiError, nil)
+return
+}
+
+render.Success(w, http.StatusAccepted, nil)
+}
+
+func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) {
+
+apiError := ah.LM().RefreshLicense(r.Context())
+if apiError != nil {
+RespondError(w, apiError, nil)
+return
+}
+
+render.Success(w, http.StatusNoContent, nil)
+}
+
 func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
 
 type checkoutResponse struct {
@@ -154,12 +232,38 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
 ah.Respond(w, billingResponse.Data)
 }
 
-func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
-licenses, apiError := ah.LM().GetLicenses(context.Background())
-if apiError != nil {
-RespondError(w, apiError, nil)
+func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
+licensesV2 := []model.License{}
+for _, l := range licenses {
+planKeyFromPlanName, ok := model.MapOldPlanKeyToNewPlanName[l.PlanName]
+if !ok {
+planKeyFromPlanName = model.Basic
+}
+licenseV2 := model.License{
+Key: l.Key,
+ActivationId: "",
+PlanDetails: "",
+FeatureSet: l.Features,
+ValidationMessage: "",
+IsCurrent: l.IsCurrent,
+LicensePlan: model.LicensePlan{
+PlanKey: planKeyFromPlanName,
+ValidFrom: l.ValidFrom,
+ValidUntil: l.ValidUntil,
+Status: l.Status},
+}
+licensesV2 = append(licensesV2, licenseV2)
 }
+return licensesV2
+}
+
+func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
+licensesV3, apierr := ah.LM().GetLicensesV3(r.Context())
+if apierr != nil {
+RespondError(w, apierr, nil)
+return
+}
+licenses := convertLicenseV3ToLicenseV2(licensesV3)
+
 resp := model.Licenses{
 TrialStart: -1,

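`convertLicenseV3ToLicenseV2` above falls back to `model.Basic` when a plan name has no entry in `model.MapOldPlanKeyToNewPlanName`, so unknown plans degrade instead of erroring. The same comma-ok fallback in isolation (the map contents below are invented for illustration):

```go
package main

import "fmt"

// Illustrative stand-in for model.MapOldPlanKeyToNewPlanName.
var mapOldPlanKeyToNewPlanName = map[string]string{
	"ENTERPRISE_PLAN": "enterprise",
	"BASIC_PLAN":      "basic",
}

// planKeyFor degrades unknown plan names to a basic plan, mirroring the
// fallback to model.Basic in the actual converter.
func planKeyFor(planName string) string {
	if key, ok := mapOldPlanKeyToNewPlanName[planName]; ok {
		return key
	}
	return "basic"
}

func main() {
	fmt.Println(planKeyFor("ENTERPRISE_PLAN")) // enterprise
	fmt.Println(planKeyFor("UNKNOWN"))         // basic
}
```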
@@ -26,8 +26,9 @@ func NewDataConnector(
 dialTimeout time.Duration,
 cluster string,
 useLogsNewSchema bool,
+useTraceNewSchema bool,
 ) *ClickhouseReader {
-ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
+ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema, useTraceNewSchema)
 return &ClickhouseReader{
 conn: ch.GetConn(),
 appdb: localDB,

@@ -29,16 +29,18 @@ import (
 "go.signoz.io/signoz/ee/query-service/integrations/gateway"
 "go.signoz.io/signoz/ee/query-service/interfaces"
 "go.signoz.io/signoz/ee/query-service/rules"
+"go.signoz.io/signoz/pkg/http/middleware"
 baseauth "go.signoz.io/signoz/pkg/query-service/auth"
-"go.signoz.io/signoz/pkg/query-service/migrate"
-"go.signoz.io/signoz/pkg/query-service/model"
 v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+"go.signoz.io/signoz/pkg/signoz"
+"go.signoz.io/signoz/pkg/web"
 
 licensepkg "go.signoz.io/signoz/ee/query-service/license"
 "go.signoz.io/signoz/ee/query-service/usage"
 
 "go.signoz.io/signoz/pkg/query-service/agentConf"
 baseapp "go.signoz.io/signoz/pkg/query-service/app"
+"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
 "go.signoz.io/signoz/pkg/query-service/app/dashboards"
 baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
 "go.signoz.io/signoz/pkg/query-service/app/integrations"
@@ -62,6 +64,7 @@ import (
 const AppDbEngine = "sqlite"
 
 type ServerOptions struct {
+SigNoz *signoz.SigNoz
 PromConfigPath string
 SkipTopLvlOpsPath string
 HTTPHostPort string
@@ -78,6 +81,7 @@ type ServerOptions struct {
 Cluster string
 GatewayUrl string
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 }
 
 // Server runs HTTP api service
@@ -108,25 +112,22 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
 
 // NewServer creates and initializes Server
 func NewServer(serverOptions *ServerOptions) (*Server, error) {
-modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
+modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore.SQLxDB())
 if err != nil {
 return nil, err
 }
 
-baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)
-
-if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
+if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
 return nil, err
 }
 
-localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
-
-if err != nil {
+if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
 return nil, err
 }
 
-localDB.SetMaxOpenConns(10)
+if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
+return nil, err
+}
 
 gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
 if err != nil {
@@ -134,7 +135,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 }
 
 // initiate license manager
-lm, err := licensepkg.StartManager("sqlite", localDB)
+lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB())
 if err != nil {
 return nil, err
 }
@@ -148,7 +149,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 if storage == "clickhouse" {
 zap.L().Info("Using ClickHouse as datastore ...")
 qb := db.NewDataConnector(
-localDB,
+serverOptions.SigNoz.SQLStore.SQLxDB(),
 serverOptions.PromConfigPath,
 lm,
 serverOptions.MaxIdleConns,
@@ -156,6 +157,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 serverOptions.DialTimeout,
 serverOptions.Cluster,
 serverOptions.UseLogsNewSchema,
+serverOptions.UseTraceNewSchema,
 )
 go qb.Start(readerReady)
 reader = qb
@@ -183,41 +185,42 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 rm, err := makeRulesManager(serverOptions.PromConfigPath,
 baseconst.GetAlertManagerApiPrefix(),
 serverOptions.RuleRepoURL,
-localDB,
+serverOptions.SigNoz.SQLStore.SQLxDB(),
 reader,
 c,
 serverOptions.DisableRules,
 lm,
 serverOptions.UseLogsNewSchema,
+serverOptions.UseTraceNewSchema,
 )
 
 if err != nil {
 return nil, err
 }
 
-go func() {
-err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
-if err != nil {
-zap.L().Error("error while running clickhouse migrations", zap.Error(err))
-}
-}()
-
 // initiate opamp
-_, err = opAmpModel.InitDB(localDB)
+_, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
 if err != nil {
 return nil, err
 }
 
-integrationsController, err := integrations.NewController(localDB)
+integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
 if err != nil {
 return nil, fmt.Errorf(
 "couldn't create integrations controller: %w", err,
 )
 }
 
+cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
+if err != nil {
+return nil, fmt.Errorf(
+"couldn't create cloud provider integrations controller: %w", err,
+)
+}
+
 // ingestion pipelines manager
 logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
-localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
+serverOptions.SigNoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
 )
 if err != nil {
 return nil, err
@@ -225,8 +228,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 
 // initiate agent config handler
 agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
-DB: localDB,
-DBEngine: AppDbEngine,
+DB: serverOptions.SigNoz.SQLStore.SQLxDB(),
 AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
 })
 if err != nil {
@@ -234,7 +236,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 }
 
 // start the usagemanager
-usageManager, err := usage.New("sqlite", modelDao, lm.GetRepo(), reader.GetConn())
+usageManager, err := usage.New(modelDao, lm.GetRepo(), reader.GetConn())
 if err != nil {
 return nil, err
 }
@@ -265,11 +267,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 FeatureFlags: lm,
 LicenseManager: lm,
 IntegrationsController: integrationsController,
+CloudIntegrationsController: cloudIntegrationsController,
 LogsParsingPipelineController: logParsingPipelineController,
 Cache: c,
 FluxInterval: fluxInterval,
 Gateway: gatewayProxy,
 UseLogsNewSchema: serverOptions.UseLogsNewSchema,
+UseTraceNewSchema: serverOptions.UseTraceNewSchema,
 }
 
 apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -286,7 +290,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 usageManager: usageManager,
 }
 
-httpServer, err := s.createPublicServer(apiHandler)
+httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)
 
 if err != nil {
 return nil, err
@@ -312,10 +316,10 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
 
 r := baseapp.NewRouter()
 
-r.Use(baseapp.LogCommentEnricher)
 r.Use(setTimeoutMiddleware)
 r.Use(s.analyticsMiddleware)
-r.Use(loggingMiddlewarePrivate)
+r.Use(middleware.NewLogging(zap.L()).Wrap)
+r.Use(baseapp.LogCommentEnricher)
 
 apiHandler.RegisterPrivateRoutes(r)
 
@@ -335,7 +339,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
 }, nil
 }
 
-func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
+func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
 
 r := baseapp.NewRouter()
 
@@ -348,21 +352,22 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
 }
 
 if user.User.OrgId == "" {
-return nil, model.UnauthorizedError(errors.New("orgId is missing in the claims"))
+return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
 }
 
 return user, nil
 }
 am := baseapp.NewAuthMiddleware(getUserFromRequest)
 
-r.Use(baseapp.LogCommentEnricher)
 r.Use(setTimeoutMiddleware)
 r.Use(s.analyticsMiddleware)
-r.Use(loggingMiddleware)
+r.Use(middleware.NewLogging(zap.L()).Wrap)
+r.Use(baseapp.LogCommentEnricher)
 
 apiHandler.RegisterRoutes(r, am)
 apiHandler.RegisterLogsRoutes(r, am)
 apiHandler.RegisterIntegrationRoutes(r, am)
+apiHandler.RegisterCloudIntegrationsRoutes(r, am)
 apiHandler.RegisterQueryRangeV3Routes(r, am)
 apiHandler.RegisterInfraMetricsRoutes(r, am)
 apiHandler.RegisterQueryRangeV4Routes(r, am)
@@ -379,36 +384,16 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
 
 handler = handlers.CompressHandler(handler)
 
+err := web.AddToRouter(r)
+if err != nil {
+return nil, err
+}
+
 return &http.Server{
 Handler: handler,
 }, nil
 }
 
-// TODO(remove): Implemented at pkg/http/middleware/logging.go
-// loggingMiddleware is used for logging public api calls
-func loggingMiddleware(next http.Handler) http.Handler {
-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-route := mux.CurrentRoute(r)
-path, _ := route.GetPathTemplate()
-startTime := time.Now()
-next.ServeHTTP(w, r)
-zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path))
-})
-}
-
-// TODO(remove): Implemented at pkg/http/middleware/logging.go
-// loggingMiddlewarePrivate is used for logging private api calls
-// from internal services like alert manager
-func loggingMiddlewarePrivate(next http.Handler) http.Handler {
-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-route := mux.CurrentRoute(r)
-path, _ := route.GetPathTemplate()
-startTime := time.Now()
-next.ServeHTTP(w, r)
-zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
-})
-}
-
 // TODO(remove): Implemented at pkg/http/middleware/logging.go
 type loggingResponseWriter struct {
 http.ResponseWriter
@@ -488,32 +473,29 @@ func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}
 zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
 }
 
-signozMetricsUsed := false
-signozLogsUsed := false
-signozTracesUsed := false
-if postData != nil {
-
-if postData.CompositeQuery != nil {
-data["queryType"] = postData.CompositeQuery.QueryType
-data["panelType"] = postData.CompositeQuery.PanelType
-
-signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
-}
-}
-
-if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
-if signozMetricsUsed {
+queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)
+
+if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
+if queryInfoResult.MetricsUsed {
 telemetry.GetInstance().AddActiveMetricsUser()
 }
-if signozLogsUsed {
+if queryInfoResult.LogsUsed {
 telemetry.GetInstance().AddActiveLogsUser()
 }
-if signozTracesUsed {
+if queryInfoResult.TracesUsed {
 telemetry.GetInstance().AddActiveTracesUser()
 }
-data["metricsUsed"] = signozMetricsUsed
-data["logsUsed"] = signozLogsUsed
-data["tracesUsed"] = signozTracesUsed
+data["metricsUsed"] = queryInfoResult.MetricsUsed
+data["logsUsed"] = queryInfoResult.LogsUsed
+data["tracesUsed"] = queryInfoResult.TracesUsed
+data["filterApplied"] = queryInfoResult.FilterApplied
+data["groupByApplied"] = queryInfoResult.GroupByApplied
+data["aggregateOperator"] = queryInfoResult.AggregateOperator
+data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
+data["numberOfQueries"] = queryInfoResult.NumberOfQueries
+data["queryType"] = queryInfoResult.QueryType
+data["panelType"] = queryInfoResult.PanelType
 
 userEmail, err := baseauth.GetEmailFromJwt(r.Context())
 if err == nil {
 // switch case to set data["screen"] based on the referrer
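The telemetry rewrite above replaces `CheckSigNozSignals` with a single `CheckQueryInfo` call. Inferred purely from the fields consumed in this hunk, the result it returns has roughly this shape (a sketch; the real type lives in the `telemetry` package and may differ in names and types):

```go
package telemetry

// QueryInfoResult is a sketch of the value consumed by extractQueryRangeData
// above; field types are inferred from how each value is used and are not
// authoritative.
type QueryInfoResult struct {
	LogsUsed              bool
	MetricsUsed           bool
	TracesUsed            bool
	FilterApplied         bool
	GroupByApplied        bool
	AggregateOperator     string
	AggregateAttributeKey string
	NumberOfQueries       int
	QueryType             string
	PanelType             string
}
```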
@@ -736,7 +718,8 @@ func makeRulesManager(
 cache cache.Cache,
 disableRules bool,
 fm baseint.FeatureLookup,
-useLogsNewSchema bool) (*baserules.Manager, error) {
+useLogsNewSchema bool,
+useTraceNewSchema bool) (*baserules.Manager, error) {
 
 // create engine
 pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -765,8 +748,10 @@ func makeRulesManager(
 Cache: cache,
 EvalDelay: baseconst.GetEvalDelay(),
 
 PrepareTaskFunc: rules.PrepareTaskFunc,
 UseLogsNewSchema: useLogsNewSchema,
+UseTraceNewSchema: useTraceNewSchema,
+PrepareTestRuleFunc: rules.TestNotification,
 }
 
 // create Manager

@@ -14,6 +14,9 @@ var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
 var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
 var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")
 
+// this is set via build time variable
+var ZeusURL = "https://api.signoz.cloud"
+
 func GetOrDefaultEnv(key string, fallback string) string {
 v := os.Getenv(key)
 if len(v) == 0 {

@@ -1,18 +1,10 @@
 package dao
 
 import (
-"fmt"
-
+"github.com/jmoiron/sqlx"
 "go.signoz.io/signoz/ee/query-service/dao/sqlite"
 )
 
-func InitDao(engine, path string) (ModelDao, error) {
-
-switch engine {
-case "sqlite":
-return sqlite.InitDB(path)
-default:
-return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
-}
-
+func InitDao(inputDB *sqlx.DB) (ModelDao, error) {
+return sqlite.InitDB(inputDB)
 }
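After this change `dao.InitDao` takes an already-open `*sqlx.DB` instead of an engine name and path; in this release the handle comes from `serverOptions.SigNoz.SQLStore.SQLxDB()` (see the `server.go` hunks above). A standalone sketch of the new wiring, with an illustrative driver and path:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3" // assumed SQLite driver

	"go.signoz.io/signoz/ee/query-service/dao"
)

func main() {
	// Callers now own the *sqlx.DB; the engine switch is gone and SQLite
	// is assumed by InitDao. The path here is illustrative only.
	db, err := sqlx.Open("sqlite3", "/var/lib/signoz/signoz.db")
	if err != nil {
		log.Fatal(err)
	}

	modelDao, err := dao.InitDao(db)
	if err != nil {
		log.Fatal(err)
	}
	_ = modelDao
}
```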
@@ -65,8 +65,8 @@ func columnExists(db *sqlx.DB, tableName, columnName string) bool {
 }
 
 // InitDB creates and extends base model DB repository
-func InitDB(dataSourceName string) (*modelDao, error) {
-	dao, err := basedsql.InitDB(dataSourceName)
+func InitDB(inputDB *sqlx.DB) (*modelDao, error) {
+	dao, err := basedsql.InitDB(inputDB)
 	if err != nil {
 		return nil, err
 	}

@@ -9,8 +9,8 @@ import (
 )
 
 var (
 	RoutePrefix   string   = "/api/gateway"
-	AllowedPrefix []string = []string{"/v1/workspaces/me", "/v2/profiles/me"}
+	AllowedPrefix []string = []string{"/v1/workspaces/me", "/v2/profiles/me", "/v2/deployments/me"}
 )
 
 type proxy struct {

@@ -2,14 +2,7 @@ package signozio
 
 type status string
 
-type ActivationResult struct {
-	Status    status              `json:"status"`
-	Data      *ActivationResponse `json:"data,omitempty"`
-	ErrorType string              `json:"errorType,omitempty"`
-	Error     string              `json:"error,omitempty"`
-}
-
-type ActivationResponse struct {
-	ActivationId string `json:"ActivationId"`
-	PlanDetails  string `json:"PlanDetails"`
+type ValidateLicenseResponse struct {
+	Status status                 `json:"status"`
+	Data   map[string]interface{} `json:"data"`
 }

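Note: `Data` deliberately becomes a schemaless map — the license payload is owned by the gateway, and the known keys are only picked out later by `model.NewLicenseV3`. A minimal decoding sketch; the JSON payload and the exported `Status string` field here are illustrative assumptions (the real type uses the unexported `status` alias):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the new response shape; Data stays untyped on purpose.
type ValidateLicenseResponse struct {
	Status string                 `json:"status"`
	Data   map[string]interface{} `json:"data"`
}

func main() {
	// Illustrative payload only; the actual field set is owned by the gateway.
	raw := []byte(`{"status":"success","data":{"id":"lic-1","key":"k-1","status":"ACTIVE","plan":{"name":"TEAMS"}}}`)

	var resp ValidateLicenseResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Data["plan"]) // map[name:TEAMS]
}
```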
@@ -7,9 +7,9 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"time"
 
 	"github.com/pkg/errors"
-	"go.uber.org/zap"
 
 	"go.signoz.io/signoz/ee/query-service/constants"
 	"go.signoz.io/signoz/ee/query-service/model"
@@ -23,12 +23,14 @@ const (
 )
 
 type Client struct {
 	Prefix     string
+	GatewayUrl string
 }
 
 func New() *Client {
 	return &Client{
 		Prefix:     constants.LicenseSignozIo,
+		GatewayUrl: constants.ZeusURL,
 	}
 }
@@ -36,82 +38,56 @@ func init() {
 	C = New()
 }
 
-// ActivateLicense sends key to license.signoz.io and gets activation data
-func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
-	licenseReq := map[string]string{
-		"key":    key,
-		"siteId": siteId,
-	}
-
-	reqString, _ := json.Marshal(licenseReq)
-	httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
-
-	if err != nil {
-		zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
-		return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
-	}
-
-	httpBody, err := io.ReadAll(httpResponse.Body)
-	if err != nil {
-		zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
-		return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
-	}
-
-	defer httpResponse.Body.Close()
-
-	// read api request result
-	result := ActivationResult{}
-	err = json.Unmarshal(httpBody, &result)
-	if err != nil {
-		zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
-		return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
-	}
-
-	switch httpResponse.StatusCode {
-	case 200, 201:
-		return result.Data, nil
-	case 400, 401:
-		return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
-	default:
-		return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
-	}
-
-}
-
-// ValidateLicense validates the license key
-func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
-	validReq := map[string]string{
-		"activationId": activationId,
-	}
-
-	reqString, _ := json.Marshal(validReq)
-	response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))
-
-	if err != nil {
-		return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
-	}
+func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
+
+	// Creating an HTTP client with a timeout for better control
+	client := &http.Client{
+		Timeout: 10 * time.Second,
+	}
+
+	req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
+	if err != nil {
+		return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to create request: %w", err)))
+	}
+
+	// Setting the custom header
+	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
+
+	response, err := client.Do(req)
+	if err != nil {
+		return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to make post request: %w", err)))
+	}
 
 	body, err := io.ReadAll(response.Body)
 	if err != nil {
-		return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
+		return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read validation response from %v", C.GatewayUrl)))
 	}
 
 	defer response.Body.Close()
 
 	switch response.StatusCode {
-	case 200, 201:
-		a := ActivationResult{}
+	case 200:
+		a := ValidateLicenseResponse{}
 		err = json.Unmarshal(body, &a)
 		if err != nil {
 			return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
 		}
-		return a.Data, nil
-	case 400, 401:
+
+		license, err := model.NewLicenseV3(a.Data)
+		if err != nil {
+			return nil, model.BadRequest(errors.Wrap(err, "failed to generate new license v3"))
+		}
+
+		return license, nil
+	case 400:
 		return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
-			"bad request error received from license.signoz.io"))
+			fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
+	case 401:
+		return nil, model.Unauthorized(errors.Wrap(fmt.Errorf(string(body)),
+			fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
	default:
 		return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
-			"internal error received from license.signoz.io"))
+			fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
 	}
 
 }

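Note: the activation round-trip (POST with key/siteId, then periodic POST validation by activation id) collapses into a single authenticated GET. A standalone sketch of the new request shape — URL and key below are placeholders standing in for `constants.ZeusURL` and the stored license key. As an aside, `fmt.Sprintf` does not interpret the `%w` verb (only `fmt.Errorf` does), so the error strings built with `Sprintf("...: %w", err)` in the new code will render a literal `%!w(...)` rather than the wrapped message.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// 10s timeout mirrors the new client construction.
	client := &http.Client{Timeout: 10 * time.Second}

	// Placeholder endpoint and key for illustration only.
	req, err := http.NewRequest("GET", "https://api.signoz.cloud/v2/licenses/me", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Signoz-Cloud-Api-Key", "<license-key>")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, len(body))
}
```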
@@ -3,10 +3,12 @@ package license
 import (
 	"context"
 	"database/sql"
+	"encoding/json"
 	"fmt"
 	"time"
 
 	"github.com/jmoiron/sqlx"
+	"github.com/mattn/go-sqlite3"
 
 	"go.signoz.io/signoz/ee/query-service/license/sqlite"
 	"go.signoz.io/signoz/ee/query-service/model"

@@ -26,13 +28,8 @@ func NewLicenseRepo(db *sqlx.DB) Repo {
 	}
 }
 
-func (r *Repo) InitDB(engine string) error {
-	switch engine {
-	case "sqlite3", "sqlite":
-		return sqlite.InitDB(r.db)
-	default:
-		return fmt.Errorf("unsupported db")
-	}
+func (r *Repo) InitDB(inputDB *sqlx.DB) error {
+	return sqlite.InitDB(inputDB)
 }
 
 func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {

@@ -48,9 +45,35 @@ func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
 	return licenses, nil
 }
 
-// GetActiveLicense fetches the latest active license from DB.
-// If the license is not present, expect a nil license and a nil error in the output.
-func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
+func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
+	licensesData := []model.LicenseDB{}
+	licenseV3Data := []*model.LicenseV3{}
+
+	query := "SELECT id,key,data FROM licenses_v3"
+
+	err := r.db.Select(&licensesData, query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get licenses from db: %v", err)
+	}
+
+	for _, l := range licensesData {
+		var licenseData map[string]interface{}
+		err := json.Unmarshal([]byte(l.Data), &licenseData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
+		}
+
+		license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
+		}
+		licenseV3Data = append(licenseV3Data, license)
+	}
+
+	return licenseV3Data, nil
+}
+
+func (r *Repo) GetActiveLicenseV2(ctx context.Context) (*model.License, *basemodel.ApiError) {
 	var err error
 	licenses := []model.License{}
 

@@ -79,6 +102,60 @@ func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel
 	return active, nil
 }
 
+// GetActiveLicense fetches the latest active license from DB.
+// If the license is not present, expect a nil license and a nil error in the output.
+func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
+	activeLicenseV3, err := r.GetActiveLicenseV3(ctx)
+	if err != nil {
+		return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
+	}
+
+	if activeLicenseV3 == nil {
+		return nil, nil
+	}
+	activeLicenseV2 := model.ConvertLicenseV3ToLicenseV2(activeLicenseV3)
+	return activeLicenseV2, nil
+}
+
+func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error) {
+	var err error
+	licenses := []model.LicenseDB{}
+
+	query := "SELECT id,key,data FROM licenses_v3"
+
+	err = r.db.Select(&licenses, query)
+	if err != nil {
+		return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
+	}
+
+	var active *model.LicenseV3
+	for _, l := range licenses {
+		var licenseData map[string]interface{}
+		err := json.Unmarshal([]byte(l.Data), &licenseData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
+		}
+
+		license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
+		}
+
+		if active == nil &&
+			(license.ValidFrom != 0) &&
+			(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
+			active = license
+		}
+		if active != nil &&
+			license.ValidFrom > active.ValidFrom &&
+			(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
+			active = license
+		}
+	}
+
+	return active, nil
+}
+
 // InsertLicense inserts a new license in db
 func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
 

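Note: the two `if` blocks above implement one rule — among licenses that have started (`ValidFrom != 0`) and have not expired (`ValidUntil == -1` means a subscription with no end date), keep the one with the latest `ValidFrom`. A condensed, self-contained sketch of that selection predicate, using a stand-in struct rather than `model.LicenseV3`:

```go
package main

import (
	"fmt"
	"time"
)

// Stand-in carrying only the fields the selection uses.
type lic struct {
	ID         string
	ValidFrom  int64
	ValidUntil int64 // -1 means "no end date" (subscription)
}

// pickActive keeps the started, non-expired license with the latest ValidFrom.
func pickActive(ls []lic, now int64) *lic {
	var active *lic
	for i := range ls {
		l := &ls[i]
		notExpired := l.ValidUntil == -1 || l.ValidUntil > now
		if l.ValidFrom != 0 && notExpired && (active == nil || l.ValidFrom > active.ValidFrom) {
			active = l
		}
	}
	return active
}

func main() {
	now := time.Now().Unix()
	ls := []lic{
		{ID: "old", ValidFrom: now - 1000, ValidUntil: -1},
		{ID: "new", ValidFrom: now - 10, ValidUntil: now + 1000},
		{ID: "expired", ValidFrom: now - 5, ValidUntil: now - 1},
	}
	fmt.Println(pickActive(ls, now).ID) // new
}
```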
@@ -204,3 +281,59 @@ func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
 	}
 	return nil
 }
+
+// InsertLicenseV3 inserts a new license v3 in db
+func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
+
+	query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
+
+	// licsense is the entity of zeus so putting the entire license here without defining schema
+	licenseData, err := json.Marshal(l.Data)
+	if err != nil {
+		return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
+	}
+
+	_, err = r.db.ExecContext(ctx,
+		query,
+		l.ID,
+		l.Key,
+		string(licenseData),
+	)
+
+	if err != nil {
+		if sqliteErr, ok := err.(sqlite3.Error); ok {
+			if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
+				zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
+				return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
+			}
+		}
+		zap.L().Error("error in inserting license data: ", zap.Error(err))
+		return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
+	}
+
+	return nil
+}
+
+// UpdateLicenseV3 updates a new license v3 in db
+func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
+
+	// the key and id for the license can't change so only update the data here!
+	query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`
+
+	license, err := json.Marshal(l.Data)
+	if err != nil {
+		return fmt.Errorf("insert license failed: license marshal error")
+	}
+	_, err = r.db.ExecContext(ctx,
+		query,
+		license,
+		l.ID,
+	)
+
+	if err != nil {
+		zap.L().Error("error in updating license data: ", zap.Error(err))
+		return fmt.Errorf("failed to update license in db: %v", err)
+	}
+
+	return nil
+}

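Note: `InsertLicenseV3` distinguishes a duplicate license key (mapped to an `ErrorConflict` ApiError) from other exec failures by inspecting the sqlite extended error code. A minimal sketch of that detection path against an in-memory database — the table mirrors the `licenses_v3` schema created later in this changeset:

```go
package main

import (
	"database/sql"
	"fmt"

	sqlite3 "github.com/mattn/go-sqlite3" // named import; also registers the "sqlite3" driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE licenses_v3 (id TEXT PRIMARY KEY, key TEXT NOT NULL UNIQUE, data TEXT)`); err != nil {
		panic(err)
	}

	ins := `INSERT INTO licenses_v3 (id, key, data) VALUES (?, ?, ?)`
	if _, err := db.Exec(ins, "id-1", "key-1", "{}"); err != nil {
		panic(err)
	}

	// A second insert with the same key trips the UNIQUE constraint,
	// which the repo surfaces as a conflict.
	_, err = db.Exec(ins, "id-2", "key-1", "{}")
	if sqliteErr, ok := err.(sqlite3.Error); ok && sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
		fmt.Println("duplicate license key rejected:", sqliteErr)
	}
}
```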
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/jmoiron/sqlx"
+	"github.com/pkg/errors"
 
 	"sync"
 

@@ -45,17 +46,18 @@ type Manager struct {
 	failedAttempts uint64
 
 	// keep track of active license and features
 	activeLicense   *model.License
-	activeFeatures  basemodel.FeatureSet
+	activeLicenseV3 *model.LicenseV3
+	activeFeatures  basemodel.FeatureSet
 }
 
-func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
+func StartManager(db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
 	if LM != nil {
 		return LM, nil
 	}
 
 	repo := NewLicenseRepo(db)
-	err := repo.InitDB(dbType)
+	err := repo.InitDB(db)
 
 	if err != nil {
 		return nil, fmt.Errorf("failed to initiate license repo: %v", err)

@@ -74,9 +76,7 @@ func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*M
 
 // start loads active license in memory and initiates validator
 func (lm *Manager) start(features ...basemodel.Feature) error {
-	err := lm.LoadActiveLicense(features...)
-
-	return err
+	return lm.LoadActiveLicenseV3(features...)
 }
 
 func (lm *Manager) Stop() {

@@ -84,7 +84,7 @@ func (lm *Manager) Stop() {
 	<-lm.terminated
 }
 
-func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
+func (lm *Manager) SetActiveV3(l *model.LicenseV3, features ...basemodel.Feature) {
 	lm.mutex.Lock()
 	defer lm.mutex.Unlock()
 

@@ -92,8 +92,8 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
 		return
 	}
 
-	lm.activeLicense = l
-	lm.activeFeatures = append(l.FeatureSet, features...)
+	lm.activeLicenseV3 = l
+	lm.activeFeatures = append(l.Features, features...)
 	// set default features
 	setDefaultFeatures(lm)
 

@@ -105,7 +105,7 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
 		// we want to make sure only one validator runs,
 		// we already have lock() so good to go
 		lm.validatorRunning = true
-		go lm.Validator(context.Background())
+		go lm.ValidatorV3(context.Background())
 	}
 
 }

@@ -114,14 +114,13 @@ func setDefaultFeatures(lm *Manager) {
 	lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...)
 }
 
-// LoadActiveLicense loads the most recent active license
-func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
-	active, err := lm.repo.GetActiveLicense(context.Background())
+func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
+	active, err := lm.repo.GetActiveLicenseV3(context.Background())
 	if err != nil {
 		return err
 	}
 	if active != nil {
-		lm.SetActive(active, features...)
+		lm.SetActiveV3(active, features...)
 	} else {
 		zap.L().Info("No active license found, defaulting to basic plan")
 		// if no active license is found, we default to basic(free) plan with all default features

@@ -163,13 +162,36 @@ func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, a
 	return
 }
 
+func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.LicenseV3, apiError *model.ApiError) {
+
+	licenses, err := lm.repo.GetLicensesV3(ctx)
+	if err != nil {
+		return nil, model.InternalError(err)
+	}
+
+	for _, l := range licenses {
+		if lm.activeLicenseV3 != nil && l.Key == lm.activeLicenseV3.Key {
+			l.IsCurrent = true
+		}
+		if l.ValidUntil == -1 {
+			// for subscriptions, there is no end-date as such
+			// but for showing user some validity we default one year timespan
+			l.ValidUntil = l.ValidFrom + 31556926
+		}
+		response = append(response, l)
+	}
+
+	return response, nil
+}
+
 // Validator validates license after an epoch of time
-func (lm *Manager) Validator(ctx context.Context) {
+func (lm *Manager) ValidatorV3(ctx context.Context) {
+	zap.L().Info("ValidatorV3 started!")
 	defer close(lm.terminated)
 	tick := time.NewTicker(validationFrequency)
 	defer tick.Stop()
 
-	lm.Validate(ctx)
+	lm.ValidateV3(ctx)
 
 	for {
 		select {

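Note on the magic number above: 31556926 seconds ÷ 86,400 seconds/day ≈ 365.24 days, i.e. one year, so a never-expiring subscription (`ValidUntil == -1`) is displayed as valid for one year from its `ValidFrom` timestamp.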
@@ -180,17 +202,33 @@ func (lm *Manager) Validator(ctx context.Context) {
 		case <-lm.done:
 			return
 		case <-tick.C:
-			lm.Validate(ctx)
+			lm.ValidateV3(ctx)
 		}
 	}
 }
 
-// Validate validates the current active license
-func (lm *Manager) Validate(ctx context.Context) (reterr error) {
+func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {
+
+	license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
+	if apiError != nil {
+		zap.L().Error("failed to validate license", zap.Error(apiError.Err))
+		return apiError
+	}
+
+	err := lm.repo.UpdateLicenseV3(ctx, license)
+	if err != nil {
+		return model.BadRequest(errors.Wrap(err, "failed to update the new license"))
+	}
+	lm.SetActiveV3(license)
+
+	return nil
+}
+
+func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
 	zap.L().Info("License validation started")
-	if lm.activeLicense == nil {
+	if lm.activeLicenseV3 == nil {
 		return nil
 	}
 

@@ -210,52 +248,15 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
 		lm.mutex.Unlock()
 	}()
 
-	response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
-	if apiError != nil {
-		zap.L().Error("failed to validate license", zap.Error(apiError.Err))
-		return apiError.Err
-	}
-
-	if response.PlanDetails == lm.activeLicense.PlanDetails {
-		// license plan hasnt changed, nothing to do
-		return nil
-	}
-
-	if response.PlanDetails != "" {
-
-		// copy and replace the active license record
-		l := model.License{
-			Key:               lm.activeLicense.Key,
-			CreatedAt:         lm.activeLicense.CreatedAt,
-			PlanDetails:       response.PlanDetails,
-			ValidationMessage: lm.activeLicense.ValidationMessage,
-			ActivationId:      lm.activeLicense.ActivationId,
-		}
-
-		if err := l.ParsePlan(); err != nil {
-			zap.L().Error("failed to parse updated license", zap.Error(err))
-			return err
-		}
-
-		// updated plan is parsable, check if plan has changed
-		if lm.activeLicense.PlanDetails != response.PlanDetails {
-			err := lm.repo.UpdatePlanDetails(ctx, lm.activeLicense.Key, response.PlanDetails)
-			if err != nil {
-				// unexpected db write issue but we can let the user continue
-				// and wait for update to work in next cycle.
-				zap.L().Error("failed to validate license", zap.Error(err))
-			}
-		}
-
-		// activate the update license plan
-		lm.SetActive(&l)
-	}
-
+	err := lm.RefreshLicense(ctx)
+	if err != nil {
+		return err
+	}
 	return nil
 }
 
-// Activate activates a license key with signoz server
-func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
+func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
 	defer func() {
 		if errResponse != nil {
 			userEmail, err := auth.GetEmailFromJwt(ctx)

@@ -266,36 +267,22 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
 		}
 	}()
 
-	response, apiError := validate.ActivateLicense(key, "")
+	license, apiError := validate.ValidateLicenseV3(licenseKey)
 	if apiError != nil {
-		zap.L().Error("failed to activate license", zap.Error(apiError.Err))
+		zap.L().Error("failed to get the license", zap.Error(apiError.Err))
 		return nil, apiError
 	}
 
-	l := &model.License{
-		Key:          key,
-		ActivationId: response.ActivationId,
-		PlanDetails:  response.PlanDetails,
-	}
-
-	// parse validity and features from the plan details
-	err := l.ParsePlan()
-
+	// insert the new license to the sqlite db
+	err := lm.repo.InsertLicenseV3(ctx, license)
 	if err != nil {
 		zap.L().Error("failed to activate license", zap.Error(err))
-		return nil, model.InternalError(err)
-	}
-
-	// store the license before activating it
-	err = lm.repo.InsertLicense(ctx, l)
-	if err != nil {
-		zap.L().Error("failed to activate license", zap.Error(err))
-		return nil, model.InternalError(err)
+		return nil, err
 	}
 
 	// license is valid, activate it
-	lm.SetActive(l)
-	return l, nil
+	lm.SetActiveV3(license)
+	return license, nil
 }
 
 // CheckFeature will be internally used by backend routines

@@ -48,5 +48,16 @@ func InitDB(db *sqlx.DB) error {
 		return fmt.Errorf("error in creating feature_status table: %s", err.Error())
 	}
 
+	table_schema = `CREATE TABLE IF NOT EXISTS licenses_v3 (
+		id TEXT PRIMARY KEY,
+		key TEXT NOT NULL UNIQUE,
+		data TEXT
+	);`
+
+	_, err = db.Exec(table_schema)
+	if err != nil {
+		return fmt.Errorf("error in creating licenses_v3 table: %s", err.Error())
+	}
+
 	return nil
 }

@@ -13,10 +13,14 @@ import (
 	"go.opentelemetry.io/otel/sdk/resource"
 	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
 	"go.signoz.io/signoz/ee/query-service/app"
+	"go.signoz.io/signoz/pkg/config"
+	"go.signoz.io/signoz/pkg/config/envprovider"
+	"go.signoz.io/signoz/pkg/config/fileprovider"
 	"go.signoz.io/signoz/pkg/query-service/auth"
 	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
 	"go.signoz.io/signoz/pkg/query-service/migrate"
 	"go.signoz.io/signoz/pkg/query-service/version"
+	"go.signoz.io/signoz/pkg/signoz"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
 

@@ -94,6 +98,7 @@ func main() {
 	var cluster string
 
 	var useLogsNewSchema bool
+	var useTraceNewSchema bool
 	var cacheConfigPath, fluxInterval string
 	var enableQueryServiceLogOTLPExport bool
 	var preferSpanMetrics bool
@@ -102,8 +107,10 @@ func main() {
 	var maxOpenConns int
 	var dialTimeout time.Duration
 	var gatewayUrl string
+	var useLicensesV3 bool
 
 	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
+	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
 	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
 	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
 	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
@@ -117,7 +124,7 @@ func main() {
 	flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
 	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
 	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
+	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
 	flag.Parse()
 
 	loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)

@@ -127,7 +134,24 @@ func main() {
 
 	version.PrintVersion()
 
+	config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
+		Uris: []string{"env:"},
+		ProviderFactories: []config.ProviderFactory{
+			envprovider.NewFactory(),
+			fileprovider.NewFactory(),
+		},
+	})
+	if err != nil {
+		zap.L().Fatal("Failed to create config", zap.Error(err))
+	}
+
+	signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
+	if err != nil {
+		zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
+	}
+
 	serverOptions := &app.ServerOptions{
+		SigNoz:            signoz,
 		HTTPHostPort:      baseconst.HTTPHostPort,
 		PromConfigPath:    promConfigPath,
 		SkipTopLvlOpsPath: skipTopLvlOpsPath,
@@ -143,6 +167,7 @@ func main() {
 		Cluster:           cluster,
 		GatewayUrl:        gatewayUrl,
 		UseLogsNewSchema:  useLogsNewSchema,
+		UseTraceNewSchema: useTraceNewSchema,
 	}
 
 	// Read the jwt secret key
@@ -154,7 +179,7 @@ func main() {
 		zap.L().Info("JWT secret key set successfully.")
 	}
 
-	if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
+	if err := migrate.Migrate(signoz.SQLStore.SQLxDB()); err != nil {
 		zap.L().Error("Failed to migrate", zap.Error(err))
 	} else {
 		zap.L().Info("Migration successful")

@@ -46,6 +46,13 @@ func BadRequest(err error) *ApiError {
 	}
 }
 
+func Unauthorized(err error) *ApiError {
+	return &ApiError{
+		Typ: basemodel.ErrorUnauthorized,
+		Err: err,
+	}
+}
+
 // BadRequestStr returns a ApiError object of bad request for string input
 func BadRequestStr(s string) *ApiError {
 	return &ApiError{

@@ -3,6 +3,8 @@ package model
 import (
 	"encoding/base64"
 	"encoding/json"
+	"fmt"
+	"reflect"
 	"time"
 
 	"github.com/pkg/errors"

@@ -104,3 +106,165 @@ type SubscriptionServerResp struct {
 	Status string   `json:"status"`
 	Data   Licenses `json:"data"`
 }
+
+type Plan struct {
+	Name string `json:"name"`
+}
+
+type LicenseDB struct {
+	ID   string `json:"id"`
+	Key  string `json:"key"`
+	Data string `json:"data"`
+}
+type LicenseV3 struct {
+	ID         string
+	Key        string
+	Data       map[string]interface{}
+	PlanName   string
+	Features   basemodel.FeatureSet
+	Status     string
+	IsCurrent  bool
+	ValidFrom  int64
+	ValidUntil int64
+}
+
+func extractKeyFromMapStringInterface[T any](data map[string]interface{}, key string) (T, error) {
+	var zeroValue T
+	if val, ok := data[key]; ok {
+		if value, ok := val.(T); ok {
+			return value, nil
+		}
+		return zeroValue, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zeroValue))
+	}
+	return zeroValue, fmt.Errorf("%s key is missing", key)
+}
+
+func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
+	var features basemodel.FeatureSet
+
+	// extract id from data
+	licenseID, err := extractKeyFromMapStringInterface[string](data, "id")
+	if err != nil {
+		return nil, err
+	}
+	delete(data, "id")
+
+	// extract key from data
+	licenseKey, err := extractKeyFromMapStringInterface[string](data, "key")
+	if err != nil {
+		return nil, err
+	}
+	delete(data, "key")
+
+	// extract status from data
+	status, err := extractKeyFromMapStringInterface[string](data, "status")
+	if err != nil {
+		return nil, err
+	}
+
+	planMap, err := extractKeyFromMapStringInterface[map[string]any](data, "plan")
+	if err != nil {
+		return nil, err
+	}
+
+	planName, err := extractKeyFromMapStringInterface[string](planMap, "name")
+	if err != nil {
+		return nil, err
+	}
+	// if license status is inactive then default it to basic
+	if status == LicenseStatusInactive {
+		planName = PlanNameBasic
+	}
+
+	featuresFromZeus := basemodel.FeatureSet{}
+	if _features, ok := data["features"]; ok {
+		featuresData, err := json.Marshal(_features)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to marshal features data")
+		}
+
+		if err := json.Unmarshal(featuresData, &featuresFromZeus); err != nil {
+			return nil, errors.Wrap(err, "failed to unmarshal features data")
+		}
+	}
+
+	switch planName {
+	case PlanNameTeams:
+		features = append(features, ProPlan...)
+	case PlanNameEnterprise:
+		features = append(features, EnterprisePlan...)
+	case PlanNameBasic:
+		features = append(features, BasicPlan...)
+	default:
+		features = append(features, BasicPlan...)
+	}
+
+	if len(featuresFromZeus) > 0 {
+		for _, feature := range featuresFromZeus {
+			exists := false
+			for i, existingFeature := range features {
+				if existingFeature.Name == feature.Name {
+					features[i] = feature // Replace existing feature
+					exists = true
+					break
+				}
+			}
+			if !exists {
+				features = append(features, feature) // Append if it doesn't exist
+			}
+		}
+	}
+	data["features"] = features
+
+	_validFrom, err := extractKeyFromMapStringInterface[float64](data, "valid_from")
+	if err != nil {
+		_validFrom = 0
+	}
+	validFrom := int64(_validFrom)
+
+	_validUntil, err := extractKeyFromMapStringInterface[float64](data, "valid_until")
+	if err != nil {
+		_validUntil = 0
+	}
+	validUntil := int64(_validUntil)
+
+	return &LicenseV3{
+		ID:         licenseID,
+		Key:        licenseKey,
+		Data:       data,
+		PlanName:   planName,
+		Features:   features,
+		ValidFrom:  validFrom,
+		ValidUntil: validUntil,
+		Status:     status,
+	}, nil
+
+}
+
+func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}) (*LicenseV3, error) {
+	licenseDataWithIdAndKey := data
+	licenseDataWithIdAndKey["id"] = id
+	licenseDataWithIdAndKey["key"] = key
+	return NewLicenseV3(licenseDataWithIdAndKey)
+}
+
+func ConvertLicenseV3ToLicenseV2(l *LicenseV3) *License {
+	planKeyFromPlanName, ok := MapOldPlanKeyToNewPlanName[l.PlanName]
+	if !ok {
+		planKeyFromPlanName = Basic
+	}
+	return &License{
+		Key:               l.Key,
+		ActivationId:      "",
+		PlanDetails:       "",
+		FeatureSet:        l.Features,
+		ValidationMessage: "",
+		IsCurrent:         l.IsCurrent,
+		LicensePlan: LicensePlan{
+			PlanKey:    planKeyFromPlanName,
+			ValidFrom:  l.ValidFrom,
+			ValidUntil: l.ValidUntil,
+			Status:     l.Status},
+	}
+
+}

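Note: `extractKeyFromMapStringInterface` is the workhorse here — it asserts a typed value out of a decoded JSON object and reports whether the key was missing or mistyped. A self-contained sketch of the same pattern (the payload and helper name are illustrative); it also shows why the model extracts `valid_from`/`valid_until` as `float64` before truncating to `int64` — JSON numbers always decode to `float64`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// extract asserts a typed value from a decoded JSON object, with a
// descriptive error when the key is absent or of the wrong type.
func extract[T any](data map[string]interface{}, key string) (T, error) {
	var zero T
	val, ok := data[key]
	if !ok {
		return zero, fmt.Errorf("%s key is missing", key)
	}
	typed, ok := val.(T)
	if !ok {
		return zero, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zero))
	}
	return typed, nil
}

func main() {
	var payload map[string]interface{}
	// Illustrative license payload; field names follow the tests below.
	_ = json.Unmarshal([]byte(`{"id":"lic-1","valid_from":1730899309}`), &payload)

	id, _ := extract[string](payload, "id")
	from, _ := extract[float64](payload, "valid_from") // JSON numbers are float64
	fmt.Println(id, int64(from))                       // lic-1 1730899309
}
```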
ee/query-service/model/license_test.go (new file, 170 lines)
@@ -0,0 +1,170 @@
+package model
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.signoz.io/signoz/pkg/query-service/model"
+)
+
+func TestNewLicenseV3(t *testing.T) {
+	testCases := []struct {
+		name     string
+		data     []byte
+		pass     bool
+		expected *LicenseV3
+		error    error
+	}{
+		{
+			name:  "Error for missing license id",
+			data:  []byte(`{}`),
+			pass:  false,
+			error: errors.New("id key is missing"),
+		},
+		{
+			name:  "Error for license id not being a valid string",
+			data:  []byte(`{"id": 10}`),
+			pass:  false,
+			error: errors.New("id key is not a valid string"),
+		},
+		{
+			name:  "Error for missing license key",
+			data:  []byte(`{"id":"does-not-matter"}`),
+			pass:  false,
+			error: errors.New("key key is missing"),
+		},
+		{
+			name:  "Error for invalid string license key",
+			data:  []byte(`{"id":"does-not-matter","key":10}`),
+			pass:  false,
+			error: errors.New("key key is not a valid string"),
+		},
+		{
+			name:  "Error for missing license status",
+			data:  []byte(`{"id":"does-not-matter", "key": "does-not-matter","category":"FREE"}`),
+			pass:  false,
+			error: errors.New("status key is missing"),
+		},
+		{
+			name:  "Error for invalid string license status",
+			data:  []byte(`{"id":"does-not-matter","key": "does-not-matter", "category":"FREE", "status":10}`),
+			pass:  false,
+			error: errors.New("status key is not a valid string"),
+		},
+		{
+			name:  "Error for missing license plan",
+			data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE"}`),
+			pass:  false,
+			error: errors.New("plan key is missing"),
+		},
+		{
+			name:  "Error for invalid json license plan",
+			data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":10}`),
+			pass:  false,
+			error: errors.New("plan key is not a valid map[string]interface {}"),
+		},
+		{
+			name:  "Error for invalid license plan",
+			data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{}}`),
+			pass:  false,
+			error: errors.New("name key is missing"),
+		},
+		{
+			name: "Parse the entire license properly",
+			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
+			pass: true,
+			expected: &LicenseV3{
+				ID:  "does-not-matter",
+				Key: "does-not-matter-key",
+				Data: map[string]interface{}{
+					"plan": map[string]interface{}{
+						"name": "TEAMS",
+					},
+					"category":    "FREE",
+					"status":      "ACTIVE",
+					"valid_from":  float64(1730899309),
+					"valid_until": float64(-1),
+				},
+				PlanName:   PlanNameTeams,
+				ValidFrom:  1730899309,
+				ValidUntil: -1,
+				Status:     "ACTIVE",
+				IsCurrent:  false,
+				Features:   model.FeatureSet{},
+			},
+		},
+		{
+			name: "Fallback to basic plan if license status is inactive",
+			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
+			pass: true,
+			expected: &LicenseV3{
+				ID:  "does-not-matter",
+				Key: "does-not-matter-key",
+				Data: map[string]interface{}{
+					"plan": map[string]interface{}{
+						"name": "TEAMS",
+					},
+					"category":    "FREE",
+					"status":      "INACTIVE",
+					"valid_from":  float64(1730899309),
+					"valid_until": float64(-1),
+				},
+				PlanName:   PlanNameBasic,
+				ValidFrom:  1730899309,
+				ValidUntil: -1,
+				Status:     "INACTIVE",
+				IsCurrent:  false,
+				Features:   model.FeatureSet{},
+			},
+		},
+		{
+			name: "fallback states for validFrom and validUntil",
+			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`),
+			pass: true,
+			expected: &LicenseV3{
+				ID:  "does-not-matter",
+				Key: "does-not-matter-key",
+				Data: map[string]interface{}{
+					"plan": map[string]interface{}{
+						"name": "TEAMS",
+					},
+					"valid_from":  1234.456,
+					"valid_until": 5678.567,
+					"category":    "FREE",
+					"status":      "ACTIVE",
+				},
+				PlanName:   PlanNameTeams,
+				ValidFrom:  1234,
+				ValidUntil: 5678,
+				Status:     "ACTIVE",
+				IsCurrent:  false,
+				Features:   model.FeatureSet{},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		var licensePayload map[string]interface{}
+		err := json.Unmarshal(tc.data, &licensePayload)
+		require.NoError(t, err)
+		license, err := NewLicenseV3(licensePayload)
+		if license != nil {
+			license.Features = make(model.FeatureSet, 0)
+			delete(license.Data, "features")
+		}
+
+		if tc.pass {
+			require.NoError(t, err)
+			require.NotNil(t, license)
+			assert.Equal(t, tc.expected, license)
+		} else {
+			require.Error(t, err)
+			assert.EqualError(t, err, tc.error.Error())
+			require.Nil(t, license)
+		}
+
+	}
+}

@@ -1,6 +1,7 @@
 package model
 
 import (
+	"go.signoz.io/signoz/pkg/query-service/constants"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 )
 
@@ -8,6 +9,21 @@ const SSO = "SSO"
 const Basic = "BASIC_PLAN"
 const Pro = "PRO_PLAN"
 const Enterprise = "ENTERPRISE_PLAN"
+
+var (
+	PlanNameEnterprise = "ENTERPRISE"
+	PlanNameTeams      = "TEAMS"
+	PlanNameBasic      = "BASIC"
+)
+
+var (
+	MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
+)
+
+var (
+	LicenseStatusInactive = "INACTIVE"
+)
+
 const DisableUpsell = "DISABLE_UPSELL"
 const Onboarding = "ONBOARDING"
 const ChatSupport = "CHAT_SUPPORT"

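Note: despite its name, `MapOldPlanKeyToNewPlanName` is keyed by the *new* plan name and yields the legacy plan key — e.g. `"TEAMS"` maps to `"PRO_PLAN"` — which is the direction `ConvertLicenseV3ToLicenseV2` needs. A tiny standalone sketch of the lookup (local names here are illustrative):

```go
package main

import "fmt"

// Mirrors the mapping above: new plan name -> legacy plan key.
var newPlanNameToOldPlanKey = map[string]string{
	"BASIC":      "BASIC_PLAN",
	"TEAMS":      "PRO_PLAN",
	"ENTERPRISE": "ENTERPRISE_PLAN",
}

func main() {
	fmt.Println(newPlanNameToOldPlanKey["TEAMS"]) // PRO_PLAN
}
```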
@@ -134,6 +150,13 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.HostsInfraMonitoring,
+		Active:     constants.EnableHostsInfraMonitoring(),
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }
 
 var ProPlan = basemodel.FeatureSet{
@@ -249,6 +272,13 @@ var ProPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.HostsInfraMonitoring,
+		Active:     constants.EnableHostsInfraMonitoring(),
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }
 
 var EnterprisePlan = basemodel.FeatureSet{
@@ -373,7 +403,14 @@ var EnterprisePlan = basemodel.FeatureSet{
 	},
 	basemodel.Feature{
 		Name:       basemodel.AnomalyDetection,
-		Active:     false,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       basemodel.HostsInfraMonitoring,
+		Active:     constants.EnableHostsInfraMonitoring(),
 		Usage:      0,
 		UsageLimit: -1,
 		Route:      "",

@@ -61,6 +61,11 @@ func NewAnomalyRule(
 
 	zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))
 
+	if p.RuleCondition.CompareOp == baserules.ValueIsBelow {
+		target := -1 * *p.RuleCondition.Target
+		p.RuleCondition.Target = &target
+	}
+
 	baseRule, err := baserules.NewBaseRule(id, p, reader, opts...)
 	if err != nil {
 		return nil, err

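Note: negating the target converts a "value below T" condition into "negated value above -T", so the rule can be evaluated with a single directional comparison. A minimal sketch of the sign-flip trick, under the assumption that the evaluator compares a correspondingly negated observed value elsewhere:

```go
package main

import "fmt"

// isBreached evaluates only "above target"; a below-target rule is expressed
// by negating both sides, which is what NewAnomalyRule does to
// RuleCondition.Target for ValueIsBelow.
func isBreached(value, target float64) bool {
	return value > target
}

func main() {
	target := 10.0         // alert when the value drops below 10
	flipped := -1 * target // stored as -10

	fmt.Println(isBreached(-8.0, flipped))  // true: 8 is below 10
	fmt.Println(isBreached(-12.0, flipped)) // false: 12 is not below 10
}
```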
@@ -1,10 +1,15 @@
 package rules
 
 import (
+	"context"
 	"fmt"
 	"time"
 
+	"github.com/google/uuid"
+	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 	baserules "go.signoz.io/signoz/pkg/query-service/rules"
+	"go.signoz.io/signoz/pkg/query-service/utils/labels"
+	"go.uber.org/zap"
 )
 
 func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {
@@ -21,6 +26,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 			opts.FF,
 			opts.Reader,
 			opts.UseLogsNewSchema,
+			opts.UseTraceNewSchema,
 			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
 		)
 
@@ -79,6 +85,107 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 	return task, nil
 }
 
+// TestNotification prepares a dummy rule for given rule parameters and
+// sends a test notification. returns alert count and error (if any)
+func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.ApiError) {
+
+	ctx := context.Background()
+
+	if opts.Rule == nil {
+		return 0, basemodel.BadRequest(fmt.Errorf("rule is required"))
+	}
+
+	parsedRule := opts.Rule
+	var alertname = parsedRule.AlertName
+	if alertname == "" {
+		// alertname is not mandatory for testing, so picking
+		// a random string here
+		alertname = uuid.New().String()
+	}
+
+	// append name to indicate this is test alert
+	parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, baserules.TestAlertPostFix)
+
+	var rule baserules.Rule
+	var err error
+
+	if parsedRule.RuleType == baserules.RuleTypeThreshold {
+
+		// add special labels for test alerts
+		parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
+		parsedRule.Labels[labels.RuleSourceLabel] = ""
+		parsedRule.Labels[labels.AlertRuleIdLabel] = ""
+
+		// create a threshold rule
+		rule, err = baserules.NewThresholdRule(
+			alertname,
+			parsedRule,
+			opts.FF,
+			opts.Reader,
+			opts.UseLogsNewSchema,
+			opts.UseTraceNewSchema,
+			baserules.WithSendAlways(),
+			baserules.WithSendUnmatched(),
+		)
+
+		if err != nil {
+			zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
+			return 0, basemodel.BadRequest(err)
+		}
+
+	} else if parsedRule.RuleType == baserules.RuleTypeProm {
+
+		// create promql rule
+		rule, err = baserules.NewPromRule(
+			alertname,
+			parsedRule,
+			opts.Logger,
+			opts.Reader,
+			opts.ManagerOpts.PqlEngine,
+			baserules.WithSendAlways(),
+			baserules.WithSendUnmatched(),
+		)
+
+		if err != nil {
+			zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
+			return 0, basemodel.BadRequest(err)
+		}
+	} else if parsedRule.RuleType == baserules.RuleTypeAnomaly {
+		// create anomaly rule
+		rule, err = NewAnomalyRule(
+			alertname,
+			parsedRule,
+			opts.FF,
+			opts.Reader,
+			opts.Cache,
+			baserules.WithSendAlways(),
+			baserules.WithSendUnmatched(),
+		)
+		if err != nil {
+			zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", rule.Name()), zap.Error(err))
+			return 0, basemodel.BadRequest(err)
+		}
+	} else {
+		return 0, basemodel.BadRequest(fmt.Errorf("failed to derive ruletype with given information"))
+	}
+
+	// set timestamp to current utc time
+	ts := time.Now().UTC()
+
+	count, err := rule.Eval(ctx, ts)
+	if err != nil {
+		zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
+		return 0, basemodel.InternalError(fmt.Errorf("rule evaluation failed"))
+	}
+	alertsFound, ok := count.(int)
+	if !ok {
+		return 0, basemodel.InternalError(fmt.Errorf("something went wrong"))
+	}
+	rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), opts.NotifyFunc)
+
+	return alertsFound, nil
+}
+
 // newTask returns an appropriate group for
 // rule type
 func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, ruleDB baserules.RuleDB) baserules.Task {

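Note: `rule.Eval` returns an `interface{}`, so the test path asserts it back to `int` before reporting the alert count. A tiny self-contained sketch of that assertion pattern (helper name is illustrative):

```go
package main

import "fmt"

// countAlerts mirrors the count.(int) assertion in TestNotification.
func countAlerts(result interface{}) (int, error) {
	n, ok := result.(int)
	if !ok {
		return 0, fmt.Errorf("unexpected eval result type %T", result)
	}
	return n, nil
}

func main() {
	n, err := countAlerts(3)
	fmt.Println(n, err) // 3 <nil>
}
```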
@@ -46,7 +46,7 @@ type Manager struct {
 	tenantID string
 }
 
-func New(dbType string, modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
+func New(modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
 	hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
 	hostNameRegexMatches := hostNameRegex.FindStringSubmatch(os.Getenv("ClickHouseUrl"))
 

@@ -13,8 +13,3 @@ if [ "$branch" = "main" ]; then
 	echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
 	exit 1
 fi
 
-if [ "$branch" = "develop" ]; then
-	echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
-	exit 1
-fi
@@ -24,7 +24,7 @@ const config: Config.InitialOptions = {
 		'^.+\\.(js|jsx)$': 'babel-jest',
 	},
 	transformIgnorePatterns: [
-		'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color)/)',
+		'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color|api)/)',
 	],
 	setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
 	testPathIgnorePatterns: ['/node_modules/', '/public/'],
@@ -21,7 +21,7 @@
 		"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
 		"commitlint": "commitlint --edit $1",
 		"test": "jest --coverage",
-		"test:changedsince": "jest --changedSince=develop --coverage --silent"
+		"test:changedsince": "jest --changedSince=main --coverage --silent"
 	},
 	"engines": {
 		"node": ">=16.15.0"
@@ -34,15 +34,16 @@
 		"@dnd-kit/core": "6.1.0",
 		"@dnd-kit/modifiers": "7.0.0",
 		"@dnd-kit/sortable": "8.0.0",
-		"@grafana/data": "^9.5.2",
+		"@grafana/data": "^11.2.3",
 		"@mdx-js/loader": "2.3.0",
 		"@mdx-js/react": "2.3.0",
 		"@monaco-editor/react": "^4.3.1",
 		"@radix-ui/react-tabs": "1.0.4",
 		"@radix-ui/react-tooltip": "1.0.7",
-		"@sentry/react": "7.102.1",
-		"@sentry/webpack-plugin": "2.16.0",
-		"@signozhq/design-tokens": "0.0.8",
+		"@sentry/react": "8.41.0",
+		"@sentry/webpack-plugin": "2.22.6",
+		"@signozhq/design-tokens": "1.1.4",
+		"@tanstack/react-table": "8.20.6",
 		"@uiw/react-md-editor": "3.23.5",
 		"@visx/group": "3.3.0",
 		"@visx/shape": "3.5.0",
@@ -51,7 +52,7 @@
 		"ansi-to-html": "0.7.2",
 		"antd": "5.11.0",
 		"antd-table-saveas-excel": "2.2.1",
-		"axios": "1.7.4",
+		"axios": "1.7.7",
 		"babel-eslint": "^10.1.0",
 		"babel-jest": "^29.6.4",
 		"babel-loader": "9.1.3",
@@ -76,7 +77,7 @@
 		"fontfaceobserver": "2.3.0",
 		"history": "4.10.1",
 		"html-webpack-plugin": "5.5.0",
-		"http-proxy-middleware": "2.0.6",
+		"http-proxy-middleware": "3.0.3",
 		"i18next": "^21.6.12",
 		"i18next-browser-languagedetector": "^6.1.3",
 		"i18next-http-backend": "^1.3.2",
@@ -87,6 +88,8 @@
 		"lodash-es": "^4.17.21",
 		"lucide-react": "0.379.0",
 		"mini-css-extract-plugin": "2.4.5",
+		"overlayscrollbars": "^2.8.1",
+		"overlayscrollbars-react": "^0.5.6",
 		"papaparse": "5.4.1",
 		"posthog-js": "1.160.3",
 		"rc-tween-one": "3.0.6",
@@ -107,11 +110,10 @@
 		"react-query": "3.39.3",
 		"react-redux": "^7.2.2",
 		"react-router-dom": "^5.2.0",
+		"react-router-dom-v5-compat": "6.27.0",
 		"react-syntax-highlighter": "15.5.0",
 		"react-use": "^17.3.2",
 		"react-virtuoso": "4.0.3",
-		"overlayscrollbars-react": "^0.5.6",
-		"overlayscrollbars": "^2.8.1",
 		"redux": "^4.0.5",
 		"redux-thunk": "^2.3.0",
 		"rehype-raw": "7.0.0",
@@ -123,11 +125,11 @@
 		"ts-node": "^10.2.1",
 		"tsconfig-paths-webpack-plugin": "^3.5.1",
 		"typescript": "^4.0.5",
-		"uplot": "1.6.26",
+		"uplot": "1.6.31",
 		"uuid": "^8.3.2",
 		"web-vitals": "^0.2.4",
-		"webpack": "5.88.2",
-		"webpack-dev-server": "^4.15.1",
+		"webpack": "5.94.0",
+		"webpack-dev-server": "^4.15.2",
 		"webpack-retry-chunk-load-plugin": "3.1.1",
 		"xstate": "^4.31.0"
 	},
@@ -152,6 +154,7 @@
 		"@babel/preset-typescript": "^7.21.4",
 		"@commitlint/cli": "^16.3.0",
 		"@commitlint/config-conventional": "^16.2.4",
+		"@faker-js/faker": "9.3.0",
 		"@jest/globals": "^27.5.1",
 		"@playwright/test": "^1.22.0",
 		"@testing-library/jest-dom": "5.16.5",
@@ -240,6 +243,9 @@
 		"semver": "7.5.4",
 		"xml2js": "0.5.0",
 		"phin": "^3.7.1",
-		"body-parser": "1.20.3"
+		"body-parser": "1.20.3",
+		"http-proxy-middleware": "3.0.3",
+		"cross-spawn": "7.0.5",
+		"cookie": "^0.7.1"
 	}
 }
1 frontend/public/Icons/broom.svg Normal file
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="none"><path fill="#A65F3E" d="M8.04 10.331a.41.41 0 0 1-.414-.414.4.4 0 0 1 .121-.292l8.071-8.071a.414.414 0 1 1 .585.585l-8.07 8.071a.4.4 0 0 1-.293.121"/><path fill="#A65F3E" d="M16.11 1.5c.09 0 .178.034.245.101a.35.35 0 0 1 0 .492l-8.07 8.07a.346.346 0 0 1-.49 0 .35.35 0 0 1 0-.49l8.07-8.072a.35.35 0 0 1 .245-.101m0-.133a.48.48 0 0 0-.338.14L7.7 9.578a.47.47 0 0 0-.14.34.475.475 0 0 0 .478.478c.13 0 .25-.05.34-.14l8.07-8.071a.48.48 0 0 0-.339-.818"/><path fill="#FFE082" d="m1.701 12.438 3.89 3.889c.873-.963 1.62-2.057 2.023-3.313.03-.091.034-.24.128-.359.451-.566 1.865-2.008.706-3.167-1.106-1.106-2.438.227-2.994.686-.17.14-.384.228-.606.276-1.493.326-3.034 1.869-3.147 1.988"/><path fill="#FFE082" d="M8.385 8.577a.62.62 0 0 1 .393-.085c.098.018.237.135.38.28.144.143.28.304.32.408s-.005.242-.005.242c-.116.23-.383.69-.6.624-.24-.074-.482-.305-.66-.479a1.5 1.5 0 0 1-.276-.328c-.096-.177.008-.324.129-.447.086-.082.232-.17.319-.215"/><path fill="#F9C248" d="M8.327 8.975c.116.11.21.243.339.338.252.185.455.097.62-.052.049-.044.122-.1.17-.055a.1.1 0 0 1 .025.051.45.45 0 0 1-.045.273 1.3 1.3 0 0 1-.433.529c-.032.022-.07.044-.11.032a.12.12 0 0 1-.056-.045c-.207-.244-.37-.533-.626-.724-.103-.076-.364-.132-.298-.303.1-.262.317-.137.414-.044"/><path fill="#F9C248" d="M7.614 13.014c.028-.091.033-.24.127-.359.515-.645 1.223-1.38 1.145-2.275-.01-.123-.169-.75-.342-.514-.04.052-.024.315-.03.379-.1 1.172-1.02 1.821-1.19 2.024s-.164.393-.31.695a5 5 0 0 1-.61.947c-.379.47-.825.88-1.286 1.27a.8.8 0 0 0-.203.217c-.131.241.153.406.305.558l.369.368c.873-.961 1.62-2.055 2.025-3.31"/><path fill="#E2A610" d="M5.537 15.809c-.1-.157-.242-.3-.317-.458a.24.24 0 0 1-.03-.123c.01-.08.13-.15.187-.198q.129-.108.254-.22c.162-.149.314-.314.419-.509.017-.031.032-.07.016-.102-.035-.065-.238.152-.275.186-.105.092-.208.187-.318.272-.146.113-.422.304-.618.213-.1-.046-.19-.169-.263-.249-.084-.094-.164-.191-.252-.283a17 17 0 0 0-.592-.582c-.05-.046-.06-.066-.003-.122a10 10 0 0 0 .546-.58c.022-.025.044-.067.017-.09-.018-.015-.048-.004-.07.007-.26.138-.467.354-.692.544-.055.046-.214-.13-.249-.158-.092-.073-.154-.102-.046-.21.484-.49.972-.946 1.554-1.323.107-.07.22-.14.28-.253-.01-.03-.054-.026-.085-.015-.807.29-1.89 1.291-1.983 1.38-.162.158-.454-.206-.885-.481-.147-.094 0-.235.038-.279.26-.307.603-.642.603-.642-.127.013-.956.76-1.054.873-.084.097-.17.184-.175.318a.52.52 0 0 0 .107.325c.77 1.05 2.586 2.794 3.23 3.253.384.274.502.224.659.068a.35.35 0 0 0 .105-.3.65.65 0 0 0-.108-.263"/><path fill="#A65F3E" d="M8.835 10.176s-.438-.825-1.017-1.074c0 0-.02-.054.02-.1.052-.057.12-.07.157-.046.427.265.812.619 1.007 1.102.039.093-.131.23-.167.118"/><path fill="#F44336" d="M7.64 12.88c-.528-.818-1.63-1.937-2.46-2.524-.066-.046.204-.204.272-.156a9.7 9.7 0 0 1 2.31 2.398c.045.067-.091.33-.123.282M8.193 12.078c-.506-.71-1.521-1.738-2.238-2.312-.062-.05.182-.22.232-.181.755.602 1.668 1.499 2.18 2.263.037.057-.138.282-.174.23"/></svg>
1 frontend/public/Icons/infraContainers.svg Normal file
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" width="14" height="16" fill="none"><path fill="#616161" fill-rule="evenodd" d="M8.096 2.885H4.372V2.51h3.724zM8.096 4.79H4.372v-.375h3.724z" clip-rule="evenodd"/><path fill="#9E9E9E" d="M7.098 15.539H5.662V.936s.134-.311.719-.311.719.311.719.311v14.603z"/><path fill="#757575" d="M6.73.671V12.47H5.662v1.074c.181.001.345.023.493.055.336.074.576.37.576.714v1.227H7.1V.936c-.002 0-.08-.179-.37-.265"/><path fill="#2196F3" d="M10.58.54a3.03 3.03 0 0 0-3.028 3.038 3.02 3.02 0 0 0 3.027 3.028 3.025 3.025 0 0 0 3.028-3.028A3.035 3.035 0 0 0 10.579.54"/><path fill="#fff" d="M11.902 1.671c-.19-.048-.569-.098-1.321-.098-.753 0-1.132.05-1.322.098-.112.029-.488.185-.488.606v2.598c0 .142.115.258.258.258h.095v.288c0 .084.068.151.152.151h.306a.15.15 0 0 0 .151-.151v-.288h1.693v.288c0 .084.067.151.15.151h.307a.15.15 0 0 0 .151-.151v-.288h.098a.26.26 0 0 0 .259-.258V2.277c0-.404-.377-.579-.49-.606m-2.139.206c0-.064.051-.115.115-.115h1.403c.063 0 .115.051.115.115v.204a.115.115 0 0 1-.115.115H9.878a.115.115 0 0 1-.115-.115zm.024 2.736a.08.08 0 0 1-.078.078h-.308a.264.264 0 0 1-.264-.264v-.139c0-.042.035-.077.077-.077h.31c.144 0 .263.117.263.264zm2.235-.186a.264.264 0 0 1-.264.264h-.309a.08.08 0 0 1-.077-.078v-.138c0-.145.117-.264.264-.264h.308c.043 0 .078.035.078.077zm.07-1.129c0 .168-.363.46-1.513.46s-1.512-.27-1.512-.46v-.767c0-.05.05-.175.175-.175h2.695c.125 0 .155.126.155.175z"/><path fill="#F5F5F5" d="M8.61 12.867H4.15a.285.285 0 0 1-.285-.285v-5.15c0-.158.127-.285.285-.285h4.457c.158 0 .285.127.285.285v5.15a.285.285 0 0 1-.284.285"/><path fill="#82AEC0" d="M8.128 12.015H4.632l-.01-4.07H8.12z" opacity=".8"/><path fill="#F5F5F5" fill-rule="evenodd" d="M6.246 12.07V7.945h.25v4.123z" clip-rule="evenodd"/><path fill="#616161" d="M6.246 7.946H4.622v.34h1.624z"/><path fill="#F5F5F5" fill-rule="evenodd" d="M8.142 11.307H4.618v-.125h3.524zM8.142 10.482H4.618v-.125h3.524zM8.12 9.657H4.621v-.125H8.12zM8.12 8.833H4.617v-.125H8.12z" clip-rule="evenodd"/><path fill="#616161" d="M8.118 9.426H6.495v.34h1.623zM6.253 10.25H4.635v.34h1.618z"/><path fill="#9E9E9E" fill-rule="evenodd" d="M4.15 7.334a.097.097 0 0 0-.097.098v5.15c0 .054.044.097.098.097h4.458a.097.097 0 0 0 .097-.097v-5.15a.097.097 0 0 0-.098-.098zm-.472.098c0-.261.212-.473.473-.473h4.457c.261 0 .473.212.473.473v5.15c0 .26-.211.472-.472.472H4.151a.47.47 0 0 1-.473-.472z" clip-rule="evenodd"/><path fill="#757575" d="M4.17 12.682c-.194.017-.194-.11-.194-.145V7.493c0-.141.115-.256.256-.256H8.56c.118 0 .172.071.148.216 0 0-.015-.092-.128-.092H4.233a.133.133 0 0 0-.132.132v5.045c0 .117.069.144.069.144"/><path fill="#FFCA28" d="M4.9 1.775H.642v3.7h4.26z"/><path fill="#9E9E9E" d="M4.663 1.775c.132 0 .238.106.238.237v3.225a.237.237 0 0 1-.238.237H.88a.237.237 0 0 1-.238-.237V2.012c0-.131.107-.237.238-.237zm0-.25H.88a.49.49 0 0 0-.488.487v3.225c0 .269.22.487.488.487h3.783a.49.49 0 0 0 .488-.487V2.012a.487.487 0 0 0-.488-.487"/><path fill="#FFFDE7" fill-rule="evenodd" d="M4.902 3.11H.642v-.25h4.26zM4.902 4.388H.642v-.25h4.26z" clip-rule="evenodd"/><path fill="#757575" d="M1.975 2.186H.904v.282h1.07zM1.711 4.777H.904v.283h.807zM4.552 4.777h-.807v.283h.807zM4.552 2.186h-.807v.282h.807zM3.795 3.482H3.33v.282h.465zM4.552 3.482h-.465v.282h.465zM2.388 3.482H.904v.282h1.484z"/></svg>
1 frontend/public/Images/feature-graphic-correlation.svg Normal file
File diff suppressed because one or more lines are too long
12 frontend/public/locales/en-GB/failedPayment.json Normal file
@@ -0,0 +1,12 @@
+{
+	"workspaceSuspended": "Your workspace is locked",
+	"gotQuestions": "Got Questions?",
+	"contactUs": "Contact Us",
+	"actionHeader": "Pay to continue",
+	"actionDescription": "Pay now to keep enjoying all the great features you’ve been using.",
+	"yourDataIsSafe": "Your data is safe with us until",
+	"actNow": "Act now to avoid any disruptions and continue where you left off.",
+	"contactAdmin": "Contact your admin to proceed with the upgrade.",
+	"continueMyJourney": "Settle your bill to continue",
+	"somethingWentWrong": "Something went wrong"
+}
8 frontend/public/locales/en-GB/infraMonitoring.json Normal file
@@ -0,0 +1,8 @@
+{
+	"containers_visualization_message": "The ability to visualise containers is in active development and should be available to you soon.",
+	"processes_visualization_message": "The ability to visualise processes is in active development and should be available to you soon.",
+	"working_message": "We're working to extend infrastructure monitoring to take care of a bunch of different cases. Thank you for your patience.",
+	"waitlist_message": "Join the waitlist for early access.",
+	"waitlist_success_message": "We have received your request for early access. We will get back to you as soon as we launch the feature.",
+	"contact_support": "Contact Support"
+}
24 frontend/public/locales/en-GB/messagingQueues.json Normal file
@@ -0,0 +1,24 @@
+{
+	"metricGraphCategory": {
+		"brokerMetrics": {
+			"title": "Broker Metrics",
+			"description": "The Kafka Broker metrics here inform you of data loss/delay through unclean leader elections and network throughputs, as well as request fails through request purgatories and timeouts metrics"
+		},
+		"consumerMetrics": {
+			"title": "Consumer Metrics",
+			"description": "Kafka Consumer metrics provide insights into lag between message production and consumption, success rates and latency of message delivery, and the volume of data consumed."
+		},
+		"producerMetrics": {
+			"title": "Producer Metrics",
+			"description": "Kafka Producers send messages to brokers for storage and distribution by topic. These metrics inform you of the volume and rate of data sent, and the success rate of message delivery."
+		},
+		"brokerJVMMetrics": {
+			"title": "Broker JVM Metrics",
+			"description": "Kafka brokers are Java applications that expose JVM metrics to inform on the broker's system health. Garbage collection metrics like those below provide key insights into free memory, broker performance, and heap size. You need to enable new_gc_metrics for this section to populate."
+		},
+		"partitionMetrics": {
+			"title": "Partition Metrics",
+			"description": "Kafka partitions are the unit of parallelism in Kafka. These metrics inform you of the number of partitions per topic, the current offset of each partition, the oldest offset, and the number of in-sync replicas."
+		}
+	}
+}
@@ -1,30 +1,54 @@
 {
 	"breadcrumb": "Messaging Queues",
 	"header": "Kafka / Overview",
 	"overview": {
 		"title": "Start sending data in as little as 20 minutes",
 		"subtitle": "Connect and Monitor Your Data Streams"
 	},
 	"configureConsumer": {
 		"title": "Configure Consumer",
 		"description": "Add consumer data sources to gain insights and enhance monitoring.",
 		"button": "Get Started"
 	},
 	"configureProducer": {
 		"title": "Configure Producer",
 		"description": "Add producer data sources to gain insights and enhance monitoring.",
 		"button": "Get Started"
 	},
 	"monitorKafka": {
 		"title": "Monitor kafka",
 		"description": "Add your Kafka source to gain insights and enhance activity tracking.",
 		"button": "Get Started"
 	},
 	"summarySection": {
-		"viewDetailsButton": "View Details"
-	},
-	"confirmModal": {
-		"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
-		"okText": "Proceed"
-	}
-}
+		"viewDetailsButton": "View Details",
+		"consumer": {
+			"title": "Consumer lag view",
+			"description": "Connect and Monitor Your Data Streams"
+		},
+		"producer": {
+			"title": "Producer latency view",
+			"description": "Connect and Monitor Your Data Streams"
+		},
+		"partition": {
+			"title": "Partition Latency view",
+			"description": "Connect and Monitor Your Data Streams"
+		},
+		"dropRate": {
+			"title": "Drop Rate view",
+			"description": "Connect and Monitor Your Data Streams"
+		},
+		"metricPage": {
+			"title": "Metric View",
+			"description": "Connect and Monitor Your Data Streams"
+		}
+	},
+	"confirmModal": {
+		"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
+		"okText": "Proceed"
+	},
+	"overviewSummarySection": {
+		"title": "Monitor Your Data Streams",
+		"subtitle": "Monitor key Kafka metrics like consumer lag and latency to ensure efficient data flow and troubleshoot in real time."
+	}
+}
@@ -3,9 +3,10 @@
 	"alert_channels": "Alert Channels",
 	"organization_settings": "Organization Settings",
 	"ingestion_settings": "Ingestion Settings",
-	"api_keys": "Access Tokens",
+	"api_keys": "API Keys",
 	"my_settings": "My Settings",
 	"overview_metrics": "Overview Metrics",
+	"custom_domain_settings": "Custom Domain Settings",
 	"dbcall_metrics": "Database Calls",
 	"external_metrics": "External Calls",
 	"pipeline": "Pipeline",
@@ -26,7 +26,8 @@
 	"MY_SETTINGS": "SigNoz | My Settings",
 	"ORG_SETTINGS": "SigNoz | Organization Settings",
 	"INGESTION_SETTINGS": "SigNoz | Ingestion Settings",
-	"API_KEYS": "SigNoz | Access Tokens",
+	"API_KEYS": "SigNoz | API Keys",
+	"CUSTOM_DOMAIN_SETTINGS": "SigNoz | Custom Domain Settings",
 	"SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong",
 	"UN_AUTHORIZED": "SigNoz | Unauthorized",
 	"NOT_FOUND": "SigNoz | Page Not Found",
@@ -37,8 +38,11 @@
 	"PASSWORD_RESET": "SigNoz | Password Reset",
 	"LIST_LICENSES": "SigNoz | List of Licenses",
 	"WORKSPACE_LOCKED": "SigNoz | Workspace Locked",
+	"WORKSPACE_SUSPENDED": "SigNoz | Workspace Suspended",
 	"SUPPORT": "SigNoz | Support",
 	"DEFAULT": "Open source Observability Platform | SigNoz",
 	"ALERT_HISTORY": "SigNoz | Alert Rule History",
-	"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview"
+	"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview",
+	"INFRASTRUCTURE_MONITORING_HOSTS": "SigNoz | Infra Monitoring",
+	"INFRASTRUCTURE_MONITORING_KUBERNETES": "SigNoz | Infra Monitoring"
 }
@@ -1,3 +1,3 @@
 {
-	"delete_confirm_message": "Are you sure you want to delete {{keyName}} token? Deleting a token is irreversible and cannot be undone."
+	"delete_confirm_message": "Are you sure you want to delete {{keyName}} key? Deleting a key is irreversible and cannot be undone."
 }
@@ -3,7 +3,9 @@
 	"billing": "Billing",
 	"manage_billing_and_costs": "Manage your billing information, invoices, and monitor costs.",
 	"enterprise_cloud": "Enterprise Cloud",
+	"teams_cloud": "Teams Cloud",
 	"enterprise": "Enterprise",
+	"teams": "Teams",
 	"card_details_recieved_and_billing_info": "We have received your card details, your billing will only start after the end of your free trial period.",
 	"upgrade_plan": "Upgrade Plan",
 	"manage_billing": "Manage Billing",
@@ -7,5 +7,5 @@
 	"save": "Save",
 	"edit": "Edit",
 	"logged_in": "Logged In",
-	"pending_data_placeholder": "Just a bit of patience, just a little bit’s enough ⎯ we’re getting your {{dataSource}}!"
+	"pending_data_placeholder": "Retrieving your {{dataSource}}!"
 }
12 frontend/public/locales/en/failedPayment.json Normal file
@@ -0,0 +1,12 @@
+{
+	"workspaceSuspended": "Your workspace is locked",
+	"gotQuestions": "Got Questions?",
+	"contactUs": "Contact Us",
+	"actionHeader": "Pay to continue",
+	"actionDescription": "Pay now to keep enjoying all the great features you’ve been using.",
+	"yourDataIsSafe": "Your data is safe with us until",
+	"actNow": "Act now to avoid any disruptions and continue where you left off.",
+	"contactAdmin": "Contact your admin to proceed with the upgrade.",
+	"continueMyJourney": "Settle your bill to continue",
+	"somethingWentWrong": "Something went wrong"
+}
8 frontend/public/locales/en/infraMonitoring.json Normal file
@@ -0,0 +1,8 @@
+{
+	"containers_visualization_message": "The ability to visualise containers is in active development and should be available to you soon.",
+	"processes_visualization_message": "The ability to visualise processes is in active development and should be available to you soon.",
+	"working_message": "We're working to extend infrastructure monitoring to take care of a bunch of different cases. Thank you for your patience.",
+	"waitlist_message": "Join the waitlist for early access.",
+	"waitlist_success_message": "We have received your request for early access. We will get back to you as soon as we launch the feature.",
+	"contact_support": "Contact Support"
+}
24 frontend/public/locales/en/messagingQueues.json Normal file
@@ -0,0 +1,24 @@
+{
+	"metricGraphCategory": {
+		"brokerMetrics": {
+			"title": "Broker Metrics",
+			"description": "The Kafka Broker metrics here inform you of data loss/delay through unclean leader elections and network throughputs, as well as request fails through request purgatories and timeouts metrics"
+		},
+		"consumerMetrics": {
+			"title": "Consumer Metrics",
+			"description": "Kafka Consumer metrics provide insights into lag between message production and consumption, success rates and latency of message delivery, and the volume of data consumed."
+		},
+		"producerMetrics": {
+			"title": "Producer Metrics",
+			"description": "Kafka Producers send messages to brokers for storage and distribution by topic. These metrics inform you of the volume and rate of data sent, and the success rate of message delivery."
+		},
+		"brokerJVMMetrics": {
+			"title": "Broker JVM Metrics",
+			"description": "Kafka brokers are Java applications that expose JVM metrics to inform on the broker's system health. Garbage collection metrics like those below provide key insights into free memory, broker performance, and heap size. You need to enable new_gc_metrics for this section to populate."
+		},
+		"partitionMetrics": {
+			"title": "Partition Metrics",
+			"description": "Kafka partitions are the unit of parallelism in Kafka. These metrics inform you of the number of partitions per topic, the current offset of each partition, the oldest offset, and the number of in-sync replicas."
+		}
+	}
+}
@@ -1,30 +1,54 @@
 {
 	"breadcrumb": "Messaging Queues",
 	"header": "Kafka / Overview",
 	"overview": {
 		"title": "Start sending data in as little as 20 minutes",
 		"subtitle": "Connect and Monitor Your Data Streams"
 	},
 	"configureConsumer": {
 		"title": "Configure Consumer",
 		"description": "Add consumer data sources to gain insights and enhance monitoring.",
 		"button": "Get Started"
 	},
 	"configureProducer": {
 		"title": "Configure Producer",
 		"description": "Add producer data sources to gain insights and enhance monitoring.",
 		"button": "Get Started"
 	},
 	"monitorKafka": {
 		"title": "Monitor kafka",
 		"description": "Add your Kafka source to gain insights and enhance activity tracking.",
 		"button": "Get Started"
 	},
 	"summarySection": {
-		"viewDetailsButton": "View Details"
-	},
-	"confirmModal": {
-		"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
-		"okText": "Proceed"
-	}
-}
+		"viewDetailsButton": "View Details",
+		"consumer": {
+			"title": "Consumer lag view",
+			"description": "Connect and Monitor Your Data Streams"
+		},
+		"producer": {
+			"title": "Producer latency view",
+			"description": "Connect and Monitor Your Data Streams"
+		},
+		"partition": {
+			"title": "Partition Latency view",
+			"description": "Connect and Monitor Your Data Streams"
+		},
+		"dropRate": {
+			"title": "Drop Rate view",
+			"description": "Connect and Monitor Your Data Streams"
+		},
+		"metricPage": {
+			"title": "Metric View",
+			"description": "Connect and Monitor Your Data Streams"
+		}
+	},
+	"confirmModal": {
+		"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
+		"okText": "Proceed"
+	},
+	"overviewSummarySection": {
+		"title": "Monitor Your Data Streams",
+		"subtitle": "Monitor key Kafka metrics like consumer lag and latency to ensure efficient data flow and troubleshoot in real time."
+	}
+}
@@ -3,9 +3,10 @@
 	"alert_channels": "Alert Channels",
 	"organization_settings": "Organization Settings",
 	"ingestion_settings": "Ingestion Settings",
-	"api_keys": "Access Tokens",
+	"api_keys": "API Keys",
 	"my_settings": "My Settings",
 	"overview_metrics": "Overview Metrics",
+	"custom_domain_settings": "Custom Domain Settings",
 	"dbcall_metrics": "Database Calls",
 	"external_metrics": "External Calls",
 	"pipeline": "Pipeline",
@@ -4,6 +4,7 @@
 	"SERVICE_METRICS": "SigNoz | Service Metrics",
 	"SERVICE_MAP": "SigNoz | Service Map",
 	"GET_STARTED": "SigNoz | Get Started",
+	"ONBOARDING": "SigNoz | Get Started",
 	"GET_STARTED_APPLICATION_MONITORING": "SigNoz | Get Started | APM",
 	"GET_STARTED_LOGS_MANAGEMENT": "SigNoz | Get Started | Logs",
 	"GET_STARTED_INFRASTRUCTURE_MONITORING": "SigNoz | Get Started | Infrastructure",
@@ -31,7 +32,8 @@
 	"MY_SETTINGS": "SigNoz | My Settings",
 	"ORG_SETTINGS": "SigNoz | Organization Settings",
 	"INGESTION_SETTINGS": "SigNoz | Ingestion Settings",
-	"API_KEYS": "SigNoz | Access Tokens",
+	"API_KEYS": "SigNoz | API Keys",
+	"CUSTOM_DOMAIN_SETTINGS": "SigNoz | Custom Domain Settings",
 	"SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong",
 	"UN_AUTHORIZED": "SigNoz | Unauthorized",
 	"NOT_FOUND": "SigNoz | Page Not Found",
@@ -44,6 +46,7 @@
 	"PASSWORD_RESET": "SigNoz | Password Reset",
 	"LIST_LICENSES": "SigNoz | List of Licenses",
 	"WORKSPACE_LOCKED": "SigNoz | Workspace Locked",
+	"WORKSPACE_SUSPENDED": "SigNoz | Workspace Suspended",
 	"SUPPORT": "SigNoz | Support",
 	"LOGS_SAVE_VIEWS": "SigNoz | Logs Saved Views",
 	"TRACES_SAVE_VIEWS": "SigNoz | Traces Saved Views",
@@ -52,5 +55,7 @@
 	"INTEGRATIONS": "SigNoz | Integrations",
 	"ALERT_HISTORY": "SigNoz | Alert Rule History",
 	"ALERT_OVERVIEW": "SigNoz | Alert Rule Overview",
-	"MESSAGING_QUEUES": "SigNoz | Messaging Queues"
+	"MESSAGING_QUEUES": "SigNoz | Messaging Queues",
+	"INFRASTRUCTURE_MONITORING_HOSTS": "SigNoz | Infra Monitoring",
+	"INFRASTRUCTURE_MONITORING_KUBERNETES": "SigNoz | Infra Monitoring"
 }
@@ -1,40 +1,48 @@
-/* eslint-disable react-hooks/exhaustive-deps */
 import getLocalStorageApi from 'api/browser/localstorage/get';
-import loginApi from 'api/user/login';
-import { Logout } from 'api/utils';
-import Spinner from 'components/Spinner';
+import setLocalStorageApi from 'api/browser/localstorage/set';
+import getOrgUser from 'api/user/getOrgUser';
 import { LOCALSTORAGE } from 'constants/localStorage';
 import ROUTES from 'constants/routes';
-import useLicense from 'hooks/useLicense';
-import { useNotifications } from 'hooks/useNotifications';
 import history from 'lib/history';
-import { ReactChild, useEffect, useMemo } from 'react';
-import { useTranslation } from 'react-i18next';
-import { useDispatch, useSelector } from 'react-redux';
-import { matchPath, Redirect, useLocation } from 'react-router-dom';
-import { Dispatch } from 'redux';
-import { AppState } from 'store/reducers';
-import { getInitialUserTokenRefreshToken } from 'store/utils';
-import AppActions from 'types/actions';
-import { UPDATE_USER_IS_FETCH } from 'types/actions/app';
-import AppReducer from 'types/reducer/app';
+import { isEmpty } from 'lodash-es';
+import { useAppContext } from 'providers/App/App';
+import { ReactChild, useCallback, useEffect, useMemo, useState } from 'react';
+import { useQuery } from 'react-query';
+import { matchPath, useLocation } from 'react-router-dom';
+import { LicenseState, LicenseStatus } from 'types/api/licensesV3/getActive';
+import { Organization } from 'types/api/user/getOrganization';
+import { USER_ROLES } from 'types/roles';
+import { isCloudUser } from 'utils/app';
 import { routePermission } from 'utils/permission';
 
 import routes, {
 	LIST_LICENSES,
 	oldNewRoutesMapping,
 	oldRoutes,
+	ROUTES_NOT_TO_BE_OVERRIDEN,
+	SUPPORT_ROUTE,
 } from './routes';
-import afterLogin from './utils';
 
 function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
 	const location = useLocation();
 	const { pathname } = location;
+	const {
+		org,
+		orgPreferences,
+		user,
+		isLoggedIn: isLoggedInState,
+		isFetchingOrgPreferences,
+		licenses,
+		isFetchingLicenses,
+		activeLicenseV3,
+		isFetchingActiveLicenseV3,
+	} = useAppContext();
+
+	const isAdmin = user.role === USER_ROLES.ADMIN;
 	const mapRoutes = useMemo(
 		() =>
 			new Map(
-				[...routes, LIST_LICENSES].map((e) => {
+				[...routes, LIST_LICENSES, SUPPORT_ROUTE].map((e) => {
 					const currentPath = matchPath(pathname, {
 						path: e.path,
 					});
@@ -43,180 +51,188 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
 			),
 		[pathname],
 	);
 
-	const {
-		data: licensesData,
-		isFetching: isFetchingLicensesData,
-	} = useLicense();
-
-	const {
-		isUserFetching,
-		isUserFetchingError,
-		isLoggedIn: isLoggedInState,
-	} = useSelector<AppState, AppReducer>((state) => state.app);
-
-	const { t } = useTranslation(['common']);
-	const localStorageUserAuthToken = getInitialUserTokenRefreshToken();
-
-	const dispatch = useDispatch<Dispatch<AppActions>>();
-
-	const { notifications } = useNotifications();
-
-	const currentRoute = mapRoutes.get('current');
-
 	const isOldRoute = oldRoutes.indexOf(pathname) > -1;
+	const currentRoute = mapRoutes.get('current');
+	const isCloudUserVal = isCloudUser();
 
-	const isLocalStorageLoggedIn =
-		getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN) === 'true';
+	const [orgData, setOrgData] = useState<Organization | undefined>(undefined);
 
-	const navigateToLoginIfNotLoggedIn = (isLoggedIn = isLoggedInState): void => {
-		dispatch({
-			type: UPDATE_USER_IS_FETCH,
-			payload: {
-				isUserFetching: false,
-			},
-		});
-		if (!isLoggedIn) {
-			history.push(ROUTES.LOGIN, { from: pathname });
-		}
-	};
-
-	const handleUserLoginIfTokenPresent = async (
-		key: keyof typeof ROUTES,
-	): Promise<void> => {
-		if (localStorageUserAuthToken?.refreshJwt) {
-			// localstorage token is present
-
-			// renew web access token
-			const response = await loginApi({
-				refreshToken: localStorageUserAuthToken?.refreshJwt,
-			});
-
-			if (response.statusCode === 200) {
-				const route = routePermission[key];
-
-				// get all resource and put it over redux
-				const userResponse = await afterLogin(
-					response.payload.userId,
-					response.payload.accessJwt,
-					response.payload.refreshJwt,
-				);
-
-				if (
-					userResponse &&
-					route &&
-					route.find((e) => e === userResponse.payload.role) === undefined
-				) {
-					history.push(ROUTES.UN_AUTHORIZED);
-				}
-			} else {
-				Logout();
-
-				notifications.error({
-					message: response.error || t('something_went_wrong'),
-				});
-			}
-		}
-	};
+	const { data: orgUsers, isFetching: isFetchingOrgUsers } = useQuery({
+		queryFn: () => {
+			if (orgData && orgData.id !== undefined) {
+				return getOrgUser({
+					orgId: orgData.id,
+				});
+			}
+			return undefined;
+		},
+		queryKey: ['getOrgUser'],
+		enabled: !isEmpty(orgData) && user.role === 'ADMIN',
+	});
 
-	const handlePrivateRoutes = async (
-		key: keyof typeof ROUTES,
-	): Promise<void> => {
+	const checkFirstTimeUser = useCallback((): boolean => {
+		const users = orgUsers?.payload || [];
+
+		const remainingUsers = users.filter(
+			(user) => user.email !== 'admin@signoz.cloud',
+		);
+
+		return remainingUsers.length === 1;
+	}, [orgUsers?.payload]);
+
+	useEffect(() => {
 		if (
-			localStorageUserAuthToken &&
-			localStorageUserAuthToken.refreshJwt &&
-			isUserFetching
+			isCloudUserVal &&
+			!isFetchingOrgPreferences &&
+			orgPreferences &&
+			!isFetchingOrgUsers &&
+			orgUsers &&
+			orgUsers.payload
 		) {
-			handleUserLoginIfTokenPresent(key);
-		} else {
-			// user does have localstorage values
-
-			navigateToLoginIfNotLoggedIn(isLocalStorageLoggedIn);
+			const isOnboardingComplete = orgPreferences?.find(
+				(preference: Record<string, any>) => preference.key === 'ORG_ONBOARDING',
+			)?.value;
+
+			const isFirstUser = checkFirstTimeUser();
+			if (
+				isFirstUser &&
+				!isOnboardingComplete &&
+				// if the current route is allowed to be overriden by org onboarding then only do the same
+				!ROUTES_NOT_TO_BE_OVERRIDEN.includes(pathname)
+			) {
+				history.push(ROUTES.ONBOARDING);
+			}
 		}
-	};
+	}, [
+		checkFirstTimeUser,
+		isCloudUserVal,
+		isFetchingOrgPreferences,
+		isFetchingOrgUsers,
+		orgPreferences,
+		orgUsers,
+		pathname,
+	]);
 
 	const navigateToWorkSpaceBlocked = (route: any): void => {
 		const { path } = route;
 
-		if (path && path !== ROUTES.WORKSPACE_LOCKED) {
-			history.push(ROUTES.WORKSPACE_LOCKED);
+		const isRouteEnabledForWorkspaceBlockedState =
+			isAdmin &&
+			(path === ROUTES.ORG_SETTINGS ||
+				path === ROUTES.BILLING ||
+				path === ROUTES.MY_SETTINGS);
 
-			dispatch({
-				type: UPDATE_USER_IS_FETCH,
-				payload: {
-					isUserFetching: false,
-				},
-			});
+		if (
+			path &&
+			path !== ROUTES.WORKSPACE_LOCKED &&
+			!isRouteEnabledForWorkspaceBlockedState
+		) {
+			history.push(ROUTES.WORKSPACE_LOCKED);
 		}
 	};
 
 	useEffect(() => {
-		if (!isFetchingLicensesData) {
-			const shouldBlockWorkspace = licensesData?.payload?.workSpaceBlock;
+		if (!isFetchingLicenses) {
+			const currentRoute = mapRoutes.get('current');
+			const shouldBlockWorkspace = licenses?.workSpaceBlock;
 
-			if (shouldBlockWorkspace) {
+			if (shouldBlockWorkspace && currentRoute) {
 				navigateToWorkSpaceBlocked(currentRoute);
 			}
 		}
-	}, [isFetchingLicensesData]);
+		// eslint-disable-next-line react-hooks/exhaustive-deps
+	}, [isFetchingLicenses, licenses?.workSpaceBlock, mapRoutes, pathname]);
+
+	const navigateToWorkSpaceSuspended = (route: any): void => {
+		const { path } = route;
+
+		if (path && path !== ROUTES.WORKSPACE_SUSPENDED) {
+			history.push(ROUTES.WORKSPACE_SUSPENDED);
+		}
+	};
+
+	useEffect(() => {
+		if (!isFetchingActiveLicenseV3 && activeLicenseV3) {
+			const currentRoute = mapRoutes.get('current');
+			const shouldSuspendWorkspace =
+				activeLicenseV3.status === LicenseStatus.SUSPENDED &&
+				activeLicenseV3.state === LicenseState.PAYMENT_FAILED;
+
+			if (shouldSuspendWorkspace && currentRoute) {
+				navigateToWorkSpaceSuspended(currentRoute);
+			}
+		}
+	}, [isFetchingActiveLicenseV3, activeLicenseV3, mapRoutes, pathname]);
+
+	useEffect(() => {
+		if (org && org.length > 0 && org[0].id !== undefined) {
+			setOrgData(org[0]);
+		}
+	}, [org]);
 
 	// eslint-disable-next-line sonarjs/cognitive-complexity
 	useEffect(() => {
-		(async (): Promise<void> => {
-			try {
-				if (isOldRoute) {
-					const redirectUrl = oldNewRoutesMapping[pathname];
-
-					const newLocation = {
-						...location,
-						pathname: redirectUrl,
-					};
-					history.replace(newLocation);
-				}
-
-				if (currentRoute) {
-					const { isPrivate, key } = currentRoute;
-
-					if (isPrivate && key !== String(ROUTES.WORKSPACE_LOCKED)) {
-						handlePrivateRoutes(key);
-					} else {
-						// no need to fetch the user and make user fetching false
-
-						if (getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN) === 'true') {
-							history.push(ROUTES.APPLICATION);
-						}
-						dispatch({
-							type: UPDATE_USER_IS_FETCH,
-							payload: {
-								isUserFetching: false,
-							},
-						});
-					}
-				} else if (pathname === ROUTES.HOME_PAGE) {
-					// routing to application page over root page
-					if (isLoggedInState) {
-						history.push(ROUTES.APPLICATION);
-					} else {
-						navigateToLoginIfNotLoggedIn();
-					}
-				} else {
-					// not found
-					navigateToLoginIfNotLoggedIn(isLocalStorageLoggedIn);
-				}
-			} catch (error) {
-				// something went wrong
-				history.push(ROUTES.SOMETHING_WENT_WRONG);
-			}
-		})();
-	}, [dispatch, isLoggedInState, currentRoute, licensesData]);
-
-	if (isUserFetchingError) {
-		return <Redirect to={ROUTES.SOMETHING_WENT_WRONG} />;
-	}
-
-	if (isUserFetching) {
-		return <Spinner tip="Loading..." />;
-	}
+		// if it is an old route navigate to the new route
+		if (isOldRoute) {
+			const redirectUrl = oldNewRoutesMapping[pathname];
+
+			const newLocation = {
+				...location,
+				pathname: redirectUrl,
+			};
+			history.replace(newLocation);
+			return;
+		}
+		// if the current route
+		if (currentRoute) {
+			const { isPrivate, key } = currentRoute;
+			if (isPrivate) {
+				if (isLoggedInState) {
+					const route = routePermission[key];
+					if (route && route.find((e) => e === user.role) === undefined) {
+						history.push(ROUTES.UN_AUTHORIZED);
+					}
+				} else {
+					setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, pathname);
+					history.push(ROUTES.LOGIN);
+				}
+			} else if (isLoggedInState) {
+				const fromPathname = getLocalStorageApi(
+					LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT,
+				);
+				if (fromPathname) {
+					history.push(fromPathname);
+					setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, '');
+				} else if (pathname !== ROUTES.SOMETHING_WENT_WRONG) {
+					history.push(ROUTES.APPLICATION);
+				}
+			} else {
+				// do nothing as the unauthenticated routes are LOGIN and SIGNUP and the LOGIN container takes care of routing to signup if
+				// setup is not completed
+			}
+		} else if (isLoggedInState) {
+			const fromPathname = getLocalStorageApi(
+				LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT,
+			);
+			if (fromPathname) {
+				history.push(fromPathname);
+				setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, '');
+			} else {
+				history.push(ROUTES.APPLICATION);
+			}
+		} else {
+			setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, pathname);
+			history.push(ROUTES.LOGIN);
+		}
+	}, [
+		licenses,
+		isLoggedInState,
+		pathname,
+		user,
+		isOldRoute,
+		currentRoute,
+		location,
+	]);
 
 	// NOTE: disabling this rule as there is no need to have div
 	// eslint-disable-next-line react/jsx-no-useless-fragment
@@ -1,7 +1,6 @@
 import { ConfigProvider } from 'antd';
 import getLocalStorageApi from 'api/browser/localstorage/get';
 import setLocalStorageApi from 'api/browser/localstorage/set';
-import logEvent from 'api/common/logEvent';
 import NotFound from 'components/NotFound';
 import Spinner from 'components/Spinner';
 import { FeatureKeys } from 'constants/features';
@@ -10,27 +9,21 @@ import ROUTES from 'constants/routes';
 import AppLayout from 'container/AppLayout';
 import useAnalytics from 'hooks/analytics/useAnalytics';
 import { KeyboardHotkeysProvider } from 'hooks/hotkeys/useKeyboardHotkeys';
-import { useIsDarkMode, useThemeConfig } from 'hooks/useDarkMode';
-import { THEME_MODE } from 'hooks/useDarkMode/constant';
-import useFeatureFlags from 'hooks/useFeatureFlag';
-import useGetFeatureFlag from 'hooks/useGetFeatureFlag';
-import useLicense, { LICENSE_PLAN_KEY } from 'hooks/useLicense';
+import { useThemeConfig } from 'hooks/useDarkMode';
+import { LICENSE_PLAN_KEY } from 'hooks/useLicense';
 import { NotificationProvider } from 'hooks/useNotifications';
 import { ResourceProvider } from 'hooks/useResourceAttribute';
 import history from 'lib/history';
-import { identity, pick, pickBy } from 'lodash-es';
+import { identity, pickBy } from 'lodash-es';
 import posthog from 'posthog-js';
 import AlertRuleProvider from 'providers/Alert';
+import { useAppContext } from 'providers/App/App';
+import { IUser } from 'providers/App/types';
 import { DashboardProvider } from 'providers/Dashboard/Dashboard';
 import { QueryBuilderProvider } from 'providers/QueryBuilder';
-import { Suspense, useEffect, useState } from 'react';
-import { useDispatch, useSelector } from 'react-redux';
+import { Suspense, useCallback, useEffect, useState } from 'react';
 import { Route, Router, Switch } from 'react-router-dom';
-import { Dispatch } from 'redux';
-import { AppState } from 'store/reducers';
-import AppActions from 'types/actions';
-import { UPDATE_FEATURE_FLAG_RESPONSE } from 'types/actions/app';
-import AppReducer, { User } from 'types/reducer/app';
+import { CompatRouter } from 'react-router-dom-v5-compat';
 import { extractDomain, isCloudUser, isEECloudUser } from 'utils/app';
 
 import PrivateRoute from './Private';
@@ -42,14 +35,20 @@ import defaultRoutes, {
 
 function App(): JSX.Element {
 	const themeConfig = useThemeConfig();
-	const { data: licenseData } = useLicense();
+	const {
+		licenses,
+		user,
+		isFetchingUser,
+		isFetchingLicenses,
+		isFetchingFeatureFlags,
+		userFetchError,
+		licensesFetchError,
+		featureFlagsFetchError,
+		isLoggedIn: isLoggedInState,
+		featureFlags,
+		org,
+	} = useAppContext();
 	const [routes, setRoutes] = useState<AppRoutes[]>(defaultRoutes);
-	const { role, isLoggedIn: isLoggedInState, user, org } = useSelector<
-		AppState,
-		AppReducer
-	>((state) => state.app);
-
-	const dispatch = useDispatch<Dispatch<AppActions>>();
 
 	const { trackPageView } = useAnalytics();
 
@@ -57,218 +56,242 @@ function App(): JSX.Element {

 	const isCloudUserVal = isCloudUser();

-	const isDarkMode = useIsDarkMode();
-
-	const isChatSupportEnabled =
-		useFeatureFlags(FeatureKeys.CHAT_SUPPORT)?.active || false;
-
-	const isPremiumSupportEnabled =
-		useFeatureFlags(FeatureKeys.PREMIUM_SUPPORT)?.active || false;
-
-	const featureResponse = useGetFeatureFlag((allFlags) => {
-		dispatch({
-			type: UPDATE_FEATURE_FLAG_RESPONSE,
-			payload: {
-				featureFlag: allFlags,
-				refetch: featureResponse.refetch,
-			},
-		});
-
-		const isOnboardingEnabled =
-			allFlags.find((flag) => flag.name === FeatureKeys.ONBOARDING)?.active ||
-			false;
-
-		if (!isOnboardingEnabled || !isCloudUserVal) {
-			const newRoutes = routes.filter(
-				(route) => route?.path !== ROUTES.GET_STARTED,
-			);
-
-			setRoutes(newRoutes);
-		}
-	});
-
-	const isOnBasicPlan =
-		licenseData?.payload?.licenses?.some(
-			(license) =>
-				license.isCurrent && license.planKey === LICENSE_PLAN_KEY.BASIC_PLAN,
-		) || licenseData?.payload?.licenses === null;
-
-	const enableAnalytics = (user: User): void => {
-		const orgName =
-			org && Array.isArray(org) && org.length > 0 ? org[0].name : '';
-
-		const { name, email } = user;
-
-		const identifyPayload = {
-			email,
-			name,
-			company_name: orgName,
-			role,
-			source: 'signoz-ui',
-		};
-
-		const sanitizedIdentifyPayload = pickBy(identifyPayload, identity);
-		const domain = extractDomain(email);
-		const hostNameParts = hostname.split('.');
-
-		const groupTraits = {
-			name: orgName,
-			tenant_id: hostNameParts[0],
-			data_region: hostNameParts[1],
-			tenant_url: hostname,
-			company_domain: domain,
-			source: 'signoz-ui',
-		};
-
-		window.analytics.identify(email, sanitizedIdentifyPayload);
-		window.analytics.group(domain, groupTraits);
-
-		posthog?.identify(email, {
-			email,
-			name,
-			orgName,
-			tenant_id: hostNameParts[0],
-			data_region: hostNameParts[1],
-			tenant_url: hostname,
-			company_domain: domain,
-			source: 'signoz-ui',
-			isPaidUser: !!licenseData?.payload?.trialConvertedToSubscription,
-		});
-
-		posthog?.group('company', domain, {
-			name: orgName,
-			tenant_id: hostNameParts[0],
-			data_region: hostNameParts[1],
-			tenant_url: hostname,
-			company_domain: domain,
-			source: 'signoz-ui',
-			isPaidUser: !!licenseData?.payload?.trialConvertedToSubscription,
-		});
-	};
+	const enableAnalytics = useCallback(
+		(user: IUser): void => {
+			// wait for the required data to be loaded before doing init for anything!
+			if (!isFetchingLicenses && licenses && org) {
+				const orgName =
+					org && Array.isArray(org) && org.length > 0 ? org[0].name : '';
+
+				const { name, email, role } = user;
+
+				const identifyPayload = {
+					email,
+					name,
+					company_name: orgName,
+					role,
+					source: 'signoz-ui',
+				};
+
+				const sanitizedIdentifyPayload = pickBy(identifyPayload, identity);
+				const domain = extractDomain(email);
+				const hostNameParts = hostname.split('.');
+
+				const groupTraits = {
+					name: orgName,
+					tenant_id: hostNameParts[0],
+					data_region: hostNameParts[1],
+					tenant_url: hostname,
+					company_domain: domain,
+					source: 'signoz-ui',
+				};
+
+				window.analytics.identify(email, sanitizedIdentifyPayload);
+				window.analytics.group(domain, groupTraits);
+
+				posthog?.identify(email, {
+					email,
+					name,
+					orgName,
+					tenant_id: hostNameParts[0],
+					data_region: hostNameParts[1],
+					tenant_url: hostname,
+					company_domain: domain,
+					source: 'signoz-ui',
+					isPaidUser: !!licenses?.trialConvertedToSubscription,
+				});
+
+				posthog?.group('company', domain, {
+					name: orgName,
+					tenant_id: hostNameParts[0],
+					data_region: hostNameParts[1],
+					tenant_url: hostname,
+					company_domain: domain,
+					source: 'signoz-ui',
+					isPaidUser: !!licenses?.trialConvertedToSubscription,
+				});
+			}
+		},
+		[hostname, isFetchingLicenses, licenses, org],
+	);

+	// eslint-disable-next-line sonarjs/cognitive-complexity
 	useEffect(() => {
-		const isIdentifiedUser = getLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER);
-
 		if (
-			isLoggedInState &&
+			!isFetchingLicenses &&
+			licenses &&
+			!isFetchingUser &&
 			user &&
-			user.userId &&
-			user.email &&
-			!isIdentifiedUser
+			!!user.email
 		) {
-			setLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER, 'true');
-		}
+			const isOnBasicPlan =
+				licenses.licenses?.some(
+					(license) =>
+						license.isCurrent && license.planKey === LICENSE_PLAN_KEY.BASIC_PLAN,
+				) || licenses.licenses === null;

-		if (
-			isOnBasicPlan ||
-			(isLoggedInState && role && role !== 'ADMIN') ||
-			!(isCloudUserVal || isEECloudUser())
-		) {
-			const newRoutes = routes.filter((route) => route?.path !== ROUTES.BILLING);
-			setRoutes(newRoutes);
-		}
+			const isIdentifiedUser = getLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER);

-		if (isCloudUserVal || isEECloudUser()) {
-			const newRoutes = [...routes, SUPPORT_ROUTE];
-
-			setRoutes(newRoutes);
-		} else {
-			const newRoutes = [...routes, LIST_LICENSES];
-
-			setRoutes(newRoutes);
-		}
-
-		// eslint-disable-next-line react-hooks/exhaustive-deps
-	}, [isLoggedInState, isOnBasicPlan, user]);
-
-	useEffect(() => {
-		trackPageView(pathname);
-		// eslint-disable-next-line react-hooks/exhaustive-deps
-	}, [pathname]);
-
-	useEffect(() => {
-		const showAddCreditCardModal =
-			!isPremiumSupportEnabled &&
-			!licenseData?.payload?.trialConvertedToSubscription;
-
-		if (isLoggedInState && isChatSupportEnabled && !showAddCreditCardModal) {
-			window.Intercom('boot', {
-				app_id: process.env.INTERCOM_APP_ID,
-				email: user?.email || '',
-				name: user?.name || '',
-			});
+			if (isLoggedInState && user && user.id && user.email && !isIdentifiedUser) {
+				setLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER, 'true');
+			}
+
+			let updatedRoutes = defaultRoutes;
+			// if the user is a cloud user
+			if (isCloudUserVal || isEECloudUser()) {
+				// if the user is on basic plan then remove billing
+				if (isOnBasicPlan) {
+					updatedRoutes = updatedRoutes.filter(
+						(route) => route?.path !== ROUTES.BILLING,
+					);
+				}
+				// always add support route for cloud users
+				updatedRoutes = [...updatedRoutes, SUPPORT_ROUTE];
+			} else {
+				// if not a cloud user then remove billing and add list licenses route
+				updatedRoutes = updatedRoutes.filter(
+					(route) => route?.path !== ROUTES.BILLING,
+				);
+				updatedRoutes = [...updatedRoutes, LIST_LICENSES];
+			}
+			setRoutes(updatedRoutes);
 		}
 	}, [
 		isLoggedInState,
-		isChatSupportEnabled,
 		user,
-		licenseData,
-		isPremiumSupportEnabled,
+		licenses,
+		isCloudUserVal,
+		isFetchingLicenses,
+		isFetchingUser,
 	]);

 	useEffect(() => {
-		if (user && user?.email && user?.userId && user?.name) {
-			try {
-				const isThemeAnalyticsSent = getLocalStorageApi(
-					LOCALSTORAGE.THEME_ANALYTICS_V1,
-				);
-				if (!isThemeAnalyticsSent) {
-					logEvent('Theme Analytics', {
-						theme: isDarkMode ? THEME_MODE.DARK : THEME_MODE.LIGHT,
-						user: pick(user, ['email', 'userId', 'name']),
-						org,
-					});
-					setLocalStorageApi(LOCALSTORAGE.THEME_ANALYTICS_V1, 'true');
-				}
-			} catch {
-				console.error('Failed to parse local storage theme analytics event');
-			}
+		if (pathname === ROUTES.ONBOARDING) {
+			window.Intercom('update', {
+				hide_default_launcher: true,
+			});
+		} else {
+			window.Intercom('update', {
+				hide_default_launcher: false,
+			});
 		}

-		if (isCloudUserVal && user && user.email) {
-			enableAnalytics(user);
-		}
-
-		// eslint-disable-next-line react-hooks/exhaustive-deps
-	}, [user]);
+		trackPageView(pathname);
+	}, [pathname, trackPageView]);

 	useEffect(() => {
-		console.info('We are hiring! https://jobs.gem.com/signoz');
-	}, []);
+		// feature flag shouldn't be loading and featureFlags or fetchError any one of this should be true indicating that req is complete
+		// licenses should also be present. there is no check for licenses for loading and error as that is mandatory if not present then routing
+		// to something went wrong which would ideally need a reload.
+		if (
+			!isFetchingFeatureFlags &&
+			(featureFlags || featureFlagsFetchError) &&
+			licenses
+		) {
+			let isChatSupportEnabled = false;
+			let isPremiumSupportEnabled = false;
+			if (featureFlags && featureFlags.length > 0) {
+				isChatSupportEnabled =
+					featureFlags.find((flag) => flag.name === FeatureKeys.CHAT_SUPPORT)
+						?.active || false;
+
+				isPremiumSupportEnabled =
+					featureFlags.find((flag) => flag.name === FeatureKeys.PREMIUM_SUPPORT)
+						?.active || false;
+			}
+			const showAddCreditCardModal =
+				!isPremiumSupportEnabled && !licenses.trialConvertedToSubscription;
+
+			if (isLoggedInState && isChatSupportEnabled && !showAddCreditCardModal) {
+				window.Intercom('boot', {
+					app_id: process.env.INTERCOM_APP_ID,
+					email: user?.email || '',
+					name: user?.name || '',
+				});
+			}
+		}
+	}, [
+		isLoggedInState,
+		user,
+		pathname,
+		licenses?.trialConvertedToSubscription,
+		featureFlags,
+		isFetchingFeatureFlags,
+		featureFlagsFetchError,
+		licenses,
+	]);
+
+	useEffect(() => {
+		if (!isFetchingUser && isCloudUserVal && user && user.email) {
+			enableAnalytics(user);
+		}
+	}, [user, isFetchingUser, isCloudUserVal, enableAnalytics]);
+
+	// if the user is in logged in state
+	if (isLoggedInState) {
+		if (pathname === ROUTES.HOME_PAGE) {
+			history.replace(ROUTES.APPLICATION);
+		}
+		// if the setup calls are loading then return a spinner
+		if (isFetchingLicenses || isFetchingUser || isFetchingFeatureFlags) {
+			return <Spinner tip="Loading..." />;
+		}
+
+		// if the required calls fails then return a something went wrong error
+		// this needs to be on top of data missing error because if there is an error, data will never be loaded and it will
+		// move to indefinitive loading
+		if (
+			(userFetchError || licensesFetchError) &&
+			pathname !== ROUTES.SOMETHING_WENT_WRONG
+		) {
+			history.replace(ROUTES.SOMETHING_WENT_WRONG);
+		}
+
+		// if all of the data is not set then return a spinner, this is required because there is some gap between loading states and data setting
+		if (
+			(!licenses || !user.email || !featureFlags) &&
+			!userFetchError &&
+			!licensesFetchError
+		) {
+			return <Spinner tip="Loading..." />;
+		}
+	}

 	return (
 		<ConfigProvider theme={themeConfig}>
 			<Router history={history}>
+				<CompatRouter>
 					<NotificationProvider>
 						<PrivateRoute>
 							<ResourceProvider>
 								<QueryBuilderProvider>
 									<DashboardProvider>
 										<KeyboardHotkeysProvider>
 											<AlertRuleProvider>
 												<AppLayout>
 													<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
 														<Switch>
 															{routes.map(({ path, component, exact }) => (
 																<Route
 																	key={`${path}`}
 																	exact={exact}
 																	path={path}
 																	component={component}
 																/>
 															))}

 															<Route path="*" component={NotFound} />
 														</Switch>
 													</Suspense>
 												</AppLayout>
 											</AlertRuleProvider>
 										</KeyboardHotkeysProvider>
 									</DashboardProvider>
 								</QueryBuilderProvider>
 							</ResourceProvider>
 						</PrivateRoute>
 					</NotificationProvider>
+				</CompatRouter>
 			</Router>
 		</ConfigProvider>
 	);
@@ -66,6 +66,10 @@ export const Onboarding = Loadable(
 	() => import(/* webpackChunkName: "Onboarding" */ 'pages/OnboardingPage'),
 );

+export const OrgOnboarding = Loadable(
+	() => import(/* webpackChunkName: "OrgOnboarding" */ 'pages/OrgOnboarding'),
+);
+
 export const DashboardPage = Loadable(
 	() =>
 		import(/* webpackChunkName: "DashboardPage" */ 'pages/DashboardsListPage'),
@@ -141,6 +145,11 @@ export const MySettings = Loadable(
 	() => import(/* webpackChunkName: "All MySettings" */ 'pages/MySettings'),
 );

+export const CustomDomainSettings = Loadable(
+	() =>
+		import(/* webpackChunkName: "Custom Domain Settings" */ 'pages/Settings'),
+);
+
 export const Logs = Loadable(
 	() => import(/* webpackChunkName: "Logs" */ 'pages/LogsModulePage'),
 );
@@ -176,7 +185,7 @@ export const PasswordReset = Loadable(
 export const SomethingWentWrong = Loadable(
 	() =>
 		import(
-			/* webpackChunkName: "SomethingWentWrong" */ 'pages/SomethingWentWrong'
+			/* webpackChunkName: "ErrorBoundaryFallback" */ 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback'
 		),
 );

@@ -202,6 +211,13 @@ export const WorkspaceBlocked = Loadable(
 		import(/* webpackChunkName: "WorkspaceLocked" */ 'pages/WorkspaceLocked'),
 );

+export const WorkspaceSuspended = Loadable(
+	() =>
+		import(
+			/* webpackChunkName: "WorkspaceSuspended" */ 'pages/WorkspaceSuspended/WorkspaceSuspended'
+		),
+);
+
 export const ShortcutsPage = Loadable(
 	() => import(/* webpackChunkName: "ShortcutsPage" */ 'pages/Shortcuts'),
 );
@@ -224,3 +240,10 @@ export const MQDetailPage = Loadable(
 		/* webpackChunkName: "MQDetailPage" */ 'pages/MessagingQueues/MQDetailPage'
 	),
 );
+
+export const InfrastructureMonitoring = Loadable(
+	() =>
+		import(
+			/* webpackChunkName: "InfrastructureMonitoring" */ 'pages/InfrastructureMonitoring'
+		),
+);
@@ -10,11 +10,13 @@ import {
 	BillingPage,
 	CreateAlertChannelAlerts,
 	CreateNewAlerts,
+	CustomDomainSettings,
 	DashboardPage,
 	DashboardWidget,
 	EditAlertChannelsAlerts,
 	EditRulesPage,
 	ErrorDetails,
+	InfrastructureMonitoring,
 	IngestionSettings,
 	InstalledIntegrations,
 	LicensePage,
@@ -32,6 +34,7 @@ import {
 	OldLogsExplorer,
 	Onboarding,
 	OrganizationSettings,
+	OrgOnboarding,
 	PasswordReset,
 	PipelinePage,
 	ServiceMapPage,
@@ -51,6 +54,7 @@ import {
 	UnAuthorized,
 	UsageExplorerPage,
 	WorkspaceBlocked,
+	WorkspaceSuspended,
 } from './pageComponents';

 const routes: AppRoutes[] = [
@@ -68,6 +72,13 @@ const routes: AppRoutes[] = [
 		isPrivate: true,
 		key: 'GET_STARTED',
 	},
+	{
+		path: ROUTES.ONBOARDING,
+		exact: false,
+		component: OrgOnboarding,
+		isPrivate: true,
+		key: 'ONBOARDING',
+	},
 	{
 		component: LogsIndexToFields,
 		path: ROUTES.LOGS_INDEX_FIELDS,
@@ -278,6 +289,13 @@ const routes: AppRoutes[] = [
 		isPrivate: true,
 		key: 'MY_SETTINGS',
 	},
+	{
+		path: ROUTES.CUSTOM_DOMAIN_SETTINGS,
+		exact: true,
+		component: CustomDomainSettings,
+		isPrivate: true,
+		key: 'CUSTOM_DOMAIN_SETTINGS',
+	},
 	{
 		path: ROUTES.LOGS,
 		exact: true,
@@ -355,6 +373,13 @@ const routes: AppRoutes[] = [
 		isPrivate: true,
 		key: 'WORKSPACE_LOCKED',
 	},
+	{
+		path: ROUTES.WORKSPACE_SUSPENDED,
+		exact: true,
+		component: WorkspaceSuspended,
+		isPrivate: true,
+		key: 'WORKSPACE_SUSPENDED',
+	},
 	{
 		path: ROUTES.SHORTCUTS,
 		exact: true,
@@ -383,6 +408,20 @@ const routes: AppRoutes[] = [
 		key: 'MESSAGING_QUEUES_DETAIL',
 		isPrivate: true,
 	},
+	{
+		path: ROUTES.INFRASTRUCTURE_MONITORING_HOSTS,
+		exact: true,
+		component: InfrastructureMonitoring,
+		key: 'INFRASTRUCTURE_MONITORING_HOSTS',
+		isPrivate: true,
+	},
+	{
+		path: ROUTES.INFRASTRUCTURE_MONITORING_KUBERNETES,
+		exact: true,
+		component: InfrastructureMonitoring,
+		key: 'INFRASTRUCTURE_MONITORING_KUBERNETES',
+		isPrivate: true,
+	},
 ];

 export const SUPPORT_ROUTE: AppRoutes = {
@@ -403,24 +442,27 @@ export const LIST_LICENSES: AppRoutes = {

 export const oldRoutes = [
 	'/pipelines',
-	'/logs/old-logs-explorer',
 	'/logs-explorer',
 	'/logs-explorer/live',
 	'/logs-save-views',
 	'/traces-save-views',
-	'/settings/api-keys',
+	'/settings/access-tokens',
 ];

 export const oldNewRoutesMapping: Record<string, string> = {
 	'/pipelines': '/logs/pipelines',
-	'/logs/old-logs-explorer': '/logs/old-logs-explorer',
 	'/logs-explorer': '/logs/logs-explorer',
 	'/logs-explorer/live': '/logs/logs-explorer/live',
 	'/logs-save-views': '/logs/saved-views',
 	'/traces-save-views': '/traces/saved-views',
-	'/settings/api-keys': '/settings/access-tokens',
+	'/settings/access-tokens': '/settings/api-keys',
 };

+export const ROUTES_NOT_TO_BE_OVERRIDEN: string[] = [
+	ROUTES.WORKSPACE_LOCKED,
+	ROUTES.WORKSPACE_SUSPENDED,
+];
+
 export interface AppRoutes {
 	component: RouteProps['component'];
 	path: RouteProps['path'];
@@ -1,92 +1,28 @@
-import getLocalStorageApi from 'api/browser/localstorage/get';
 import setLocalStorageApi from 'api/browser/localstorage/set';
-import getUserApi from 'api/user/getUser';
-import { Logout } from 'api/utils';
 import { LOCALSTORAGE } from 'constants/localStorage';
-import store from 'store';
-import AppActions from 'types/actions';
-import {
-	LOGGED_IN,
-	UPDATE_USER,
-	UPDATE_USER_ACCESS_REFRESH_ACCESS_TOKEN,
-	UPDATE_USER_IS_FETCH,
-} from 'types/actions/app';
-import { SuccessResponse } from 'types/api';
-import { PayloadProps } from 'types/api/user/getUser';

-const afterLogin = async (
+const afterLogin = (
 	userId: string,
 	authToken: string,
 	refreshToken: string,
-): Promise<SuccessResponse<PayloadProps> | undefined> => {
+	interceptorRejected?: boolean,
+): void => {
 	setLocalStorageApi(LOCALSTORAGE.AUTH_TOKEN, authToken);
 	setLocalStorageApi(LOCALSTORAGE.REFRESH_AUTH_TOKEN, refreshToken);
+	setLocalStorageApi(LOCALSTORAGE.USER_ID, userId);
+	setLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN, 'true');

-	store.dispatch<AppActions>({
-		type: UPDATE_USER_ACCESS_REFRESH_ACCESS_TOKEN,
-		payload: {
-			accessJwt: authToken,
-			refreshJwt: refreshToken,
-		},
-	});
-
-	const [getUserResponse] = await Promise.all([
-		getUserApi({
-			userId,
-			token: authToken,
-		}),
-	]);
-
-	if (getUserResponse.statusCode === 200 && getUserResponse.payload) {
-		store.dispatch<AppActions>({
-			type: LOGGED_IN,
-			payload: {
-				isLoggedIn: true,
-			},
-		});
-
-		const { payload } = getUserResponse;
-
-		store.dispatch<AppActions>({
-			type: UPDATE_USER,
-			payload: {
-				ROLE: payload.role,
-				email: payload.email,
-				name: payload.name,
-				orgName: payload.organization,
-				profilePictureURL: payload.profilePictureURL,
-				userId: payload.id,
-				orgId: payload.orgId,
-				userFlags: payload.flags,
-			},
-		});
-
-		const isLoggedInLocalStorage = getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN);
-
-		if (isLoggedInLocalStorage === null) {
-			setLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN, 'true');
-		}
-
-		store.dispatch({
-			type: UPDATE_USER_IS_FETCH,
-			payload: {
-				isUserFetching: false,
-			},
-		});
-
-		return getUserResponse;
+	if (!interceptorRejected) {
+		window.dispatchEvent(
+			new CustomEvent('AFTER_LOGIN', {
+				detail: {
+					accessJWT: authToken,
+					refreshJWT: refreshToken,
+					id: userId,
+				},
+			}),
+		);
 	}
-
-	store.dispatch({
-		type: UPDATE_USER_IS_FETCH,
-		payload: {
-			isUserFetching: false,
-		},
-	});
-
-	Logout();
-
-	return undefined;
 };

 export default afterLogin;
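afterLogin now persists the tokens to localStorage and announces the login through a DOM CustomEvent instead of dispatching to the Redux store. Below is a minimal TypeScript sketch of a subscriber; the AfterLoginDetail interface and the listener body are illustrative assumptions, while the event name ('AFTER_LOGIN') and the detail fields come from the dispatch in the diff above.

// Hypothetical consumer sketch; not part of this diff.
interface AfterLoginDetail {
	accessJWT: string;
	refreshJWT: string;
	id: string;
}

window.addEventListener('AFTER_LOGIN', (event: Event): void => {
	// The detail shape mirrors what afterLogin dispatches above.
	const { detail } = event as CustomEvent<AfterLoginDetail>;
	// e.g. hydrate an app-level auth context instead of the old Redux store
	console.log('logged in as user', detail.id);
});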
@@ -7,7 +7,6 @@ const create = async (
 ): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
 	const response = await axios.post('/rules', {
 		...props.data,
-		version: 'v4',
 	});

 	return {
@@ -4,6 +4,7 @@ export const apiV2 = '/api/v2/';
 export const apiV3 = '/api/v3/';
 export const apiV4 = '/api/v4/';
 export const gatewayApiV1 = '/api/gateway/v1/';
+export const gatewayApiV2 = '/api/gateway/v2/';
 export const apiAlertManager = '/api/alertmanager/';

 export default apiV1;
frontend/src/api/customDomain/getDeploymentsData.ts (new file, 7 lines)
@@ -0,0 +1,7 @@
+import { GatewayApiV2Instance as axios } from 'api';
+import { AxiosResponse } from 'axios';
+import { DeploymentsDataProps } from 'types/api/customDomain/types';
+
+export const getDeploymentsData = (): Promise<
+	AxiosResponse<DeploymentsDataProps>
+> => axios.get(`/deployments/me`);
frontend/src/api/customDomain/updateSubDomain.ts (new file, 16 lines)
@@ -0,0 +1,16 @@
+import { GatewayApiV2Instance as axios } from 'api';
+import { AxiosError } from 'axios';
+import { SuccessResponse } from 'types/api';
+import {
+	PayloadProps,
+	UpdateCustomDomainProps,
+} from 'types/api/customDomain/types';
+
+const updateSubDomainAPI = async (
+	props: UpdateCustomDomainProps,
+): Promise<SuccessResponse<PayloadProps> | AxiosError> =>
+	axios.put(`/deployments/me/host`, {
+		...props.data,
+	});
+
+export default updateSubDomainAPI;
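Both custom-domain endpoints above are thin wrappers over the new GatewayApiV2Instance. A hypothetical usage sketch follows; the inner fields of UpdateCustomDomainProps.data are not shown in this diff, so the `name` field here is an assumption for illustration only.

// Hypothetical usage sketch; `data.name` is an assumed field, since
// UpdateCustomDomainProps is defined outside this diff.
import { getDeploymentsData } from 'api/customDomain/getDeploymentsData';
import updateSubDomainAPI from 'api/customDomain/updateSubDomain';

async function renameSubDomain(newHostName: string): Promise<void> {
	await updateSubDomainAPI({ data: { name: newHostName } });
	const deployments = await getDeploymentsData();
	console.log(deployments.data); // current deployment info for this tenant
}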
@@ -7,7 +7,6 @@ import afterLogin from 'AppRoutes/utils';
 import axios, { AxiosResponse, InternalAxiosRequestConfig } from 'axios';
 import { ENVIRONMENT } from 'constants/env';
 import { LOCALSTORAGE } from 'constants/localStorage';
-import store from 'store';

 import apiV1, {
 	apiAlertManager,
@@ -15,6 +14,7 @@ import apiV1, {
 	apiV3,
 	apiV4,
 	gatewayApiV1,
+	gatewayApiV2,
 } from './apiV1';
 import { Logout } from './utils';

@@ -25,10 +25,7 @@ const interceptorsResponse = (
 const interceptorsRequestResponse = (
 	value: InternalAxiosRequestConfig,
 ): InternalAxiosRequestConfig => {
-	const token =
-		store.getState().app.user?.accessJwt ||
-		getLocalStorageApi(LOCALSTORAGE.AUTH_TOKEN) ||
-		'';
+	const token = getLocalStorageApi(LOCALSTORAGE.AUTH_TOKEN) || '';

 	if (value && value.headers) {
 		value.headers.Authorization = token ? `Bearer ${token}` : '';
@@ -46,41 +43,36 @@ const interceptorRejected = async (
 	// reject the refresh token error
 	if (response.status === 401 && response.config.url !== '/login') {
 		const response = await loginApi({
-			refreshToken: store.getState().app.user?.refreshJwt,
+			refreshToken: getLocalStorageApi(LOCALSTORAGE.REFRESH_AUTH_TOKEN) || '',
 		});

 		if (response.statusCode === 200) {
-			const user = await afterLogin(
+			afterLogin(
 				response.payload.userId,
 				response.payload.accessJwt,
 				response.payload.refreshJwt,
+				true,
 			);

-			if (user) {
-				const reResponse = await axios(
-					`${value.config.baseURL}${value.config.url?.substring(1)}`,
-					{
-						method: value.config.method,
-						headers: {
-							...value.config.headers,
-							Authorization: `Bearer ${response.payload.accessJwt}`,
-						},
-						data: {
-							...JSON.parse(value.config.data || '{}'),
-						},
-					},
-				);
+			const reResponse = await axios(
+				`${value.config.baseURL}${value.config.url?.substring(1)}`,
+				{
+					method: value.config.method,
+					headers: {
+						...value.config.headers,
+						Authorization: `Bearer ${response.payload.accessJwt}`,
+					},
+					data: {
+						...JSON.parse(value.config.data || '{}'),
+					},
+				},
+			);

-				if (reResponse.status === 200) {
-					return await Promise.resolve(reResponse);
-				}
-				Logout();
-
-				return await Promise.reject(reResponse);
+			if (reResponse.status === 200) {
+				return await Promise.resolve(reResponse);
 			}
 			Logout();
-
-			return await Promise.reject(value);
+
+			return await Promise.reject(reResponse);
 		}
 		Logout();
 	}
@@ -169,6 +161,19 @@ GatewayApiV1Instance.interceptors.response.use(
 GatewayApiV1Instance.interceptors.request.use(interceptorsRequestResponse);
 //

+// gateway Api V2
+export const GatewayApiV2Instance = axios.create({
+	baseURL: `${ENVIRONMENT.baseURL}${gatewayApiV2}`,
+});
+
+GatewayApiV2Instance.interceptors.response.use(
+	interceptorsResponse,
+	interceptorRejected,
+);
+
+GatewayApiV2Instance.interceptors.request.use(interceptorsRequestResponse);
+//
+
 AxiosAlertManagerInstance.interceptors.response.use(
 	interceptorsResponse,
 	interceptorRejected,
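The rejected-response interceptor above now reads the refresh token from localStorage rather than the Redux store, and passes `true` to afterLogin so the refresh path skips re-dispatching the login event. A simplified TypeScript model of that 401 flow, with illustrative names only (not the module's actual exports):

// Sketch only: on a 401, exchange the stored refresh token for a new
// access token, persist it (afterLogin with interceptorRejected = true),
// and replay the original request with the new Authorization header.
type RefreshFn = () => Promise<{ accessJwt: string }>;
type ReplayFn = (accessJwt: string) => Promise<unknown>;

async function refreshAndReplay(
	refresh: RefreshFn, // models loginApi({ refreshToken })
	replay: ReplayFn, // models re-issuing the failed axios request
): Promise<unknown> {
	const { accessJwt } = await refresh();
	return replay(accessJwt);
}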
frontend/src/api/infra/getHostAttributeKeys.ts (new file, 44 lines)
@@ -0,0 +1,44 @@
+import { ApiBaseInstance } from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError, AxiosResponse } from 'axios';
+import { baseAutoCompleteIdKeysOrder } from 'constants/queryBuilder';
+import { K8sCategory } from 'container/InfraMonitoringK8s/constants';
+import { createIdFromObjectFields } from 'lib/createIdFromObjectFields';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import {
+	BaseAutocompleteData,
+	IQueryAutocompleteResponse,
+} from 'types/api/queryBuilder/queryAutocompleteResponse';
+
+export const getHostAttributeKeys = async (
+	searchText = '',
+	entity: K8sCategory,
+): Promise<SuccessResponse<IQueryAutocompleteResponse> | ErrorResponse> => {
+	try {
+		const response: AxiosResponse<{
+			data: IQueryAutocompleteResponse;
+		}> = await ApiBaseInstance.get(
+			`/${entity}/attribute_keys?dataSource=metrics&searchText=${searchText}`,
+			{
+				params: {
+					limit: 500,
+				},
+			},
+		);
+
+		const payload: BaseAutocompleteData[] =
+			response.data.data.attributeKeys?.map(({ id: _, ...item }) => ({
+				...item,
+				id: createIdFromObjectFields(item, baseAutoCompleteIdKeysOrder),
+			})) || [];
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: response.statusText,
+			payload: { attributeKeys: payload },
+		};
+	} catch (e) {
+		return ErrorResponseHandler(e as AxiosError);
+	}
+};
frontend/src/api/infraMonitoring/getHostLists.ts (new file, 77 lines)
@@ -0,0 +1,77 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
+import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
+
+export interface HostListPayload {
+	filters: TagFilter;
+	groupBy: BaseAutocompleteData[];
+	offset?: number;
+	limit?: number;
+	orderBy?: {
+		columnName: string;
+		order: 'asc' | 'desc';
+	};
+}
+
+export interface TimeSeriesValue {
+	timestamp: number;
+	value: string;
+}
+
+export interface TimeSeries {
+	labels: Record<string, string>;
+	labelsArray: Array<Record<string, string>>;
+	values: TimeSeriesValue[];
+}
+
+export interface HostData {
+	hostName: string;
+	active: boolean;
+	os: string;
+	cpu: number;
+	cpuTimeSeries: TimeSeries;
+	memory: number;
+	memoryTimeSeries: TimeSeries;
+	wait: number;
+	waitTimeSeries: TimeSeries;
+	load15: number;
+	load15TimeSeries: TimeSeries;
+}
+
+export interface HostListResponse {
+	status: string;
+	data: {
+		type: string;
+		records: HostData[];
+		groups: null;
+		total: number;
+		sentAnyHostMetricsData: boolean;
+		isSendingK8SAgentMetrics: boolean;
+	};
+}
+
+export const getHostLists = async (
+	props: HostListPayload,
+	signal?: AbortSignal,
+	headers?: Record<string, string>,
+): Promise<SuccessResponse<HostListResponse> | ErrorResponse> => {
+	try {
+		const response = await axios.post('/hosts/list', props, {
+			signal,
+			headers,
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+			params: props,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
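A hypothetical caller sketch for the new hosts list endpoint. HostListPayload comes from the file above; the empty-filter shape ({ items: [], op: 'AND' }) is an assumption about TagFilter, which is defined outside this diff.

// Sketch only, under the TagFilter assumption stated above.
import { getHostLists, HostListPayload } from 'api/infraMonitoring/getHostLists';

const payload: HostListPayload = {
	filters: { items: [], op: 'AND' },
	groupBy: [],
	offset: 0,
	limit: 10,
	orderBy: { columnName: 'cpu', order: 'desc' },
};

getHostLists(payload).then((res) => {
	if (res.statusCode === 200 && res.payload) {
		// records carry per-host CPU/memory plus sparkline time series
		console.log(res.payload.data.records.map((host) => host.hostName));
	}
});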
frontend/src/api/infraMonitoring/getInfraAttributeValues.ts (new file, 40 lines)
@@ -0,0 +1,40 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import createQueryParams from 'lib/createQueryParams';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import {
+	IAttributeValuesResponse,
+	IGetAttributeValuesPayload,
+} from 'types/api/queryBuilder/getAttributesValues';
+
+export const getInfraAttributesValues = async ({
+	dataSource,
+	attributeKey,
+	filterAttributeKeyDataType,
+	tagType,
+	searchText,
+	aggregateAttribute,
+}: IGetAttributeValuesPayload): Promise<
+	SuccessResponse<IAttributeValuesResponse> | ErrorResponse
+> => {
+	try {
+		const response = await axios.get(
+			`/hosts/attribute_values?${createQueryParams({
+				dataSource,
+				attributeKey,
+				searchText,
+				aggregateAttribute,
+			})}&filterAttributeKeyDataType=${filterAttributeKeyDataType}&tagType=${tagType}`,
+		);
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: response.data.status,
+			payload: response.data.data,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
frontend/src/api/infraMonitoring/getK8sClustersList.ts (new file, 64 lines)
@@ -0,0 +1,64 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
+import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
+
+export interface K8sClustersListPayload {
+	filters: TagFilter;
+	groupBy?: BaseAutocompleteData[];
+	offset?: number;
+	limit?: number;
+	orderBy?: {
+		columnName: string;
+		order: 'asc' | 'desc';
+	};
+}
+
+export interface K8sClustersData {
+	clusterUID: string;
+	cpuUsage: number;
+	cpuAllocatable: number;
+	memoryUsage: number;
+	memoryAllocatable: number;
+	meta: {
+		k8s_cluster_name: string;
+		k8s_cluster_uid: string;
+	};
+}
+
+export interface K8sClustersListResponse {
+	status: string;
+	data: {
+		type: string;
+		records: K8sClustersData[];
+		groups: null;
+		total: number;
+		sentAnyHostMetricsData: boolean;
+		isSendingK8SAgentMetrics: boolean;
+	};
+}
+
+export const getK8sClustersList = async (
+	props: K8sClustersListPayload,
+	signal?: AbortSignal,
+	headers?: Record<string, string>,
+): Promise<SuccessResponse<K8sClustersListResponse> | ErrorResponse> => {
+	try {
+		const response = await axios.post('/clusters/list', props, {
+			signal,
+			headers,
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+			params: props,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
frontend/src/api/infraMonitoring/getK8sDeploymentsList.ts (new file, 70 lines)
@@ -0,0 +1,70 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
+import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
+
+export interface K8sDeploymentsListPayload {
+	filters: TagFilter;
+	groupBy?: BaseAutocompleteData[];
+	offset?: number;
+	limit?: number;
+	orderBy?: {
+		columnName: string;
+		order: 'asc' | 'desc';
+	};
+}
+
+export interface K8sDeploymentsData {
+	deploymentName: string;
+	cpuUsage: number;
+	memoryUsage: number;
+	desiredPods: number;
+	availablePods: number;
+	cpuRequest: number;
+	memoryRequest: number;
+	cpuLimit: number;
+	memoryLimit: number;
+	restarts: number;
+	meta: {
+		k8s_cluster_name: string;
+		k8s_deployment_name: string;
+		k8s_namespace_name: string;
+	};
+}
+
+export interface K8sDeploymentsListResponse {
+	status: string;
+	data: {
+		type: string;
+		records: K8sDeploymentsData[];
+		groups: null;
+		total: number;
+		sentAnyHostMetricsData: boolean;
+		isSendingK8SAgentMetrics: boolean;
+	};
+}
+
+export const getK8sDeploymentsList = async (
+	props: K8sDeploymentsListPayload,
+	signal?: AbortSignal,
+	headers?: Record<string, string>,
+): Promise<SuccessResponse<K8sDeploymentsListResponse> | ErrorResponse> => {
+	try {
+		const response = await axios.post('/deployments/list', props, {
+			signal,
+			headers,
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+			params: props,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
frontend/src/api/infraMonitoring/getK8sNamespacesList.ts (new file, 62 lines)
@@ -0,0 +1,62 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
+import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
+
+export interface K8sNamespacesListPayload {
+	filters: TagFilter;
+	groupBy?: BaseAutocompleteData[];
+	offset?: number;
+	limit?: number;
+	orderBy?: {
+		columnName: string;
+		order: 'asc' | 'desc';
+	};
+}
+
+export interface K8sNamespacesData {
+	namespaceName: string;
+	cpuUsage: number;
+	memoryUsage: number;
+	meta: {
+		k8s_cluster_name: string;
+		k8s_namespace_name: string;
+	};
+}
+
+export interface K8sNamespacesListResponse {
+	status: string;
+	data: {
+		type: string;
+		records: K8sNamespacesData[];
+		groups: null;
+		total: number;
+		sentAnyHostMetricsData: boolean;
+		isSendingK8SAgentMetrics: boolean;
+	};
+}
+
+export const getK8sNamespacesList = async (
+	props: K8sNamespacesListPayload,
+	signal?: AbortSignal,
+	headers?: Record<string, string>,
+): Promise<SuccessResponse<K8sNamespacesListResponse> | ErrorResponse> => {
+	try {
+		const response = await axios.post('/namespaces/list', props, {
+			signal,
+			headers,
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+			params: props,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
frontend/src/api/infraMonitoring/getK8sNodesList.ts (new file, 65 lines)
@@ -0,0 +1,65 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
+import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
+
+export interface K8sNodesListPayload {
+	filters: TagFilter;
+	groupBy?: BaseAutocompleteData[];
+	offset?: number;
+	limit?: number;
+	orderBy?: {
+		columnName: string;
+		order: 'asc' | 'desc';
+	};
+}
+
+export interface K8sNodesData {
+	nodeUID: string;
+	nodeCPUUsage: number;
+	nodeCPUAllocatable: number;
+	nodeMemoryUsage: number;
+	nodeMemoryAllocatable: number;
+	meta: {
+		k8s_node_name: string;
+		k8s_node_uid: string;
+		k8s_cluster_name: string;
+	};
+}
+
+export interface K8sNodesListResponse {
+	status: string;
+	data: {
+		type: string;
+		records: K8sNodesData[];
+		groups: null;
+		total: number;
+		sentAnyHostMetricsData: boolean;
+		isSendingK8SAgentMetrics: boolean;
+	};
+}
+
+export const getK8sNodesList = async (
+	props: K8sNodesListPayload,
+	signal?: AbortSignal,
+	headers?: Record<string, string>,
+): Promise<SuccessResponse<K8sNodesListResponse> | ErrorResponse> => {
+	try {
+		const response = await axios.post('/nodes/list', props, {
+			signal,
+			headers,
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+			params: props,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
frontend/src/api/infraMonitoring/getK8sPodsList.ts (new file, 93 lines)
@@ -0,0 +1,93 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
+import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
+
+export interface K8sPodsListPayload {
+	filters: TagFilter;
+	groupBy?: BaseAutocompleteData[];
+	offset?: number;
+	limit?: number;
+	orderBy?: {
+		columnName: string;
+		order: 'asc' | 'desc';
+	};
+}
+
+export interface TimeSeriesValue {
+	timestamp: number;
+	value: string;
+}
+
+export interface TimeSeries {
+	labels: Record<string, string>;
+	labelsArray: Array<Record<string, string>>;
+	values: TimeSeriesValue[];
+}
+
+export interface K8sPodsData {
+	podUID: string;
+	podCPU: number;
+	podCPURequest: number;
+	podCPULimit: number;
+	podMemory: number;
+	podMemoryRequest: number;
+	podMemoryLimit: number;
+	restartCount: number;
+	meta: {
+		k8s_cronjob_name: string;
+		k8s_daemonset_name: string;
+		k8s_deployment_name: string;
+		k8s_job_name: string;
+		k8s_namespace_name: string;
+		k8s_node_name: string;
+		k8s_pod_name: string;
+		k8s_pod_uid: string;
+		k8s_statefulset_name: string;
+		k8s_cluster_name: string;
+	};
+	countByPhase: {
+		pending: number;
+		running: number;
+		succeeded: number;
+		failed: number;
+		unknown: number;
+	};
+}
+
+export interface K8sPodsListResponse {
+	status: string;
+	data: {
+		type: string;
+		records: K8sPodsData[];
+		groups: null;
+		total: number;
+		sentAnyHostMetricsData: boolean;
+		isSendingK8SAgentMetrics: boolean;
+	};
+}
+
+export const getK8sPodsList = async (
+	props: K8sPodsListPayload,
+	signal?: AbortSignal,
+	headers?: Record<string, string>,
+): Promise<SuccessResponse<K8sPodsListResponse> | ErrorResponse> => {
+	try {
+		const response = await axios.post('/pods/list', props, {
+			signal,
+			headers,
+		});
+
+		return {
+			statusCode: 200,
+			error: null,
+			message: 'Success',
+			payload: response.data,
+			params: props,
+		};
+	} catch (error) {
+		return ErrorResponseHandler(error as AxiosError);
+	}
+};
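All of the infra-monitoring list endpoints above (hosts, clusters, deployments, namespaces, nodes, pods) share the same payload/response pattern, so callers look alike. A sketch against the pods endpoint, under the same TagFilter assumption as in the hosts example earlier:

// Sketch only; the filter shape is an assumption about TagFilter.
import { getK8sPodsList, K8sPodsListPayload } from 'api/infraMonitoring/getK8sPodsList';

const podsPayload: K8sPodsListPayload = {
	filters: { items: [], op: 'AND' },
	limit: 20,
	orderBy: { columnName: 'cpu', order: 'desc' },
};

getK8sPodsList(podsPayload).then((res) => {
	if (res.statusCode === 200 && res.payload) {
		res.payload.data.records.forEach((pod) =>
			console.log(pod.meta.k8s_pod_name, pod.podCPU, pod.countByPhase.running),
		);
	}
});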
@@ -1,24 +1,18 @@
 import { ApiV2Instance as axios } from 'api';
-import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
-import { AxiosError } from 'axios';
 import { ErrorResponse, SuccessResponse } from 'types/api';
 import { PayloadProps } from 'types/api/licenses/getAll';

 const getAll = async (): Promise<
 	SuccessResponse<PayloadProps> | ErrorResponse
 > => {
-	try {
-		const response = await axios.get('/licenses');
+	const response = await axios.get('/licenses');

 	return {
 		statusCode: 200,
 		error: null,
 		message: response.data.status,
 		payload: response.data.data,
 	};
-	} catch (error) {
-		return ErrorResponseHandler(error as AxiosError);
-	}
 };

 export default getAll;
frontend/src/api/licensesV3/getActive.ts (new file, 18 lines)
@@ -0,0 +1,18 @@
+import { ApiV3Instance as axios } from 'api';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { LicenseV3EventQueueResModel } from 'types/api/licensesV3/getActive';
+
+const getActive = async (): Promise<
+	SuccessResponse<LicenseV3EventQueueResModel> | ErrorResponse
+> => {
+	const response = await axios.get('/licenses/active');
+
+	return {
+		statusCode: 200,
+		error: null,
+		message: response.data.status,
+		payload: response.data.data,
+	};
+};
+
+export default getActive;
frontend/src/api/messagingQueues/getConsumerLagDetails.ts (new file, 56 lines)
@@ -0,0 +1,56 @@
+import axios from 'api';
+import { MessagingQueueServiceDetailType } from 'pages/MessagingQueues/MessagingQueuesUtils';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+
+export interface MessagingQueueServicePayload {
+	start?: number | string;
+	end?: number | string;
+	variables?: {
+		partition?: string;
+		topic?: string;
+		consumer_group?: string;
+		service_name?: string;
+	};
+	detailType?: MessagingQueueServiceDetailType | 'producer' | 'consumer';
+	evalTime?: number;
+}
+
+export interface MessagingQueuesPayloadProps {
+	status: string;
+	payload: {
+		resultType: string;
+		result: {
+			table: {
+				columns: {
+					name: string;
+					queryName: string;
+					isValueColumn: boolean;
+				}[];
+				rows: {
+					data: Record<string, string>;
+				}[];
+			};
+		}[];
+	};
+}
+
+export const getConsumerLagDetails = async (
+	props: MessagingQueueServicePayload,
+): Promise<
+	SuccessResponse<MessagingQueuesPayloadProps['payload']> | ErrorResponse
+> => {
+	const { detailType, ...restProps } = props;
+	const response = await axios.post(
+		`/messaging-queues/kafka/consumer-lag/${props.detailType}`,
+		{
+			...restProps,
+		},
+	);
+
+	return {
+		statusCode: 200,
+		error: null,
+		message: response.data.status,
+		payload: response.data.data,
+	};
+};
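A hypothetical call against the consumer-lag endpoint defined above; the timestamps and variable values are placeholders, and 'consumer' is one of the detailType literals allowed by MessagingQueueServicePayload.

// Sketch only; values are illustrative placeholders.
import { getConsumerLagDetails } from 'api/messagingQueues/getConsumerLagDetails';

getConsumerLagDetails({
	start: 1718000000000, // placeholder epoch-ms range
	end: 1718003600000,
	variables: { consumer_group: 'cg-1', topic: 'orders' },
	detailType: 'consumer',
}).then((res) => {
	if (res.statusCode === 200) {
		// each result row is a flat Record<string, string> keyed by column name
		console.log(res.payload?.result[0]?.table.rows);
	}
});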
frontend/src/api/messagingQueues/getKafkaSpanEval.tsx (new file, 23 lines)
@@ -0,0 +1,23 @@
+import axios from 'api';
+import { DropRateAPIResponse } from 'pages/MessagingQueues/MQDetails/DropRateView/dropRateViewUtils';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+
+import { MessagingQueueServicePayload } from './getConsumerLagDetails';
+
+export const getKafkaSpanEval = async (
+	props: Omit<MessagingQueueServicePayload, 'detailType' | 'variables'>,
+): Promise<SuccessResponse<DropRateAPIResponse['data']> | ErrorResponse> => {
+	const { start, end, evalTime } = props;
+	const response = await axios.post(`messaging-queues/kafka/span/evaluation`, {
+		start,
+		end,
+		eval_time: evalTime,
+	});
+
+	return {
+		statusCode: 200,
+		error: null,
+		message: response.data.status,
+		payload: response.data.data,
+	};
+};
@@ -0,0 +1,33 @@
import axios from 'api';
import { MessagingQueueServiceDetailType } from 'pages/MessagingQueues/MessagingQueuesUtils';
import { ErrorResponse, SuccessResponse } from 'types/api';

import {
  MessagingQueueServicePayload,
  MessagingQueuesPayloadProps,
} from './getConsumerLagDetails';

export const getPartitionLatencyDetails = async (
  props: MessagingQueueServicePayload,
): Promise<
  SuccessResponse<MessagingQueuesPayloadProps['payload']> | ErrorResponse
> => {
  const { detailType, ...rest } = props;
  let endpoint = '';
  if (detailType === MessagingQueueServiceDetailType.ConsumerDetails) {
    endpoint = `/messaging-queues/kafka/partition-latency/consumer`;
  } else {
    endpoint = `/messaging-queues/kafka/consumer-lag/producer-details`;
  }

  const response = await axios.post(endpoint, {
    ...rest,
  });

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data.data,
  };
};
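Note the routing asymmetry: ConsumerDetails is the only detailType that reaches a partition-latency route; every other value falls through to the consumer-lag producer-details endpoint. A sketch of the consumer-side call; the import path mirrors the sibling files, since this file's name is omitted by the diff view:

import { MessagingQueueServiceDetailType } from 'pages/MessagingQueues/MessagingQueuesUtils';
import { getPartitionLatencyDetails } from 'api/messagingQueues/getPartitionLatencyDetails';

async function loadPartitionLatency(start: number, end: number): Promise<void> {
  // Hits /messaging-queues/kafka/partition-latency/consumer
  await getPartitionLatencyDetails({
    start,
    end,
    detailType: MessagingQueueServiceDetailType.ConsumerDetails,
  });

  // Any other detailType value is POSTed to
  // /messaging-queues/kafka/consumer-lag/producer-details instead.
}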
@@ -0,0 +1,27 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';

import {
  MessagingQueueServicePayload,
  MessagingQueuesPayloadProps,
} from './getConsumerLagDetails';

export const getPartitionLatencyOverview = async (
  props: Omit<MessagingQueueServicePayload, 'detailType' | 'variables'>,
): Promise<
  SuccessResponse<MessagingQueuesPayloadProps['payload']> | ErrorResponse
> => {
  const response = await axios.post(
    `/messaging-queues/kafka/partition-latency/overview`,
    {
      ...props,
    },
  );

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data.data,
  };
};
@@ -0,0 +1,26 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';

import {
  MessagingQueueServicePayload,
  MessagingQueuesPayloadProps,
} from './getConsumerLagDetails';

export const getTopicThroughputDetails = async (
  props: MessagingQueueServicePayload,
): Promise<
  SuccessResponse<MessagingQueuesPayloadProps['payload']> | ErrorResponse
> => {
  const { detailType, ...rest } = props;
  const endpoint = `/messaging-queues/kafka/topic-throughput/${detailType}`;
  const response = await axios.post(endpoint, {
    ...rest,
  });

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data.data,
  };
};
@@ -0,0 +1,29 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';

import {
  MessagingQueueServicePayload,
  MessagingQueuesPayloadProps,
} from './getConsumerLagDetails';

export const getTopicThroughputOverview = async (
  props: Omit<MessagingQueueServicePayload, 'variables'>,
): Promise<
  SuccessResponse<MessagingQueuesPayloadProps['payload']> | ErrorResponse
> => {
  const { detailType, start, end } = props;
  const response = await axios.post(
    `messaging-queues/kafka/topic-throughput/${detailType}`,
    {
      start,
      end,
    },
  );

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data.data,
  };
};
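The two topic-throughput helpers differ in what they forward: getTopicThroughputDetails strips detailType from the body and spreads everything else, while getTopicThroughputOverview keeps detailType only for the URL and sends just start and end. A sketch using the 'producer' literal from the union type at the top of this diff (import path assumed to match the sibling files):

import { getTopicThroughputOverview } from 'api/messagingQueues/getTopicThroughputOverview';

async function loadThroughput(start: number, end: number): Promise<void> {
  // POST body is { start, end }; detailType only selects the route:
  // messaging-queues/kafka/topic-throughput/producer
  const res = await getTopicThroughputOverview({ start, end, detailType: 'producer' });
  if (res.statusCode === 200 && res.payload) {
    console.log(res.payload.resultType);
  }
}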
@@ -0,0 +1,39 @@
import { ApiBaseInstance } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import { ErrorResponse, SuccessResponse } from 'types/api';

export interface OnboardingStatusResponse {
  status: string;
  data: {
    attribute?: string;
    error_message?: string;
    status?: string;
  }[];
}

const getOnboardingStatus = async (props: {
  start: number;
  end: number;
  endpointService?: string;
}): Promise<SuccessResponse<OnboardingStatusResponse> | ErrorResponse> => {
  const { endpointService, ...rest } = props;
  try {
    const response = await ApiBaseInstance.post(
      `/messaging-queues/kafka/onboarding/${endpointService || 'consumers'}`,
      rest,
    );

    return {
      statusCode: 200,
      error: null,
      message: response.data.status,
      payload: response.data,
    };
  } catch (error) {
    return ErrorResponseHandler((error as AxiosError) || SOMETHING_WENT_WRONG);
  }
};

export default getOnboardingStatus;
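getOnboardingStatus is the one helper in this group that catches failures itself and normalizes them through ErrorResponseHandler, so callers branch on the envelope instead of wrapping the call in try/catch. A consumer sketch; the file's path is not shown in this view, so the import below is an assumption, and 'producers' is a hypothetical endpointService value (the helper defaults to 'consumers'):

import getOnboardingStatus from 'api/messagingQueues/getOnboardingStatus';

async function checkOnboarding(start: number, end: number): Promise<void> {
  const res = await getOnboardingStatus({ start, end, endpointService: 'producers' });

  if (res.statusCode === 200 && res.payload) {
    // Entries surface per-attribute failures via error_message
    res.payload.data
      .filter((item) => item.error_message)
      .forEach((item) => console.warn(item.attribute, item.error_message));
  } else {
    console.error(res.error); // already normalized by ErrorResponseHandler
  }
}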
frontend/src/api/onboarding/updateProfile.ts (new file, 20 lines added)
@@ -0,0 +1,20 @@
import { GatewayApiV2Instance } from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { UpdateProfileProps } from 'types/api/onboarding/types';

const updateProfile = async (
  props: UpdateProfileProps,
): Promise<SuccessResponse<UpdateProfileProps> | ErrorResponse> => {
  const response = await GatewayApiV2Instance.put('/profiles/me', {
    ...props,
  });

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data.data,
  };
};

export default updateProfile;
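updateProfile stands out for going through GatewayApiV2Instance rather than the default axios instance, PUTting the entire props object to /profiles/me. A thin wrapper sketch that leaves UpdateProfileProps opaque, since its fields are defined outside this diff:

import updateProfile from 'api/onboarding/updateProfile';
import { UpdateProfileProps } from 'types/api/onboarding/types';

async function saveProfile(props: UpdateProfileProps): Promise<boolean> {
  const res = await updateProfile(props);
  return res.statusCode === 200 && res.error === null;
}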
frontend/src/api/preferences/getAllOrgPreferences.ts (new file, 18 lines added)
@@ -0,0 +1,18 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { GetAllOrgPreferencesResponseProps } from 'types/api/preferences/userOrgPreferences';

const getAllOrgPreferences = async (): Promise<
  SuccessResponse<GetAllOrgPreferencesResponseProps> | ErrorResponse
> => {
  const response = await axios.get(`/org/preferences`);

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data,
  };
};

export default getAllOrgPreferences;
frontend/src/api/preferences/getAllUserPreference.ts (new file, 18 lines added)
@@ -0,0 +1,18 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { GetAllUserPreferencesResponseProps } from 'types/api/preferences/userOrgPreferences';

const getAllUserPreferences = async (): Promise<
  SuccessResponse<GetAllUserPreferencesResponseProps> | ErrorResponse
> => {
  const response = await axios.get(`/user/preferences`);

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data,
  };
};

export default getAllUserPreferences;
frontend/src/api/preferences/getOrgPreference.ts (new file, 20 lines added)
@@ -0,0 +1,20 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { GetOrgPreferenceResponseProps } from 'types/api/preferences/userOrgPreferences';

const getOrgPreference = async ({
  preferenceID,
}: {
  preferenceID: string;
}): Promise<SuccessResponse<GetOrgPreferenceResponseProps> | ErrorResponse> => {
  const response = await axios.get(`/org/preferences/${preferenceID}`);

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data,
  };
};

export default getOrgPreference;
frontend/src/api/preferences/getUserPreference.ts (new file, 22 lines added)
@@ -0,0 +1,22 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { GetUserPreferenceResponseProps } from 'types/api/preferences/userOrgPreferences';

const getUserPreference = async ({
  preferenceID,
}: {
  preferenceID: string;
}): Promise<
  SuccessResponse<GetUserPreferenceResponseProps> | ErrorResponse
> => {
  const response = await axios.get(`/user/preferences/${preferenceID}`);

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data,
  };
};

export default getUserPreference;
frontend/src/api/preferences/updateOrgPreference.ts (new file, 28 lines added)
@@ -0,0 +1,28 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
  UpdateOrgPreferenceProps,
  UpdateOrgPreferenceResponseProps,
} from 'types/api/preferences/userOrgPreferences';

const updateOrgPreference = async (
  preferencePayload: UpdateOrgPreferenceProps,
): Promise<
  SuccessResponse<UpdateOrgPreferenceResponseProps> | ErrorResponse
> => {
  const response = await axios.put(
    `/org/preferences/${preferencePayload.preferenceID}`,
    {
      preference_value: preferencePayload.value,
    },
  );

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data.data,
  };
};

export default updateOrgPreference;
frontend/src/api/preferences/updateUserPreference.ts (new file, 25 lines added)
@@ -0,0 +1,25 @@
import axios from 'api';
import { ErrorResponse, SuccessResponse } from 'types/api';
import {
  UpdateUserPreferenceProps,
  UpdateUserPreferenceResponseProps,
} from 'types/api/preferences/userOrgPreferences';

const updateUserPreference = async (
  preferencePayload: UpdateUserPreferenceProps,
): Promise<
  SuccessResponse<UpdateUserPreferenceResponseProps> | ErrorResponse
> => {
  const response = await axios.put(`/user/preferences`, {
    preference_value: preferencePayload.value,
  });

  return {
    statusCode: 200,
    error: null,
    message: response.data.status,
    payload: response.data.data,
  };
};

export default updateUserPreference;
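The six preference files form a symmetric surface: list and single GETs for org and user scope, plus PUT updates. Two wrinkles are worth noting: the GET helpers expose response.data directly as payload, while most other helpers in this diff unwrap response.data.data, and the user-scope update PUTs to the collection route /user/preferences, whereas the org update embeds preferenceID in the URL. A combined sketch with a hypothetical preference ID and value:

import getOrgPreference from 'api/preferences/getOrgPreference';
import updateOrgPreference from 'api/preferences/updateOrgPreference';

const PREFERENCE_ID = 'ORG_SIDENAV_PINNED'; // hypothetical ID for illustration

async function pinSidenavForOrg(): Promise<void> {
  const current = await getOrgPreference({ preferenceID: PREFERENCE_ID });
  if (current.statusCode !== 200) return;

  // The helper serializes { value } as preference_value in the PUT body;
  // a boolean value is an assumption about this preference's type.
  await updateOrgPreference({ preferenceID: PREFERENCE_ID, value: true });
}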
@@ -5,7 +5,6 @@ import { baseAutoCompleteIdKeysOrder } from 'constants/queryBuilder';
 import { createIdFromObjectFields } from 'lib/createIdFromObjectFields';
 import createQueryParams from 'lib/createQueryParams';
 import { ErrorResponse, SuccessResponse } from 'types/api';
-// ** Types
 import { IGetAttributeKeysPayload } from 'types/api/queryBuilder/getAttributeKeys';
 import {
   BaseAutocompleteData,
Some files were not shown because too many files have changed in this diff.