Compare commits: v0.55.0-cl...v0.71.0-cl — 515 commits

Only the abbreviated commit SHAs were captured from the comparison table; the Author and Date columns were empty:

```text
01b6e22bbd dc15ee8176 e414215786 5fe04078e5 cf95b15ba1
3b550c485d 784dccf298 aa26dc77af e33a0fdd47 c8032f771e
536656281d c6fda99b9b bbf64a7b52 5783f1555f 93a8f97355
a84e462a65 d910d99689 e542e96031 3849ca1ecc d1e7cc128f
ffd72cf406 6dfea14219 f2be856f63 f04589a0b2 1378590429
88084af4d4 d0eefa0cf2 cc9eb32c50 70169986be fb3b70b729
36f91d1993 333f90d8ac ea83a7e62d 42a5d71d81 08e1fd3ca5
8cb8beb63f 35c61045c4 7c85befc17 813ca8bc23 98cdbcd711
61a6c21edb 8d6731c7ba 6dc7a76853 cc3d78cd71 bd0c4beeee
e83e691ef5 c30c882aae 001122db2c b544a54c40 af6b54aeb4
1e61e6c2f6 390b04c015 c56345e1db 7f25674640 df5ab64c83
726f2b0fa2 4a0b0aafbd 837f434fe9 0baf0e9453 403043e076
7730f76128 6e3ffd555d c565c2b865 4ec1e66c7e 89541862cc
610f4d43e7 044a124cc1 0cf9003e3a 644135a933 b465f74e4a
e00e365964 5c45e1f7b3 16e61b45ac fdcdbf021a c92ef53e9c
268f283785 c574adc634 939ab5270e 42525b6067 c66cd3ce4e
e9618d64bc 8e11a988be 92299e1b08 bab8c8274c 265c67e5bd
efc8c95d59 5708079c3c dbe78e55a9 a60371fb80 d5b847c091
c106f1c9a9 d6bfd95302 68ee677630 3ff862b483 f91badbce9
2ead4fbb66 56b17bcfef 5839b65f7a 3787c5ca24 458cd28cc2
64a4606275 505757b971 80740f646c e92d055c30 5c546e8efd
ecd50f7232 15f85a645f 366ca3bb3e 43b0cdbb6a 4967696da8
c5938b6c10 9feee6ff46 d48cdbfc4a dad72dd295 28d27bc5c1
3e675bb9a5 05c9dd68dd 03fb388cd1 196b17dd1e 93e9d15004
f11161ddb8 50db3cc39f 6e27df9dcb 7f6bad67d5 825d2dfcbb
9f6419c2f8 421879cf7a 00abadd429 14096f8d53 d2aa1cf06e
838192cf5c 5dfe245f2d 53b86e4b5c 5d9a2571df bef6cc945a
2c2e248c95 2f62a9d36d 04778b9641 26fe5e49e7 accafbc3ec
8e7c78e1b1 53ebd39f41 b36ef944cc fa90fad373 77420b9d3a
cecc57e72d 512adc6471 42fefc65be dcc659907a b90ed375c2
a8a3bd3f7d 7405bfbbee 67e822e23e 60dc479a19 85cf4f4e2e
83aa48c721 823f84f857 8a4d45084d 5bc6c33899 83f6dea2db
7031c866e8 46bc7c7a21 6d9741c3a4 610a8ec704 cd9f27ab08
14fbb1fcda 96da21df05 8608f02263 2701ae5c34 951593b0a3
e6766023dd bef5b96c5c b29359dee0 9a1cd65b73 8ab0c066d6
b333aa3775 8a3319cdf5 d09c4d947e 2508e6f9f1 1b8213653a
b499b10333 b35b975798 715f8a2363 8d1c4491b7 e3caa6a8f5
a1059ed949 8c46de8eac 2b5a0ec496 a9440c010c f9e7eff357
0fbfb6b22b b25df66381 32fa5a403c f9d4cf19e9 81775c7d55
8d2666004b 51baf7f8d3 31a2926375 8c6225185d d4458d65ad
02d8fdb212 47d8c9e3e7 a383c708e3 99367be850 73bcc2af46
43f856c41b 6384b25af3 507c0600cd 3d092ec2ae 2b8a610a07
f7f8bf1867 813cd845f4 6aee991633 2bfd31841e a320a16556
7cd8442e6e 486632b64e 328d955a74 a3e57a1829 24ab18d988
2e4956c2f7 b85f7921f4 0c2a15d86f afbba1ed44 20f748f9c4
96b5e0920f 7fe4f8cc56 ed6abe5a95 a6968d452c 0c5db1937e
67058b2a17 e46d969143 e4505693b0 2dad9a3093 7b6bd83e9a
d43adc24ef 5044861773 71d1e12be7 5a70123b06 f410df846a
d7bd72e2aa 20e64b5102 0b03ff07f1 c01060ccf7 57c2326908
649560265e c8d0f7638e 25484caa4c 9ccc686c63 3ad6ff73df
c93cf1ce95 a9ced66258 98a350692b d93f72f18d a59e7b9dfb
91bbeaf175 22e61e1605 656d1c2b1c 493ae4fd07 cd1ec561b1
0acf39a532 d859301d30 35f4eaa23b 07c24bcdf3 77c5f17dce
a11aadb712 bc9c7b5f1d 1bba932d08 c1478c4e54 371224a64a
504bc0d541 2faa0c6d4f 969ac5028e 3f7adeb040 323da3494b
01fda51959 85ac21f253 fd9e9f0fb3 d5523fc092 2ec641b99e
d1503f1418 e974e9d47f 577a169508 939e2a3570 b64326070c
63872983c6 eb6670980a 831540eaf0 48f3b9cacb eaf8571fe9
22c10f9479 e748fb0655 fdc54a62a9 abe0ab69b0 e623c92615
dc5917db01 d6a7f0b6f4 471803115e 8403a3362d 64d46bc855
c9fee27604 f1b6b2d3d8 468f056530 7086470ce2 352296c6cd
975307a8b8 12377be809 9d90b8d19c 5005923ef4 db4338be42
c7d0598ec0 4978fb9599 7b18c3ba06 92cdb36879 580f0b816e
b770fc2457 c177230cce 2112047a02 03c193d5a1 b83b295318
fbe75cd057 860145fb1d 2fe75e74cd 8e19c346a4 1b33efe4cc
2642338672 845dc00568 a1090bfdc5 44f41c55f9 42ac9ab6fe
5c02250aae c49a9dac1a abc2ec2155 4dc5615d2f 6c350f30aa
6664e1bc02 438cbcef87 829e1f0920 68d25a8989 cc90321ac0
b10c22223b bdcae62bf9 cdde369748 4e26189778 523cbcd6fc
eeadc021e1 952ab58023 3ca2fff5c5 ef3a9adb48 975f141604
c206f4fa5c e88e24e434 94e0423479 5891fbc229 8137ec54ba
f7b80524a5 4be0508dd2 a31c4b8339 d7846338ce 5dac1ad20a
8d704c331c f8e47496fa 6fef9d9676 190767fd0a 1e78786cae
6448fb17e7 f2e33d7ca9 6c7167a224 00421235b0 0e2b67059b
910c44cefc 8bad036423 a21830132f 9419f56e95 347868c18b
17e20e7f41 2b0da82f94 911362cecf 481f9620d3 e5be431f18
503ed45a99 28818fbaac c0e40614bf 2d732ae4a9 8466e31e02
efdaf7ee43 0dec94a5c6 204728ff60 e51f4d986d 337a941d0d
fc4b55cb34 96cb8053df 5651d69485 a6e492880d 80b3c3e256
0806420dd7 18e240e3d1 d0965a24c5 7ed689693f 90ae55264a
bf4c792cdb dd097821d1 701b8803ac 2728ddd255 5187ed58a0
2180118094 ecae842fa1 291b3ba357 78d1e19e60 fa9e89bfe7
16f49a1d25 c95c0f9a15 5588c7dd3f 679b5db5a2 64feff3539
1720d616f6 155a2ea557 d5c38ed0a4 b70d50f2b3 728f699051
3bbbc759d3 2230ca1740 440fd4e02b 78a924d378 b03fadc2ec
4b79d3b785 a24fb5d84f 137059ded6 f1ce82ac25 4aeed392d7
4356ddae8c 76e7de3aed ae5e63cc64 5ef05891ce c452e23b18
69aab87d72 a60674cf1b 022b9226a7 36e2404814 2eb3f6cb06
98cbdf570f d380894c35 ea0263cc73 f38a1d9f1c 9390a815a8
4f76e13dbe 6a4643558c a98c8db949 5ba9c9d48c e1ca71dcea
266ed58908 1411ae41c3 bc8891d2f8 3b7455ac4c 5a0a7c2c60
794d6fc0ca 4c95df44d5 717545e14c e4d1452f5f 88ace79a64
9b42326f80 44a3469b9b ef4b70f67b 7a125e31ec c7bd7566c5
f4fbe62169 6e3141a4ce fc8391c5aa 87499d1ead 5fa8686fcf
dc2db524c7 b3545b767a 55f653d92e 35f8e133a9 58d6487f77
6685482ea6 708158f50f 0feab5aa93 b49ed913c7 419d2da363
df2844ea74 5e5f0f167f a6b05f0a3d f69aaa2cfb 3866f89d3e
f9ac41b865 c5b5bfe540 f3c01a5155 033b64a62a 4aabfe7cf5
0218f701b2 540a2c6712 08f3b089f4 1d8e5b6c0f 0dcded59e5
bfb63ca8c4 71e24483dd 317c41a166 ed4613cb1b 6c06fea1aa
6bc2f9125c 262beef8f9 43cc6dea92 6684640abe 0a146910d6
690ed0f7f1 5bcf7de440 703983a5f9 766a2123c5 a476c68f7e
fc15aa6f1c 4192fd573d ca13d80205 8d84ce8f06 09ea7b9eb5
```
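The same comparison can be reproduced locally with plain git; a quick sketch, assuming both tags have been fetched:

```sh
# Commits reachable from v0.71.0-cl but not v0.55.0-cl,
# one abbreviated SHA plus subject per line.
git log --oneline v0.55.0-cl..v0.71.0-cl

# The count should match the 515 commits reported above.
git rev-list --count v0.55.0-cl..v0.71.0-cl
```

## Files changed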
*(ignore-file diff; the file header was not captured in the extraction)*

```diff
@@ -3,4 +3,7 @@
 .vscode
 README.md
 deploy
-sample-apps
+sample-apps
+
+# frontend
+node_modules
```
**`.github/workflows/build.yaml`** (vendored) — 1 change

```diff
@@ -3,7 +3,6 @@ name: build-pipeline
 on:
   pull_request:
     branches:
-      - develop
       - main
       - release/v*
```
**`.github/workflows/docs.yml`** (vendored, new file) — 83 additions

```yaml
name: "Update PR labels and Block PR until related docs are shipped for the feature"

on:
  pull_request:
    branches:
      - main
    types: [opened, edited, labeled, unlabeled]

permissions:
  pull-requests: write
  contents: read

jobs:
  docs_label_check:
    runs-on: ubuntu-latest
    steps:
      - name: Check PR Title and Manage Labels
        uses: actions/github-script@v6
        with:
          script: |
            const prTitle = context.payload.pull_request.title;
            const prNumber = context.payload.pull_request.number;
            const owner = context.repo.owner;
            const repo = context.repo.repo;

            // Fetch the current PR details to get labels
            const pr = await github.rest.pulls.get({
              owner,
              repo,
              pull_number: prNumber
            });

            const labels = pr.data.labels.map(label => label.name);

            if (prTitle.startsWith('feat:')) {
              const hasDocsRequired = labels.includes('docs required');
              const hasDocsShipped = labels.includes('docs shipped');
              const hasDocsNotRequired = labels.includes('docs not required');

              // If "docs not required" is present, skip the checks
              if (hasDocsNotRequired && !hasDocsRequired) {
                console.log("Skipping checks due to 'docs not required' label.");
                return; // Exit the script early
              }

              // If "docs shipped" is present, remove "docs required" if it exists
              if (hasDocsShipped && hasDocsRequired) {
                await github.rest.issues.removeLabel({
                  owner,
                  repo,
                  issue_number: prNumber,
                  name: 'docs required'
                });
                console.log("Removed 'docs required' label.");
              }

              // Add "docs required" label if neither "docs shipped" nor "docs required" are present
              if (!hasDocsRequired && !hasDocsShipped) {
                await github.rest.issues.addLabels({
                  owner,
                  repo,
                  issue_number: prNumber,
                  labels: ['docs required']
                });
                console.log("Added 'docs required' label.");
              }
            }

            // Fetch the updated labels after any changes
            const updatedPr = await github.rest.pulls.get({
              owner,
              repo,
              pull_number: prNumber
            });

            const updatedLabels = updatedPr.data.labels.map(label => label.name);
            const updatedHasDocsRequired = updatedLabels.includes('docs required');
            const updatedHasDocsShipped = updatedLabels.includes('docs shipped');

            // Block PR if "docs required" is still present and "docs shipped" is missing
            if (updatedHasDocsRequired && !updatedHasDocsShipped) {
              core.setFailed("This PR requires documentation. Please remove the 'docs required' label and add the 'docs shipped' label to proceed.");
            }
```
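Since the check keys entirely off labels, a maintainer can unblock a `feat:` PR from the command line once the docs ship; a sketch using the GitHub CLI (the PR number is illustrative):

```sh
# Swap the gating labels so the docs_label_check job passes on its next run.
gh pr edit 1234 --remove-label "docs required" --add-label "docs shipped"
```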
**`.github/workflows/e2e-k3s.yaml`** (vendored) — 2 changes

```diff
@@ -42,7 +42,7 @@ jobs:
           kubectl create ns sample-application

           # apply hotrod k8s manifest file
-          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml

           # wait for all deployments in sample-application namespace to be READY
           kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
```
**`.github/workflows/goreleaser.yaml`** (vendored, new file) — 35 additions

```yaml
name: goreleaser

on:
  push:
    tags:
      - v*
      - histogram-quantile/v*

permissions:
  contents: write

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        workdirs:
          - scripts/clickhouse/histogramquantile
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: set-up-go
        uses: actions/setup-go@v5
      - name: run-goreleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: release --clean
          workdir: ${{ matrix.workdirs }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
```
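The release can be dry-run locally without pushing a tag; this mirrors the `goreleaser-snapshot` target added to the Makefile later in this diff (GoReleaser must be installed):

```sh
# Snapshot build from the matrix workdir: --snapshot skips tag validation
# and publishing, --clean removes any stale dist/ output first.
cd scripts/clickhouse/histogramquantile
goreleaser release --clean --snapshot
```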
**`.github/workflows/jest-coverage-changes.yml`** (vendored) — 5 changes

```diff
@@ -2,7 +2,8 @@ name: Jest Coverage - changed files

 on:
   pull_request:
-    branches: develop
+    branches:
+      - main

 jobs:
   build:
@@ -11,7 +12,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          ref: "refs/heads/develop"
+          ref: "refs/heads/main"
           token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication

       - name: Fetch branch
```
**`.github/workflows/pr_verify_linked_issue.yml`** (vendored, deleted) — 19 deletions

```diff
@@ -1,19 +0,0 @@
-# This workflow will inspect a pull request to ensure there is a linked issue or a
-# valid issue is mentioned in the body. If neither is present it fails the check and adds
-# a comment alerting users of this missing requirement.
-name: VerifyIssue
-
-on:
-  pull_request:
-    types: [edited, opened]
-  check_run:
-
-jobs:
-  verify_linked_issue:
-    runs-on: ubuntu-latest
-    name: Ensure Pull Request has a linked issue.
-    steps:
-      - name: Verify Linked Issue
-        uses: srikanthccv/verify-linked-issue-action@v0.71
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
**`.github/workflows/prereleaser.yaml`** (vendored, new file) — 36 additions

```yaml
name: prereleaser

on:
  # schedule every wednesday 9:30 AM UTC (3pm IST)
  schedule:
    - cron: '30 9 * * 3'

  # allow manual triggering of the workflow by a maintainer
  workflow_dispatch:
    inputs:
      release_type:
        description: "Type of the release"
        type: choice
        required: true
        options:
          - 'patch'
          - 'minor'
          - 'major'

jobs:
  verify:
    uses: signoz/primus.workflows/.github/workflows/github-verify.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      GITHUB_TEAM_NAME: releaser
      GITHUB_MEMBER_NAME: ${{ github.actor }}
  signoz:
    if: ${{ always() && (needs.verify.result == 'success' || github.event.name == 'schedule') }}
    uses: signoz/primus.workflows/.github/workflows/releaser.yaml@main
    secrets: inherit
    needs: [verify]
    with:
      PRIMUS_REF: main
      PROJECT_NAME: signoz
      RELEASE_TYPE: ${{ inputs.release_type || 'minor' }}
```
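Besides the Wednesday cron, the workflow exposes a manual trigger through `workflow_dispatch`; a sketch of invoking it with the GitHub CLI (maintainer permissions assumed):

```sh
# Kick off a patch prerelease by hand instead of waiting for the schedule.
gh workflow run prereleaser.yaml -f release_type=patch
```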
**`.github/workflows/push.yaml`** (vendored) — 12 changes

```diff
@@ -4,7 +4,6 @@ on:
   push:
     branches:
       - main
-      - develop
     tags:
       - v*

@@ -58,6 +57,17 @@
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
+      - name: Create .env file
+        run: |
+          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
+          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
+          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
+          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
+          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
+          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
+          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
+          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
       - name: Setup golang
         uses: actions/setup-go@v4
         with:
```
**`.github/workflows/releaser.yaml`** (vendored, new file) — 34 additions

```yaml
name: releaser

on:
  # trigger on new latest release
  release:
    types: [published]

jobs:
  detect:
    if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
    runs-on: ubuntu-latest
    outputs:
      release_type: ${{ steps.find.outputs.release_type }}
    steps:
      - id: find
        name: find
        run: |
          release_tag=${{ github.event.release.tag_name }}
          patch_number=$(echo $release_tag | awk -F. '{print $3}')
          release_type="minor"
          if [[ $patch_number -ne 0 ]]; then
            release_type="patch"
          fi
          echo "release_type=${release_type}" >> "$GITHUB_OUTPUT"
  charts:
    if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
    secrets: inherit
    needs: [detect]
    with:
      PRIMUS_REF: main
      GITHUB_REPOSITORY_NAME: charts
      GITHUB_EVENT_NAME: prereleaser
      GITHUB_EVENT_PAYLOAD: "{\"release_type\": \"${{ needs.detect.outputs.release_type }}\"}"
```
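The `detect` job's `find` step classifies the release from the patch component of the tag alone. The same logic, run standalone (tag values illustrative):

```sh
# The third dot-separated field of the tag decides minor vs patch.
for release_tag in v0.71.0 v0.71.2; do
  patch_number=$(echo $release_tag | awk -F. '{print $3}')
  release_type="minor"
  if [[ $patch_number -ne 0 ]]; then
    release_type="patch"
  fi
  echo "$release_tag -> $release_type"   # v0.71.0 -> minor, v0.71.2 -> patch
done
```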
**`.github/workflows/sonar.yml`** (vendored) — 1 change

```diff
@@ -3,7 +3,6 @@ on:
   pull_request:
     branches:
       - main
-      - develop
     paths:
       - 'frontend/**'
 defaults:
```
**`.github/workflows/staging-deployment.yaml`** (vendored) — 7 changes

```diff
@@ -1,12 +1,12 @@
 name: staging-deployment
-# Trigger deployment only on push to develop branch
+# Trigger deployment only on push to main branch
 on:
   push:
     branches:
-      - develop
+      - main
 jobs:
   deploy:
-    name: Deploy latest develop branch to staging
+    name: Deploy latest main branch to staging
     runs-on: ubuntu-latest
     environment: staging
     permissions:
@@ -38,6 +38,7 @@ jobs:
           export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
           export OTELCOL_TAG="main"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
+          export KAFKA_SPAN_EVAL="true"
           docker system prune --force
           docker pull signoz/signoz-otel-collector:main
           docker pull signoz/signoz-schema-migrator:main
```
**`.github/workflows/testing-deployment.yaml`** (vendored) — 2 changes

```diff
@@ -44,7 +44,7 @@ jobs:
           git add .
           git stash push -m "stashed on $(date --iso-8601=seconds)"
           git fetch origin
-          git checkout develop
+          git checkout main
           git pull
           # This is added to include the scenerio when new commit in PR is force-pushed
           git branch -D ${GITHUB_BRANCH}
```
**`.gitignore`** (vendored) — 8 changes

```diff
@@ -52,7 +52,7 @@ ee/query-service/tests/test-deploy/data/
 /deploy/docker/clickhouse-setup/data/
 /deploy/docker-swarm/clickhouse-setup/data/
 bin/
-
+.local/
 */query-service/queries.active

 # e2e
@@ -70,3 +70,9 @@ vendor/

 # git-town
 .git-branches.toml
+
+# goreleaser
+dist/
+
+# ignore user_scripts that is fetched by init-clickhouse
+deploy/common/clickhouse/user_scripts/
```
**`.gitpod.yml`** — 10 changes

```diff
@@ -3,16 +3,10 @@


 tasks:
-  - name: Run Script to Comment ut required lines
-    init: |
-      cd ./.scripts
-      sh commentLinesForSetup.sh
-
   - name: Run Docker Images
     init: |
-      cd ./deploy
-      sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
-      # command:
+      cd ./deploy/docker
+      sudo docker compose up -d

   - name: Run Frontend
     init: |
```
**`CONTRIBUTING.md`**

````diff
@@ -141,9 +141,9 @@ Depending upon your area of expertise & interest, you can choose one or more to

 # 3. Develop Frontend 🌚

-**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**
+**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**

-Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
+Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).

 ## 3.1 Contribute to Frontend with Docker installation of SigNoz

@@ -151,14 +151,14 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
 ```
 git clone https://github.com/SigNoz/signoz.git && cd signoz
 ```
-- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
+- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)

 

-- run `cd deploy` to move to deploy directory,
 - Install signoz locally **without** the frontend,
-- Add / Uncomment the below configuration to query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
+- Add / Uncomment the below configuration to query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
 ```
 ports:
   - "8080:8080"
@@ -167,9 +167,10 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/

 - Next run,
 ```
-sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
+cd deploy/docker
+sudo docker compose up -d
 ```
-- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
+- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.

 If you have backend api exposed via frontend nginx:
 ```
@@ -186,11 +187,6 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
 yarn dev
 ```

-### Important Notes:
-The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
-
 **[`^top^`](#contributing-guidelines)**

 ## 3.2 Contribute to Frontend without installing SigNoz backend

 If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.

@@ -216,7 +212,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi

 # 4. Contribute to Backend (Query-Service) 🌑

-**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
+**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**

 ## 4.1 Prerequisites

@@ -242,13 +238,13 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
 ```
 git clone https://github.com/SigNoz/signoz.git && cd signoz
 ```
 - run `sudo make dev-setup` to configure local setup to run query-service,
-- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
+- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
 <img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">

-- Comment out `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)
+- Comment out `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
 <img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">

-- add below configuration to `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)
+- add below configuration to `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
 ```
 ports:
   - 9001:9000
@@ -258,9 +254,9 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
 - run `cd pkg/query-service/` to move to `query-service` directory,
 - Then, you need to create a `.env` file with the following environment variable
 ```
-SIGNOZ_LOCAL_DB_PATH="./signoz.db"
+SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
 ```
-to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38)
+to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)

 - Now, install SigNoz locally **without** the `frontend` and `query-service`,
 - If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
@@ -294,13 +290,10 @@ docker pull signoz/query-service:develop
 ```

-### Important Note:
-The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
-
-
 **Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)

-If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
+If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.

 <!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.

@@ -339,7 +332,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)

 **5.1.1 To install the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
 | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```

@@ -362,7 +355,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \

 **5.1.4 To delete the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
 | HOTROD_NAMESPACE=sample-application bash
 ```
````
**`Makefile`** — 48 changes

```diff
@@ -8,14 +8,16 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
 BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
 BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
+ZEUS_URL ?= https://api.signoz.cloud
 DEV_BUILD ?= "" # set to any non-empty value to enable dev build

 # Internal variables or constants.
 FRONTEND_DIRECTORY ?= frontend
 QUERY_SERVICE_DIRECTORY ?= pkg/query-service
 EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
-STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
-SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
+STANDALONE_DIRECTORY ?= deploy/docker
+SWARM_DIRECTORY ?= deploy/docker-swarm
 CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile

 GOOS ?= $(shell go env GOOS)
 GOARCH ?= $(shell go env GOARCH)
@@ -33,8 +35,9 @@ buildHash=${PACKAGE}/pkg/query-service/version.buildHash
 buildTime=${PACKAGE}/pkg/query-service/version.buildTime
 gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
 licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
+zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL

-LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
+LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
 DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}

 all: build-push-frontend build-push-query-service
@@ -79,7 +82,7 @@ build-query-service-static:
	@if [ $(DEV_BUILD) != "" ]; then \
		cd $(QUERY_SERVICE_DIRECTORY) && \
		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-		-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
+		-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
	else \
		cd $(QUERY_SERVICE_DIRECTORY) && \
		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
@@ -96,12 +99,12 @@ build-query-service-static-arm64:

 # Steps to build static binary of query service for all platforms
 .PHONY: build-query-service-static-all
-build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
+build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static

 # Steps to build and push docker image of query service
 .PHONY: build-query-service-amd64 build-push-query-service
 # Step to build docker image of query service in amd64 (used in build pipeline)
-build-query-service-amd64: build-query-service-static-amd64
+build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
	@echo "------------------"
	@echo "--> Building query-service docker image for amd64"
	@echo "------------------"
@@ -140,16 +143,6 @@ dev-setup:
	@echo "--> Local Setup completed"
	@echo "------------------"

-run-local:
-	@docker-compose -f \
-	$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
-	up --build -d
-
-down-local:
-	@docker-compose -f \
-	$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
-	down -v
-
 pull-signoz:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull

@@ -188,13 +181,16 @@ check-no-ee-references:
	fi

 test:
-	go test ./pkg/query-service/app/metrics/...
-	go test ./pkg/query-service/cache/...
-	go test ./pkg/query-service/app/...
-	go test ./pkg/query-service/app/querier/...
-	go test ./pkg/query-service/converter/...
-	go test ./pkg/query-service/formatter/...
-	go test ./pkg/query-service/tests/integration/...
-	go test ./pkg/query-service/rules/...
-	go test ./pkg/query-service/collectorsimulator/...
-	go test ./pkg/query-service/postprocess/...
+	go test ./pkg/...
+
+goreleaser-snapshot:
+	@if [[ ${GORELEASER_WORKDIR} ]]; then \
+		cd ${GORELEASER_WORKDIR} && \
+		goreleaser release --clean --snapshot; \
+		cd -; \
+	else \
+		goreleaser release --clean --snapshot; \
+	fi
+
+goreleaser-snapshot-histogram-quantile:
+	make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot
```
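The new `-X ${zeusURL}=${ZEUS_URL}` entry works like the existing LD_FLAGS entries: Go's linker overwrites a package-level string variable at link time. A minimal sketch of the mechanism (the `main.zeusURL` variable is hypothetical):

```sh
# Assume a main.go in package main containing:  var zeusURL string
go build -ldflags "-X main.zeusURL=https://api.signoz.cloud" -o app .
./app   # the binary now sees zeusURL set, with no source change
```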
**`README.de-de.md`**

```diff
@@ -13,9 +13,9 @@

 <h3 align="center">
   <a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> •
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/main/README.md"><b>Readme auf Englisch </b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
   <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
   <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
 </h3>
```
**`README.md`** — 206 changes

```diff
@@ -1,8 +1,11 @@
-<p align="center">
-  <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
-
-  <p align="center">Monitor your applications and troubleshoot problems in your deployed applications, an open-source alternative to DataDog, New Relic, etc.</p>
-</p>
+<h1 align="center" style="border-bottom: none">
+  <a href="https://signoz.io" target="_blank">
+    <img alt="SigNoz" src="https://github.com/user-attachments/assets/ef9a33f7-12d7-4c94-8908-0a02b22f0c18" width="100" height="100">
+  </a>
+  <br>SigNoz
+</h1>
+
+<p align="center">All your logs, metrics, and traces in one place. Monitor your application, spot issues before they occur and troubleshoot downtime quickly with rich context. SigNoz is a cost-effective open-source alternative to Datadog and New Relic. Visit <a href="https://signoz.io" target="_blank">signoz.io</a> for the full documentation, tutorials, and guide.</p>

 <p align="center">
   <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
@@ -14,62 +17,122 @@

 <h3 align="center">
   <a href="https://signoz.io/docs"><b>Documentation</b></a> •
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>ReadMe in German</b></a> •
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
   <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
   <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
 </h3>

 ##

-SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can:
-
-👉 Visualise Metrics, Traces and Logs in a single pane of glass
-
-👉 You can see metrics like p99 latency, error rates for your services, external API calls and individual end points.
-
-👉 You can find the root cause of the problem by going to the exact traces which are causing the problem and see detailed flamegraphs of individual request traces.
-
-👉 Run aggregates on trace data to get business relevant metrics
-
-👉 Filter and query logs, build dashboards and alerts based on attributes in logs
-
-👉 Record exceptions automatically in Python, Java, Ruby, and Javascript
-
-👉 Easy to set alerts with DIY query builder
+## Features

-### Application Metrics
+### Application Performance Monitoring

-![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)
+Use SigNoz APM to monitor your applications and services. It comes with out-of-box charts for key application metrics like p99 latency, error rate, Apdex and operations per second. You can also monitor the database and external calls made from your application. Read [more](https://signoz.io/application-performance-monitoring/).
+
+You can [instrument](https://signoz.io/docs/instrumentation/) your application with OpenTelemetry to get started.
+
+![apm-cover](https://github.com/user-attachments/assets/ed36d20c-b042-4bce-b545-e8acd38bed48)

-### Distributed Tracing
-<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
-
-<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
-
 ### Logs Management

-<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
+SigNoz can be used as a centralized log management solution. We use ClickHouse (used by likes of Uber & Cloudflare) as a datastore, ⎯ an extremely fast and highly optimized storage for logs data. Instantly search through all your logs using quick filters and a powerful query builder.
+
+You can also create charts on your logs and monitor them with customized dashboards. Read [more](https://signoz.io/log-management/).
+
+![logs-management-cover](https://github.com/user-attachments/assets/343588e4-0f64-4a2b-a6d0-6a5bbd9bb357)

 ### Infrastructure Monitoring

-<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
+![infrastructure-monitoring-cover](https://github.com/user-attachments/assets/00dca0c0-0867-4e07-a57a-11e532e91e30)

-### Exceptions Monitoring
-
-![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-f53b6c6a-89da-4e40-a0d3-5d1bfdcbeba5.png)
+### Distributed Tracing
+
+Distributed Tracing is essential to troubleshoot issues in microservices applications. Powered by OpenTelemetry, distributed tracing in SigNoz can help you track user requests across services to help you identify performance bottlenecks.
+
+See user requests in a detailed breakdown with the help of Flamegraphs and Gantt Charts. Click on any span to see the entire trace represented beautifully, which will help you make sense of where issues actually occurred in the flow of requests.
+
+Read [more](https://signoz.io/distributed-tracing/).
+
+![distributed-tracing-cover](https://github.com/user-attachments/assets/e32c85ac-e2f4-4f85-9b3c-cbf38e029edf)
+
+### Metrics and Dashboards
+
+Ingest metrics from your infrastructure or applications and create customized dashboards to monitor them. Create visualization that suits your needs with a variety of panel types like pie chart, time-series, bar chart, etc.
+
+Create queries on your metrics data quickly with an easy-to-use metrics query builder. Add multiple queries and combine those queries with formulae to create really complex queries quickly.
+
+Read [more](https://signoz.io/metrics-and-dashboards/).
+
+![metrics-n-dashboards-cover](https://github.com/user-attachments/assets/8b5962ad-be24-4565-be1c-b1742dd03e85)

 ### Alerts

-<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
+Use alerts in SigNoz to get notified when anything unusual happens in your application. You can set alerts on any type of telemetry signal (logs, metrics, traces), create thresholds and set up a notification channel to get notified. Advanced features like alert history and anomaly detection can help you create smarter alerts.
+
+Alerts in SigNoz help you identify issues proactively so that you can address them before they reach your customers.
+
+Read [more](https://signoz.io/alerts-management/).
+
+![alerts-management-cover](https://github.com/user-attachments/assets/03d8e800-ed49-4d2b-b17e-dba8d9315e77)
+
+### Exceptions Monitoring
+
+Monitor exceptions automatically in Python, Java, Ruby, and Javascript. For other languages, just drop in a few lines of code and start monitoring exceptions.
+
+See the detailed stack trace for all exceptions caught in your application. You can also log in custom attributes to add more context to your exceptions. For example, you can add attributes to identify users for which exceptions occurred.
+
+Read [more](https://signoz.io/exceptions-monitoring/).
+
+![exceptions-monitoring-cover](https://github.com/user-attachments/assets/9b3f6de6-0aa7-4914-9fd9-7a36cb3d5d6d)

 <br /><br />

+## Why SigNoz?
+
+SigNoz is a single tool for all your monitoring and observability needs. Here are a few reasons why you should choose SigNoz:
+
+- Single tool for observability(logs, metrics, and traces)
+
+- Built on top of [OpenTelemetry](https://opentelemetry.io/), the open-source standard which frees you from any type of vendor lock-in
+
+- Correlated logs, metrics and traces for much richer context while debugging
+
+- Uses ClickHouse (used by likes of Uber & Cloudflare) as datastore - an extremely fast and highly optimized storage for observability data
+
+- DIY Query builder, PromQL, and ClickHouse queries to fulfill all your use-cases around querying observability data
+
+- Open-Source - you can use open-source, our [cloud service](https://signoz.io/teams/) or a mix of both based on your use case
+
+## Getting Started
+
+### Create a SigNoz Cloud Account
+
+SigNoz cloud is the easiest way to get started with SigNoz. Our cloud service is for those users who want to spend more time in getting insights for their application performance without worrying about maintenance.
+
+[Get started for free](https://signoz.io/teams/)
+
+### Deploy using Docker(self-hosted)
+
+Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using docker
+
+The [troubleshooting instructions](https://signoz.io/docs/install/troubleshooting/) may be helpful if you face any issues.
+
+<p>  </p>
+
+### Deploy in Kubernetes using Helm(self-hosted)
+
+Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts
+
+<br /><br />
+
+We also offer managed services in your infra. Check our [pricing plans](https://signoz.io/pricing/) for all details.

 ## Join our Slack community

@@ -78,64 +141,22 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
 <br /><br />

-## Features:
-
-- Unified UI for metrics, traces and logs. No need to switch from Prometheus to Jaeger to debug issues, or use a logs tool like Elastic separate from your metrics and traces stack.
-- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
-- Slowest endpoints in your application
-- See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
-- Filter traces by service name, operation, latency, error, tags/annotations.
-- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
-- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s cluster
-- Lightning quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
-- End-to-End visibility into infrastructure performance, ingest metrics from all kinds of host environments
-- Easy to set alerts with DIY query builder
-
-<br /><br />
-
-## Why SigNoz?
-
-Being developers, we found it annoying to rely on closed source SaaS vendors for every small feature we wanted. Closed source vendors often surprise you with huge month end bills without any transparency.
-
-We wanted to make a self-hosted & open source version of tools like DataDog, NewRelic for companies that have privacy and security concerns about having customer data going to third party services.
-
-Being open source also gives you complete control of your configuration, sampling, uptimes. You can also build modules over SigNoz to extend business specific capabilities
-
 ### Languages supported:

-We support [OpenTelemetry](https://opentelemetry.io) as the library which you can use to instrument your applications. So any framework and language supported by OpenTelemetry is also supported by SigNoz. Some of the main supported languages are:
+SigNoz supports all major programming languages for monitoring. Any framework and language supported by OpenTelemetry is supported by SigNoz. Find instructions for instrumenting different languages below:

-- Java
-- Python
-- Node.js
-- Go
-- PHP
-- .NET
-- Ruby
-- Elixir
-- Rust
+- [Java](https://signoz.io/docs/instrumentation/java/)
+- [Python](https://signoz.io/docs/instrumentation/python/)
+- [Node.js or Javascript](https://signoz.io/docs/instrumentation/javascript/)
+- [Go](https://signoz.io/docs/instrumentation/golang/)
+- [PHP](https://signoz.io/docs/instrumentation/php/)
+- [.NET](https://signoz.io/docs/instrumentation/dotnet/)
+- [Ruby](https://signoz.io/docs/instrumentation/ruby-on-rails/)
+- [Elixir](https://signoz.io/docs/instrumentation/elixir/)
+- [Rust](https://signoz.io/docs/instrumentation/rust/)
+- [Swift](https://signoz.io/docs/instrumentation/swift/)

 You can find the complete list of languages here - https://opentelemetry.io/docs/

 <br /><br />

-## Getting Started
-
-### Deploy using Docker
-
-Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using docker
-
-The [troubleshooting instructions](https://signoz.io/docs/install/troubleshooting/) may be helpful if you face any issues.
-
-<p>  </p>
-
-### Deploy in Kubernetes using Helm
-
-Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts
+You can find our entire documentation [here](https://signoz.io/docs/introduction/).

 <br /><br />

@@ -144,9 +165,11 @@ Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_cha

 ### SigNoz vs Prometheus

-Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great.
+Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics, logs and traces, then current experience of stitching together Prometheus & other tools is not great.

-Our goal is to provide an integrated UI between metrics & traces - similar to what SaaS vendors like Datadog provides - and give advanced filtering and aggregation over traces, something which Jaeger currently lack.
+SigNoz is a one-stop solution for metrics and other telemetry signals. And because you will use the same standard(OpenTelemetry) to collect all telemetry signals, you can also correlate these signals to troubleshoot quickly.
+
+For example, if you see that there are issues with infrastructure metrics of your k8s cluster at a timestamp, you can jump to other signals like logs and traces to understand the issue quickly.

 <p>  </p>

@@ -158,6 +181,7 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:

 - Jaegar UI doesn’t show any metrics on traces or on filtered traces
 - Jaeger can’t get aggregates on filtered traces. For example, p99 latency of requests which have tag - customer_type='premium'. This can be done easily on SigNoz
+- You can also go from traces to logs easily in SigNoz

 <p>  </p>
```

Note: the image URLs marked with `+` above are reconstructed from the extraction where visible; where the extraction dropped a URL, the alt text is kept and the asset hash should be treated as unverified.
**`README.zh-cn.md`**

```diff
@@ -12,9 +12,9 @@

 <h3 align="center">
   <a href="https://signoz.io/docs"><b>文档</b></a> •
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>中文ReadMe</b></a> •
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>德文ReadMe</b></a> •
-  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>葡萄牙语ReadMe</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>中文ReadMe</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>德文ReadMe</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>葡萄牙语ReadMe</b></a> •
   <a href="https://signoz.io/slack"><b>Slack 社区</b></a> •
   <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
 </h3>
```
95
conf/example.yaml
Normal file
95
conf/example.yaml
Normal file
@@ -0,0 +1,95 @@
|
||||
##################### SigNoz Configuration Example #####################
|
||||
#
|
||||
# Do not modify this file
|
||||
#
|
||||
|
||||
##################### Instrumentation #####################
|
||||
instrumentation:
|
||||
logs:
|
||||
# The log level to use.
|
||||
level: info
|
||||
traces:
|
||||
# Whether to enable tracing.
|
||||
enabled: false
|
||||
processors:
|
||||
batch:
|
||||
exporter:
|
||||
otlp:
|
||||
endpoint: localhost:4317
|
||||
metrics:
|
||||
# Whether to enable metrics.
|
||||
enabled: true
|
||||
readers:
|
||||
pull:
|
||||
exporter:
|
||||
prometheus:
|
||||
host: "0.0.0.0"
|
||||
port: 9090
|
||||
|
||||
##################### Web #####################
|
||||
web:
|
||||
# Whether to enable the web frontend
|
||||
enabled: true
|
||||
# The prefix to serve web on
|
||||
prefix: /
|
||||
# The directory containing the static build files.
|
||||
directory: /etc/signoz/web
|
||||
|
||||
##################### Cache #####################
|
||||
cache:
|
||||
# specifies the caching provider to use.
|
||||
provider: memory
|
||||
# memory: Uses in-memory caching.
|
||||
memory:
|
||||
# Time-to-live for cache entries in memory. Specify the duration in ns
|
||||
ttl: 60000000000
|
||||
# The interval at which the cache will be cleaned up
|
||||
cleanupInterval: 1m
|
||||
# redis: Uses Redis as the caching backend.
|
||||
redis:
|
||||
# The hostname or IP address of the Redis server.
|
||||
host: localhost
|
||||
# The port on which the Redis server is running. Default is usually 6379.
|
||||
port: 6379
|
||||
# The password for authenticating with the Redis server, if required.
|
||||
password:
|
||||
# The Redis database number to use
|
||||
db: 0
|
||||
|
||||
##################### SQLStore #####################
|
||||
sqlstore:
|
||||
# specifies the SQLStore provider to use.
|
||||
provider: sqlite
|
||||
# The maximum number of open connections to the database.
|
||||
max_open_conns: 100
|
||||
sqlite:
|
||||
# The path to the SQLite database file.
|
||||
path: /var/lib/signoz/signoz.db
|
||||
|
||||
|
||||
##################### APIServer #####################
|
||||
apiserver:
|
||||
timeout:
|
||||
default: 60s
|
||||
max: 600s
|
||||
excluded_routes:
|
||||
- /api/v1/logs/tail
|
||||
- /api/v3/logs/livetail
|
||||
logging:
|
||||
excluded_routes:
|
||||
- /api/v1/health
|
||||
|
||||
|
||||
##################### TelemetryStore #####################
|
||||
telemetrystore:
|
||||
# specifies the telemetrystore provider to use.
|
||||
provider: clickhouse
|
||||
clickhouse:
|
||||
# The DSN to use for ClickHouse.
|
||||
dsn: http://localhost:9000
|
||||
# Maximum number of idle connections in the connection pool.
|
||||
max_idle_conns: 50
|
||||
# Maximum number of open connections to the database.
|
||||
max_open_conns: 100
|
||||
# Maximum time to wait for a connection to be established.
|
||||
dial_timeout: 5s
|
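With `metrics.enabled: true` as above, the Prometheus pull reader serves metrics on port 9090. Assuming that port is reachable from your shell, a quick sanity check could be:

```sh
# Fetch the first few metrics exposed by the pull reader
curl -s http://localhost:9090/metrics | head -n 20
```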
||||
@@ -18,65 +18,64 @@ Now run the following command to install:
|
||||
|
||||
### Using Docker Compose
|
||||
|
||||
If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
|
||||
If you don't have docker compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
|
||||
to set up docker compose before proceeding with the next steps.
|
||||
|
||||
For x86 chip (amd):
|
||||
|
||||
```sh
|
||||
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
|
||||
cd deploy/docker
|
||||
docker compose up -d
|
||||
```
|
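Before opening the UI, you may want to confirm the containers came up cleanly (a generic Docker check, using the service names from the compose file):

```sh
docker compose ps                      # all services should be running/healthy
docker compose logs -f otel-collector  # tail collector logs if data is missing
```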
||||
|
||||
Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see
|
||||
the data generated from hotrod in SigNoz UI.
|
||||
Open http://localhost:3301 in your favourite browser.
|
||||
|
||||
## Kubernetes
|
||||
|
||||
### Using Helm
|
||||
|
||||
#### Bring up SigNoz cluster
|
||||
To start collecting logs and metrics from your infrastructure, run the following command:
|
||||
|
||||
```sh
|
||||
helm repo add signoz https://charts.signoz.io
|
||||
|
||||
kubectl create ns platform
|
||||
|
||||
helm -n platform install my-release signoz/signoz
|
||||
cd generator/infra
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
To access the UI, you can `port-forward` the frontend service:
|
||||
To start generating sample traces, run the following command:
|
||||
|
||||
```sh
|
||||
kubectl -n platform port-forward svc/my-release-frontend 3301:3301
|
||||
cd generator/hotrod
|
||||
docker compose up -d
|
||||
```
|
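If traces still don't show up, a quick smoke test is to post an empty (but well-formed) OTLP payload to the collector's HTTP receiver, assuming port 4318 is published as in the compose files later in this diff; a successful request returns an empty JSON object:

```sh
curl -s -X POST http://localhost:4318/v1/traces \
  -H 'Content-Type: application/json' \
  -d '{"resourceSpans":[]}'
```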
||||
|
||||
Open http://localhost:3301 in your favourite browser. A few minutes after you generate load
|
||||
from the HotROD application, you should see the data generated from hotrod in SigNoz UI.
|
||||
In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.
|
||||
|
||||
#### Test HotROD application with SigNoz
|
||||
For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker/).
|
||||
|
||||
## Docker Swarm
|
||||
|
||||
To install SigNoz using Docker Swarm, run the following command:
|
||||
|
||||
```sh
|
||||
kubectl create ns sample-application
|
||||
|
||||
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
|
||||
cd deploy/docker-swarm
|
||||
docker stack deploy -c docker-compose.yaml signoz
|
||||
```
|
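You can confirm the stack has converged before opening the UI:

```sh
docker stack services signoz   # REPLICAS should read n/n for every service
```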
||||
|
||||
To generate load:
|
||||
Open http://localhost:3301 in your favourite browser.
|
||||
|
||||
To start collecting logs and metrics from your infrastructure, run the following command:
|
||||
|
||||
```sh
|
||||
kubectl -n sample-application run strzal --image=djbingham/curl \
|
||||
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
|
||||
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
|
||||
cd generator/infra
|
||||
docker stack deploy -c docker-compose.yaml infra
|
||||
```
|
||||
|
||||
To stop load:
|
||||
To start generating sample traces, run the following command:
|
||||
|
||||
```sh
|
||||
kubectl -n sample-application run strzal --image=djbingham/curl \
|
||||
--restart='OnFailure' -i --tty --rm --command -- curl \
|
||||
http://locust-master:8089/stop
|
||||
cd generator/hotrod
|
||||
docker stack deploy -c docker-compose.yaml hotrod
|
||||
```
|
||||
|
||||
In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.
|
||||
|
||||
For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker-swarm/).
|
||||
|
||||
## Uninstall / Troubleshoot
|
||||
|
||||
Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more.
|
||||
|
||||
|
||||
@@ -10,14 +10,14 @@
|
||||
<host>zookeeper-1</host>
|
||||
<port>2181</port>
|
||||
</node>
|
||||
<!-- <node index="2">
|
||||
<node index="2">
|
||||
<host>zookeeper-2</host>
|
||||
<port>2181</port>
|
||||
</node>
|
||||
<node index="3">
|
||||
<host>zookeeper-3</host>
|
||||
<port>2181</port>
|
||||
</node> -->
|
||||
</node>
|
||||
</zookeeper>
|
||||
|
||||
<!-- Configuration of clusters that could be used in Distributed tables.
|
||||
@@ -58,7 +58,7 @@
|
||||
<!-- <priority>1</priority> -->
|
||||
</replica>
|
||||
</shard>
|
||||
<!-- <shard>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>clickhouse-2</host>
|
||||
<port>9000</port>
|
||||
@@ -69,7 +69,7 @@
|
||||
<host>clickhouse-3</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard> -->
|
||||
</shard>
|
||||
</cluster>
|
||||
</remote_servers>
|
||||
</clickhouse>
|
||||
</clickhouse>
|
||||
@@ -72,4 +72,4 @@
|
||||
</shard> -->
|
||||
</cluster>
|
||||
</remote_servers>
|
||||
</clickhouse>
|
||||
</clickhouse>
|
||||
@@ -370,7 +370,7 @@
|
||||
|
||||
<!-- Path to temporary data for processing hard queries. -->
|
||||
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
|
||||
|
||||
|
||||
<!-- Disable AuthType plaintext_password and no_password for ACL. -->
|
||||
<!-- <allow_plaintext_password>0</allow_plaintext_password> -->
|
||||
<!-- <allow_no_password>0</allow_no_password> -->
|
||||
@@ -652,12 +652,12 @@
|
||||
|
||||
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
|
||||
-->
|
||||
|
||||
|
||||
<macros>
|
||||
<shard>01</shard>
|
||||
<replica>example01-01-1</replica>
|
||||
</macros>
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
|
||||
@@ -716,7 +716,7 @@
|
||||
asynchronous_metrics - send data from table system.asynchronous_metrics
|
||||
status_info - send data from different components of CH, e.g. Dictionaries status
|
||||
-->
|
||||
<!--
|
||||
|
||||
<prometheus>
|
||||
<endpoint>/metrics</endpoint>
|
||||
<port>9363</port>
|
||||
@@ -726,7 +726,6 @@
|
||||
<asynchronous_metrics>true</asynchronous_metrics>
|
||||
<status_info>true</status_info>
|
||||
</prometheus>
|
||||
-->
|
||||
|
||||
<!-- Query log. Used only for queries with setting log_queries = 1. -->
|
||||
<query_log>
|
||||
0
deploy/common/dashboards/.gitkeep
Normal file
@@ -44,7 +44,7 @@ server {
|
||||
location /api {
|
||||
proxy_pass http://query-service:8080/api;
|
||||
# connection will be closed if no data is read for 600s between successive read operations
|
||||
proxy_read_timeout 600s;
|
||||
proxy_read_timeout 600s;
|
||||
}
|
||||
|
||||
location /ws {
|
||||
@@ -12,10 +12,10 @@ alerting:
|
||||
- alertmanager:9093
|
||||
|
||||
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
|
||||
rule_files:
|
||||
rule_files: []
|
||||
# - "first_rules.yml"
|
||||
# - "second_rules.yml"
|
||||
- 'alerts.yml'
|
||||
# - 'alerts.yml'
|
||||
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
# Here it's Prometheus itself.
|
||||
0
deploy/docker-swarm/clickhouse-setup/.gitkeep
Normal file
@@ -1,35 +0,0 @@
|
||||
global:
|
||||
resolve_timeout: 1m
|
||||
slack_api_url: 'https://hooks.slack.com/services/xxx'
|
||||
|
||||
route:
|
||||
receiver: 'slack-notifications'
|
||||
|
||||
receivers:
|
||||
- name: 'slack-notifications'
|
||||
slack_configs:
|
||||
- channel: '#alerts'
|
||||
send_resolved: true
|
||||
icon_url: https://avatars3.githubusercontent.com/u/3380462
|
||||
title: |-
|
||||
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
|
||||
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
|
||||
{{" "}}(
|
||||
{{- with .CommonLabels.Remove .GroupLabels.Names }}
|
||||
{{- range $index, $label := .SortedPairs -}}
|
||||
{{ if $index }}, {{ end }}
|
||||
{{- $label.Name }}="{{ $label.Value -}}"
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
)
|
||||
{{- end }}
|
||||
text: >-
|
||||
{{ range .Alerts -}}
|
||||
*Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
|
||||
|
||||
*Description:* {{ .Annotations.description }}
|
||||
|
||||
*Details:*
|
||||
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
@@ -1,11 +0,0 @@
|
||||
groups:
|
||||
- name: ExampleCPULoadGroup
|
||||
rules:
|
||||
- alert: HighCpuLoad
|
||||
expr: system_cpu_load_average_1m > 0.1
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: High CPU load
|
||||
description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
@@ -1,75 +0,0 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
|
||||
Optional. If you don't use replicated tables, you could omit that.
|
||||
|
||||
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
|
||||
-->
|
||||
<zookeeper>
|
||||
<node index="1">
|
||||
<host>zookeeper-1</host>
|
||||
<port>2181</port>
|
||||
</node>
|
||||
<!-- <node index="2">
|
||||
<host>zookeeper-2</host>
|
||||
<port>2181</port>
|
||||
</node>
|
||||
<node index="3">
|
||||
<host>zookeeper-3</host>
|
||||
<port>2181</port>
|
||||
</node> -->
|
||||
</zookeeper>
|
||||
|
||||
<!-- Configuration of clusters that could be used in Distributed tables.
|
||||
https://clickhouse.com/docs/en/operations/table_engines/distributed/
|
||||
-->
|
||||
<remote_servers>
|
||||
<cluster>
|
||||
<!-- Inter-server per-cluster secret for Distributed queries
|
||||
default: no secret (no authentication will be performed)
|
||||
|
||||
If set, then Distributed queries will be validated on shards, so at least:
|
||||
- such cluster should exist on the shard,
|
||||
- such cluster should have the same secret.
|
||||
|
||||
And also (and which is more important), the initial_user will
|
||||
be used as current user for the query.
|
||||
|
||||
Right now the protocol is pretty simple and it only takes into account:
|
||||
- cluster name
|
||||
- query
|
||||
|
||||
It would also be nice if the following were implemented:
|
||||
- source hostname (see interserver_http_host), but then it will depend on DNS,
|
||||
it can use an IP address instead, but then you need to get it right on the initiator node.
|
||||
- target hostname / ip address (same notes as for source hostname)
|
||||
- time-based security tokens
|
||||
-->
|
||||
<!-- <secret></secret> -->
|
||||
<shard>
|
||||
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
|
||||
<!-- <internal_replication>false</internal_replication> -->
|
||||
<!-- Optional. Shard weight when writing data. Default: 1. -->
|
||||
<!-- <weight>1</weight> -->
|
||||
<replica>
|
||||
<host>clickhouse</host>
|
||||
<port>9000</port>
|
||||
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
|
||||
<!-- <priority>1</priority> -->
|
||||
</replica>
|
||||
</shard>
|
||||
<!-- <shard>
|
||||
<replica>
|
||||
<host>clickhouse-2</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>clickhouse-3</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard> -->
|
||||
</cluster>
|
||||
</remote_servers>
|
||||
</clickhouse>
|
||||
File diff suppressed because it is too large
@@ -1,288 +0,0 @@
|
||||
version: "3.9"
|
||||
|
||||
x-clickhouse-defaults: &clickhouse-defaults
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
tty: true
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
depends_on:
|
||||
- zookeeper-1
|
||||
# - zookeeper-2
|
||||
# - zookeeper-3
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--spider",
|
||||
"-q",
|
||||
"0.0.0.0:8123/ping"
|
||||
]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
|
||||
x-db-depend: &db-depend
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- otel-collector-migrator
|
||||
# - clickhouse-2
|
||||
# - clickhouse-3
|
||||
|
||||
|
||||
services:
|
||||
zookeeper-1:
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
hostname: zookeeper-1
|
||||
user: root
|
||||
ports:
|
||||
- "2181:2181"
|
||||
- "2888:2888"
|
||||
- "3888:3888"
|
||||
volumes:
|
||||
- ./data/zookeeper-1:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=1
|
||||
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
# zookeeper-2:
|
||||
# image: bitnami/zookeeper:3.7.0
|
||||
# hostname: zookeeper-2
|
||||
# user: root
|
||||
# ports:
|
||||
# - "2182:2181"
|
||||
# - "2889:2888"
|
||||
# - "3889:3888"
|
||||
# volumes:
|
||||
# - ./data/zookeeper-2:/bitnami/zookeeper
|
||||
# environment:
|
||||
# - ZOO_SERVER_ID=2
|
||||
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
|
||||
# - ALLOW_ANONYMOUS_LOGIN=yes
|
||||
# - ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
# zookeeper-3:
|
||||
# image: bitnami/zookeeper:3.7.0
|
||||
# hostname: zookeeper-3
|
||||
# user: root
|
||||
# ports:
|
||||
# - "2183:2181"
|
||||
# - "2890:2888"
|
||||
# - "3890:3888"
|
||||
# volumes:
|
||||
# - ./data/zookeeper-3:/bitnami/zookeeper
|
||||
# environment:
|
||||
# - ZOO_SERVER_ID=3
|
||||
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
|
||||
# - ALLOW_ANONYMOUS_LOGIN=yes
|
||||
# - ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
clickhouse:
|
||||
<<: *clickhouse-defaults
|
||||
hostname: clickhouse
|
||||
# ports:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
# - "9181:9181"
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
|
||||
# clickhouse-2:
|
||||
# <<: *clickhouse-defaults
|
||||
# hostname: clickhouse-2
|
||||
# ports:
|
||||
# - "9001:9000"
|
||||
# - "8124:8123"
|
||||
# - "9182:9181"
|
||||
# volumes:
|
||||
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
# - ./data/clickhouse-2/:/var/lib/clickhouse/
|
||||
|
||||
# clickhouse-3:
|
||||
# <<: *clickhouse-defaults
|
||||
# hostname: clickhouse-3
|
||||
# ports:
|
||||
# - "9002:9000"
|
||||
# - "8125:8123"
|
||||
# - "9183:9181"
|
||||
# volumes:
|
||||
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
# - ./data/clickhouse-3/:/var/lib/clickhouse/
|
||||
|
||||
alertmanager:
|
||||
image: signoz/alertmanager:0.23.5
|
||||
volumes:
|
||||
- ./data/alertmanager:/data
|
||||
command:
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
depends_on:
|
||||
- query-service
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
query-service:
|
||||
image: signoz/query-service:0.49.1
|
||||
command:
|
||||
[
|
||||
"-config=/root/config/prometheus.yml",
|
||||
# "--prefer-delta=true"
|
||||
]
|
||||
# ports:
|
||||
# - "6060:6060" # pprof port
|
||||
# - "8080:8080" # query-service port
|
||||
volumes:
|
||||
- ./prometheus.yml:/root/config/prometheus.yml
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-swarm
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--spider",
|
||||
"-q",
|
||||
"localhost:8080/api/v1/health"
|
||||
]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
<<: *db-depend
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.48.0
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
depends_on:
|
||||
- alertmanager
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
otel-collector:
|
||||
image: signoz/signoz-otel-collector:0.102.2
|
||||
command:
|
||||
[
|
||||
"--config=/etc/otel-collector-config.yaml",
|
||||
"--manager-config=/etc/manager-config.yaml",
|
||||
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||
]
|
||||
user: root # required for reading docker container logs
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||
- /:/hostfs:ro
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
|
||||
- DOCKER_MULTI_NODE_CLUSTER=false
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
||||
# - "9411:9411" # Zipkin port
|
||||
# - "13133:13133" # Health check extension
|
||||
# - "14250:14250" # Jaeger gRPC
|
||||
# - "14268:14268" # Jaeger thrift HTTP
|
||||
# - "55678:55678" # OpenCensus receiver
|
||||
# - "55679:55679" # zPages extension
|
||||
deploy:
|
||||
mode: global
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- otel-collector-migrator
|
||||
- query-service
|
||||
|
||||
otel-collector-migrator:
|
||||
image: signoz/signoz-schema-migrator:0.102.2
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 5s
|
||||
command:
|
||||
- "--dsn=tcp://clickhouse:9000"
|
||||
depends_on:
|
||||
- clickhouse
|
||||
# - clickhouse-2
|
||||
# - clickhouse-3
|
||||
|
||||
logspout:
|
||||
image: "gliderlabs/logspout:v3.2.14"
|
||||
volumes:
|
||||
- /etc/hostname:/etc/host_hostname:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
command: syslog+tcp://otel-collector:2255
|
||||
depends_on:
|
||||
- otel-collector
|
||||
deploy:
|
||||
mode: global
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:1.30
|
||||
command: [ "all" ]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
|
||||
load-hotrod:
|
||||
image: "signoz/locust:1.2.3"
|
||||
hostname: load-hotrod
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
NO_PROXY: standalone
|
||||
TASK_DELAY_FROM: 5
|
||||
TASK_DELAY_TO: 30
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ../common/locust-scripts:/locust
|
||||
@@ -1,31 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS signoz_index (
|
||||
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
|
||||
traceID String CODEC(ZSTD(1)),
|
||||
spanID String CODEC(ZSTD(1)),
|
||||
parentSpanID String CODEC(ZSTD(1)),
|
||||
serviceName LowCardinality(String) CODEC(ZSTD(1)),
|
||||
name LowCardinality(String) CODEC(ZSTD(1)),
|
||||
kind Int32 CODEC(ZSTD(1)),
|
||||
durationNano UInt64 CODEC(ZSTD(1)),
|
||||
tags Array(String) CODEC(ZSTD(1)),
|
||||
tagsKeys Array(String) CODEC(ZSTD(1)),
|
||||
tagsValues Array(String) CODEC(ZSTD(1)),
|
||||
statusCode Int64 CODEC(ZSTD(1)),
|
||||
references String CODEC(ZSTD(1)),
|
||||
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
|
||||
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
|
||||
component Nullable(String) CODEC(ZSTD(1)),
|
||||
dbSystem Nullable(String) CODEC(ZSTD(1)),
|
||||
dbName Nullable(String) CODEC(ZSTD(1)),
|
||||
dbOperation Nullable(String) CODEC(ZSTD(1)),
|
||||
peerService Nullable(String) CODEC(ZSTD(1)),
|
||||
INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
|
||||
INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
|
||||
INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
|
||||
INDEX idx_kind kind TYPE minmax GRANULARITY 4,
|
||||
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
|
||||
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
|
||||
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
|
||||
) ENGINE MergeTree()
|
||||
PARTITION BY toDate(timestamp)
|
||||
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
|
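As a usage sketch against the schema above (removed in this release, but still illustrative): the `idx_traceID` bloom-filter index makes point lookups by trace ID cheap. The trace ID below is a placeholder:

```sh
clickhouse-client -q "
  SELECT serviceName, name, durationNano / 1e6 AS duration_ms
  FROM signoz_index
  WHERE traceID = '<trace-id>'
  ORDER BY timestamp"
```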
||||
@@ -1,182 +0,0 @@
|
||||
receivers:
|
||||
tcplog/docker:
|
||||
listen_address: "0.0.0.0:2255"
|
||||
operators:
|
||||
- type: regex_parser
|
||||
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
|
||||
timestamp:
|
||||
parse_from: attributes.timestamp
|
||||
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
||||
- type: move
|
||||
from: attributes["body"]
|
||||
to: body
|
||||
- type: remove
|
||||
field: attributes.timestamp
|
||||
# remove names from the filter below if you want to collect logs from those containers
|
||||
- type: filter
|
||||
id: signoz_logs_filter
|
||||
expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
|
||||
opencensus:
|
||||
endpoint: 0.0.0.0:55678
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:14250
|
||||
thrift_http:
|
||||
endpoint: 0.0.0.0:14268
|
||||
# thrift_compact:
|
||||
# endpoint: 0.0.0.0:6831
|
||||
# thrift_binary:
|
||||
# endpoint: 0.0.0.0:6832
|
||||
hostmetrics:
|
||||
collection_interval: 30s
|
||||
root_path: /hostfs
|
||||
scrapers:
|
||||
cpu: {}
|
||||
load: {}
|
||||
memory: {}
|
||||
disk: {}
|
||||
filesystem: {}
|
||||
network: {}
|
||||
prometheus:
|
||||
config:
|
||||
global:
|
||||
scrape_interval: 60s
|
||||
scrape_configs:
|
||||
# otel-collector internal metrics
|
||||
- job_name: otel-collector
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost:8888
|
||||
labels:
|
||||
job_name: otel-collector
|
||||
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 10000
|
||||
send_batch_max_size: 11000
|
||||
timeout: 10s
|
||||
resourcedetection:
|
||||
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
||||
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
|
||||
timeout: 2s
|
||||
signozspanmetrics/cumulative:
|
||||
metrics_exporter: clickhousemetricswrite
|
||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||
dimensions_cache_size: 100000
|
||||
dimensions:
|
||||
- name: service.namespace
|
||||
default: default
|
||||
- name: deployment.environment
|
||||
default: default
|
||||
# This is added to ensure the uniqueness of the timeseries
|
||||
# Otherwise, identical timeseries produced by multiple replicas of
|
||||
# collectors result in incorrect APM metrics
|
||||
- name: signoz.collector.id
|
||||
- name: service.version
|
||||
- name: browser.platform
|
||||
- name: browser.mobile
|
||||
- name: k8s.cluster.name
|
||||
- name: k8s.node.name
|
||||
- name: k8s.namespace.name
|
||||
- name: host.name
|
||||
- name: host.type
|
||||
- name: container.name
|
||||
# memory_limiter:
|
||||
# # 80% of maximum memory up to 2G
|
||||
# limit_mib: 1500
|
||||
# # 25% of limit up to 2G
|
||||
# spike_limit_mib: 512
|
||||
# check_interval: 5s
|
||||
#
|
||||
# # 50% of the maximum memory
|
||||
# limit_percentage: 50
|
||||
# # 20% of max memory usage spike expected
|
||||
# spike_limit_percentage: 20
|
||||
# queued_retry:
|
||||
# num_workers: 4
|
||||
# queue_size: 100
|
||||
# retry_on_failure: true
|
||||
signozspanmetrics/delta:
|
||||
metrics_exporter: clickhousemetricswrite
|
||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||
dimensions_cache_size: 100000
|
||||
aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
|
||||
enable_exp_histogram: true
|
||||
dimensions:
|
||||
- name: service.namespace
|
||||
default: default
|
||||
- name: deployment.environment
|
||||
default: default
|
||||
# This is added to ensure the uniqueness of the timeseries
|
||||
# Otherwise, identical timeseries produced by multiple replicas of
|
||||
# collectors result in incorrect APM metrics
|
||||
- name: signoz.collector.id
|
||||
- name: service.version
|
||||
- name: browser.platform
|
||||
- name: browser.mobile
|
||||
- name: k8s.cluster.name
|
||||
- name: k8s.node.name
|
||||
- name: k8s.namespace.name
|
||||
- name: host.name
|
||||
- name: host.type
|
||||
- name: container.name
|
||||
|
||||
exporters:
|
||||
clickhousetraces:
|
||||
datasource: tcp://clickhouse:9000/signoz_traces
|
||||
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
||||
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true
|
||||
clickhousemetricswrite/prometheus:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
# logging: {}
|
||||
clickhouselogsexporter:
|
||||
dsn: tcp://clickhouse:9000/signoz_logs
|
||||
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
||||
timeout: 10s
|
||||
extensions:
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1777
|
||||
|
||||
service:
|
||||
telemetry:
|
||||
logs:
|
||||
encoding: json
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions: [health_check, zpages, pprof]
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [jaeger, otlp]
|
||||
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
|
||||
exporters: [clickhousetraces]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
metrics/generic:
|
||||
receivers: [hostmetrics]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
metrics/prometheus:
|
||||
receivers: [prometheus]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite/prometheus]
|
||||
logs:
|
||||
receivers: [otlp, tcplog/docker]
|
||||
processors: [batch]
|
||||
exporters: [clickhouselogsexporter]
|
||||
@@ -1,25 +0,0 @@
|
||||
# my global config
|
||||
global:
|
||||
scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
|
||||
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
|
||||
# scrape_timeout is set to the global default (10s).
|
||||
|
||||
# Alertmanager configuration
|
||||
alerting:
|
||||
alertmanagers:
|
||||
- static_configs:
|
||||
- targets:
|
||||
- alertmanager:9093
|
||||
|
||||
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
|
||||
rule_files:
|
||||
# - "first_rules.yml"
|
||||
# - "second_rules.yml"
|
||||
- 'alerts.yml'
|
||||
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
# Here it's Prometheus itself.
|
||||
scrape_configs: []
|
||||
|
||||
remote_read:
|
||||
- url: tcp://clickhouse:9000/signoz_metrics
|
||||
@@ -1,51 +0,0 @@
|
||||
server {
|
||||
listen 3301;
|
||||
server_name _;
|
||||
|
||||
gzip on;
|
||||
gzip_static on;
|
||||
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
|
||||
gzip_proxied any;
|
||||
gzip_vary on;
|
||||
gzip_comp_level 6;
|
||||
gzip_buffers 16 8k;
|
||||
gzip_http_version 1.1;
|
||||
|
||||
# to handle uri issue 414 from nginx
|
||||
client_max_body_size 24M;
|
||||
large_client_header_buffers 8 128k;
|
||||
|
||||
location / {
|
||||
if ( $uri = '/index.html' ) {
|
||||
add_header Cache-Control no-store always;
|
||||
}
|
||||
root /usr/share/nginx/html;
|
||||
index index.html index.htm;
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
|
||||
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
|
||||
proxy_pass http://query-service:8080;
|
||||
proxy_http_version 1.1;
|
||||
|
||||
# connection will be closed if no data is read for 600s between successive read operations
|
||||
proxy_read_timeout 600s;
|
||||
|
||||
# dont buffer the data send it directly to client.
|
||||
proxy_buffering off;
|
||||
proxy_cache off;
|
||||
}
|
||||
|
||||
location /api {
|
||||
proxy_pass http://query-service:8080/api;
|
||||
# connection will be closed if no data is read for 600s between successive read operations
|
||||
proxy_read_timeout 600s;
|
||||
}
|
||||
|
||||
# redirect server error pages to the static page /50x.html
|
||||
#
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
}
|
||||
281
deploy/docker-swarm/docker-compose.ha.yaml
Normal file
@@ -0,0 +1,281 @@
|
||||
version: "3"
|
||||
x-common: &common
|
||||
networks:
|
||||
- signoz-net
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
x-clickhouse-defaults: &clickhouse-defaults
|
||||
!!merge <<: *common
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
tty: true
|
||||
deploy:
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9363"
|
||||
signoz.io/path: "/metrics"
|
||||
depends_on:
|
||||
- zookeeper-1
|
||||
- zookeeper-2
|
||||
- zookeeper-3
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- wget
|
||||
- --spider
|
||||
- -q
|
||||
- 0.0.0.0:8123/ping
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
x-zookeeper-defaults: &zookeeper-defaults
|
||||
!!merge <<: *common
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
user: root
|
||||
deploy:
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9141"
|
||||
signoz.io/path: "/metrics"
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD-SHELL
|
||||
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
x-db-depend: &db-depend
|
||||
!!merge <<: *common
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- clickhouse-2
|
||||
- clickhouse-3
|
||||
- schema-migrator
|
||||
services:
|
||||
init-clickhouse:
|
||||
!!merge <<: *common
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |
|
||||
version="v0.0.1"
|
||||
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
|
||||
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
|
||||
cd /tmp
|
||||
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
|
||||
tar -xvzf histogram-quantile.tar.gz
|
||||
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
|
||||
volumes:
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
zookeeper-1:
|
||||
!!merge <<: *zookeeper-defaults
|
||||
# ports:
|
||||
# - "2181:2181"
|
||||
# - "2888:2888"
|
||||
# - "3888:3888"
|
||||
volumes:
|
||||
- ./clickhouse-setup/data/zookeeper-1:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=1
|
||||
- ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
|
||||
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
|
||||
zookeeper-2:
|
||||
!!merge <<: *zookeeper-defaults
|
||||
# ports:
|
||||
# - "2182:2181"
|
||||
# - "2889:2888"
|
||||
# - "3889:3888"
|
||||
volumes:
|
||||
- ./clickhouse-setup/data/zookeeper-2:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=2
|
||||
- ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
|
||||
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
|
||||
zookeeper-3:
|
||||
!!merge <<: *zookeeper-defaults
|
||||
# ports:
|
||||
# - "2183:2181"
|
||||
# - "2890:2888"
|
||||
# - "3890:3888"
|
||||
volumes:
|
||||
- ./clickhouse-setup/data/zookeeper-3:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=3
|
||||
- ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
|
||||
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
|
||||
clickhouse:
|
||||
!!merge <<: *clickhouse-defaults
|
||||
# TODO: needed for schema-migrator to work, remove this redundancy once we have a better solution
|
||||
hostname: clickhouse
|
||||
# ports:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
# - "9181:9181"
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
clickhouse-2:
|
||||
!!merge <<: *clickhouse-defaults
|
||||
hostname: clickhouse-2
|
||||
# ports:
|
||||
# - "9001:9000"
|
||||
# - "8124:8123"
|
||||
# - "9182:9181"
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- ./clickhouse-setup/data/clickhouse-2/:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
clickhouse-3:
|
||||
!!merge <<: *clickhouse-defaults
|
||||
hostname: clickhouse-3
|
||||
# ports:
|
||||
# - "9002:9000"
|
||||
# - "8125:8123"
|
||||
# - "9183:9181"
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
alertmanager:
|
||||
!!merge <<: *common
|
||||
image: signoz/alertmanager:0.23.7
|
||||
command:
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
volumes:
|
||||
- ./clickhouse-setup/data/alertmanager:/data
|
||||
depends_on:
|
||||
- query-service
|
||||
query-service:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/query-service:0.70.1
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
# ports:
|
||||
# - "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
|
||||
- ../common/dashboards:/root/config/dashboards
|
||||
- ./clickhouse-setup/data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-swarm
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- wget
|
||||
- --spider
|
||||
- -q
|
||||
- localhost:8080/api/v1/health
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
frontend:
|
||||
!!merge <<: *common
|
||||
image: signoz/frontend:0.70.1
|
||||
depends_on:
|
||||
- alertmanager
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:0.111.25
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
- --copy-path=/var/tmp/collector-config.yaml
|
||||
- --feature-gates=-pkg.translator.prometheus.NormalizeName
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
deploy:
|
||||
replicas: 3
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- schema-migrator
|
||||
- query-service
|
||||
schema-migrator:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:0.111.24
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 5s
|
||||
entrypoint: sh
|
||||
command:
|
||||
- -c
|
||||
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
|
||||
depends_on:
|
||||
- clickhouse
|
||||
networks:
|
||||
signoz-net:
|
||||
name: signoz-net
|
||||
volumes:
|
||||
alertmanager:
|
||||
name: signoz-alertmanager
|
||||
clickhouse:
|
||||
name: signoz-clickhouse
|
||||
clickhouse-2:
|
||||
name: signoz-clickhouse-2
|
||||
clickhouse-3:
|
||||
name: signoz-clickhouse-3
|
||||
sqlite:
|
||||
name: signoz-sqlite
|
||||
zookeeper-1:
|
||||
name: signoz-zookeeper-1
|
||||
zookeeper-2:
|
||||
name: signoz-zookeeper-2
|
||||
zookeeper-3:
|
||||
name: signoz-zookeeper-3
|
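Like the plain compose file, this HA variant is deployed as a swarm stack; a minimal sketch, assuming a multi-node swarm is already initialized:

```sh
cd deploy/docker-swarm
docker stack deploy -c docker-compose.ha.yaml signoz
```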
||||
209
deploy/docker-swarm/docker-compose.yaml
Normal file
@@ -0,0 +1,209 @@
|
||||
version: "3"
|
||||
x-common: &common
|
||||
networks:
|
||||
- signoz-net
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
x-clickhouse-defaults: &clickhouse-defaults
|
||||
!!merge <<: *common
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
tty: true
|
||||
deploy:
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9363"
|
||||
signoz.io/path: "/metrics"
|
||||
depends_on:
|
||||
- init-clickhouse
|
||||
- zookeeper-1
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- wget
|
||||
- --spider
|
||||
- -q
|
||||
- 0.0.0.0:8123/ping
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
x-zookeeper-defaults: &zookeeper-defaults
|
||||
!!merge <<: *common
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
user: root
|
||||
deploy:
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9141"
|
||||
signoz.io/path: "/metrics"
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD-SHELL
|
||||
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
x-db-depend: &db-depend
|
||||
!!merge <<: *common
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- schema-migrator
|
||||
services:
|
||||
init-clickhouse:
|
||||
!!merge <<: *common
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |
|
||||
version="v0.0.1"
|
||||
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
|
||||
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
|
||||
cd /tmp
|
||||
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
|
||||
tar -xvzf histogram-quantile.tar.gz
|
||||
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
|
||||
volumes:
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
zookeeper-1:
|
||||
!!merge <<: *zookeeper-defaults
|
||||
# ports:
|
||||
# - "2181:2181"
|
||||
# - "2888:2888"
|
||||
# - "3888:3888"
|
||||
volumes:
|
||||
- zookeeper-1:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=1
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
|
||||
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
|
||||
clickhouse:
|
||||
!!merge <<: *clickhouse-defaults
|
||||
# TODO: needed for clickhouse TCP connection
|
||||
hostname: clickhouse
|
||||
# ports:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
# - "9181:9181"
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- clickhouse:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
alertmanager:
|
||||
!!merge <<: *common
|
||||
image: signoz/alertmanager:0.23.7
|
||||
command:
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
volumes:
|
||||
- alertmanager:/data
|
||||
depends_on:
|
||||
- query-service
|
||||
query-service:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/query-service:0.70.1
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
# ports:
|
||||
# - "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
|
||||
- ../common/dashboards:/root/config/dashboards
|
||||
- sqlite:/var/lib/signoz/
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-swarm
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- wget
|
||||
- --spider
|
||||
- -q
|
||||
- localhost:8080/api/v1/health
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
frontend:
|
||||
!!merge <<: *common
|
||||
image: signoz/frontend:0.70.1
|
||||
depends_on:
|
||||
- alertmanager
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:0.111.25
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
- --copy-path=/var/tmp/collector-config.yaml
|
||||
- --feature-gates=-pkg.translator.prometheus.NormalizeName
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
deploy:
|
||||
replicas: 3
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- schema-migrator
|
||||
- query-service
|
||||
schema-migrator:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:0.111.24
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 5s
|
||||
entrypoint: sh
|
||||
command:
|
||||
- -c
|
||||
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
|
||||
depends_on:
|
||||
- clickhouse
|
||||
networks:
|
||||
signoz-net:
|
||||
name: signoz-net
|
||||
volumes:
|
||||
alertmanager:
|
||||
name: signoz-alertmanager
|
||||
clickhouse:
|
||||
name: signoz-clickhouse
|
||||
sqlite:
|
||||
name: signoz-sqlite
|
||||
zookeeper-1:
|
||||
name: signoz-zookeeper-1
|
||||
38
deploy/docker-swarm/generator/hotrod/docker-compose.yaml
Normal file
@@ -0,0 +1,38 @@
|
||||
version: "3"
|
||||
x-common: &common
|
||||
networks:
|
||||
- signoz-net
|
||||
extra_hosts:
|
||||
- host.docker.internal:host-gateway
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
services:
|
||||
hotrod:
|
||||
<<: *common
|
||||
image: jaegertracing/example-hotrod:1.61.0
|
||||
command: [ "all" ]
|
||||
environment:
|
||||
- OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318 #
|
||||
load-hotrod:
|
||||
<<: *common
|
||||
image: "signoz/locust:1.2.3"
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
NO_PROXY: standalone
|
||||
TASK_DELAY_FROM: 5
|
||||
TASK_DELAY_TO: 30
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ../../../common/locust-scripts:/locust
|
||||
|
||||
networks:
|
||||
signoz-net:
|
||||
name: signoz-net
|
||||
external: true
|
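Note that `signoz-net` is declared `external: true`, so the main SigNoz stack must already be running before this generator is deployed (matching the order shown in the README hunk above):

```sh
cd deploy/docker-swarm/generator/hotrod
docker stack deploy -c docker-compose.yaml hotrod
```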
||||
69
deploy/docker-swarm/generator/infra/docker-compose.yaml
Normal file
@@ -0,0 +1,69 @@
|
||||
version: "3"
|
||||
x-common: &common
|
||||
networks:
|
||||
- signoz-net
|
||||
extra_hosts:
|
||||
- host.docker.internal:host-gateway
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
deploy:
|
||||
mode: global
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
services:
|
||||
otel-agent:
|
||||
<<: *common
|
||||
image: otel/opentelemetry-collector-contrib:0.111.0
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
volumes:
|
||||
- ./otel-agent-config.yaml:/etc/otel-collector-config.yaml
|
||||
- /:/hostfs:ro
|
||||
environment:
|
||||
- SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
|
||||
# - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
|
||||
# Before exposing the ports, make sure the ports are not used by other services
|
||||
# ports:
|
||||
# - "4317:4317"
|
||||
# - "4318:4318"
|
||||
otel-metrics:
|
||||
<<: *common
|
||||
image: otel/opentelemetry-collector-contrib:0.111.0
|
||||
user: 0:0 # If you have security concerns, replace this with a `UID:GID` that has the necessary permissions on docker.sock
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
volumes:
|
||||
- ./otel-metrics-config.yaml:/etc/otel-collector-config.yaml
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
environment:
|
||||
- SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
|
||||
# - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
|
||||
# Before exposing the ports, make sure the ports are not used by other services
|
||||
# ports:
|
||||
# - "4317:4317"
|
||||
# - "4318:4318"
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
logspout:
|
||||
<<: *common
|
||||
image: "gliderlabs/logspout:v3.2.14"
|
||||
command: syslog+tcp://otel-agent:2255
|
||||
user: root
|
||||
volumes:
|
||||
- /etc/hostname:/etc/host_hostname:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
depends_on:
|
||||
- otel-agent
|
||||
|
||||
networks:
|
||||
signoz-net:
|
||||
name: signoz-net
|
||||
external: true
|
||||
102
deploy/docker-swarm/generator/infra/otel-agent-config.yaml
Normal file
@@ -0,0 +1,102 @@
|
||||
receivers:
|
||||
hostmetrics:
|
||||
collection_interval: 30s
|
||||
root_path: /hostfs
|
||||
scrapers:
|
||||
cpu: {}
|
||||
load: {}
|
||||
memory: {}
|
||||
disk: {}
|
||||
filesystem: {}
|
||||
network: {}
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
prometheus:
|
||||
config:
|
||||
global:
|
||||
scrape_interval: 60s
|
||||
scrape_configs:
|
||||
- job_name: otel-agent
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost:8888
|
||||
labels:
|
||||
job_name: otel-agent
|
||||
tcplog/docker:
|
||||
listen_address: "0.0.0.0:2255"
|
||||
operators:
|
||||
- type: regex_parser
|
||||
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
|
||||
timestamp:
|
||||
parse_from: attributes.timestamp
|
||||
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
||||
- type: move
|
||||
from: attributes["body"]
|
||||
to: body
|
||||
- type: remove
|
||||
field: attributes.timestamp
|
||||
# remove names from the filter below if you want to collect logs from those containers
|
||||
- type: filter
|
||||
id: signoz_logs_filter
|
||||
expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 10000
|
||||
send_batch_max_size: 11000
|
||||
timeout: 10s
|
||||
resourcedetection:
|
||||
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
||||
detectors:
|
||||
# - ec2
|
||||
# - gcp
|
||||
# - azure
|
||||
- env
|
||||
- system
|
||||
timeout: 2s
|
||||
extensions:
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1777
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
|
||||
tls:
|
||||
insecure: true
|
||||
headers:
|
||||
signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
|
||||
# debug: {}
|
||||
service:
|
||||
telemetry:
|
||||
logs:
|
||||
encoding: json
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions:
|
||||
- health_check
|
||||
- pprof
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [otlp]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [otlp]
|
||||
metrics/hostmetrics:
|
||||
receivers: [hostmetrics]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [otlp]
|
||||
metrics/prometheus:
|
||||
receivers: [prometheus]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
receivers: [otlp, tcplog/docker]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [otlp]
|
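To exercise the `tcplog/docker` receiver by hand, you can feed it one syslog-style line shaped the way logspout emits them; a rough sketch, assuming port 2255 is reachable from where you run it (e.g. exposed, or from a container on `signoz-net`) and `nc` is available:

```sh
# Fields: <pri>version timestamp container_id container_name pid - - body
printf '<14>1 2024-05-01T12:00:00.000Z abc123 my-app 42 - - hello from logspout\n' \
  | nc localhost 2255
```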
||||
103
deploy/docker-swarm/generator/infra/otel-metrics-config.yaml
Normal file
@@ -0,0 +1,103 @@
receivers:
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        - job_name: otel-metrics
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-metrics
        # For Docker daemon metrics to be scraped, it must be configured to expose
        # Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
        # - job_name: docker-daemon
        #   dockerswarm_sd_configs:
        #     - host: unix:///var/run/docker.sock
        #       role: nodes
        #   relabel_configs:
        #     - source_labels: [__meta_dockerswarm_node_address]
        #       target_label: __address__
        #       replacement: $1:9323
        - job_name: "dockerswarm"
          dockerswarm_sd_configs:
            - host: unix:///var/run/docker.sock
              role: tasks
          relabel_configs:
            - action: keep
              regex: running
              source_labels:
                - __meta_dockerswarm_task_desired_state
            - action: keep
              regex: true
              source_labels:
                - __meta_dockerswarm_service_label_signoz_io_scrape
            - regex: ([^:]+)(?::\d+)?
              replacement: $1
              source_labels:
                - __address__
              target_label: swarm_container_ip
            - separator: .
              source_labels:
                - __meta_dockerswarm_service_name
                - __meta_dockerswarm_task_slot
                - __meta_dockerswarm_task_id
              target_label: swarm_container_name
            - target_label: __address__
              source_labels:
                - swarm_container_ip
                - __meta_dockerswarm_service_label_signoz_io_port
              separator: ":"
            - source_labels:
                - __meta_dockerswarm_service_label_signoz_io_path
              target_label: __metrics_path__
            - source_labels:
                - __meta_dockerswarm_service_label_com_docker_stack_namespace
              target_label: namespace
            - source_labels:
                - __meta_dockerswarm_service_name
              target_label: service_name
            - source_labels:
                - __meta_dockerswarm_task_id
              target_label: service_instance_id
            - source_labels:
                - __meta_dockerswarm_node_hostname
              target_label: host_name
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  resourcedetection:
    detectors:
      - env
      - system
    timeout: 2s
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  pprof:
    endpoint: 0.0.0.0:1777
exporters:
  otlp:
    endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
    tls:
      insecure: true
    headers:
      signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
  # debug: {}
service:
  telemetry:
    logs:
      encoding: json
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - pprof
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [resourcedetection, batch]
      exporters: [otlp]
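The `dockerswarm` job above only keeps tasks whose service carries a `signoz.io/scrape` label, and derives the scrape address and path from `signoz.io/port` and `signoz.io/path`. A minimal sketch of opting a stack service into scraping; the service name and port are illustrative, and in swarm mode these should be service (deploy) labels so the `__meta_dockerswarm_service_label_*` metadata is populated:

services:
  my-app:
    image: example/my-app:latest   # placeholder image
    deploy:
      labels:
        signoz.io/scrape: "true"
        signoz.io/port: "9090"      # port exposing the metrics endpoint
        signoz.io/path: "/metrics"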
101
deploy/docker-swarm/otel-collector-config.yaml
Normal file
@@ -0,0 +1,101 @@
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system]
    timeout: 2s
  signozspanmetrics/delta:
    metrics_exporter: clickhousemetricswrite
    metrics_flush_interval: 60s
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
    dimensions_cache_size: 100000
    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
    enable_exp_histogram: true
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
      # This is added to ensure the uniqueness of the timeseries.
      # Otherwise, identical timeseries produced by multiple replicas of
      # collectors result in incorrect APM metrics.
      - name: signoz.collector.id
      - name: service.version
      - name: browser.platform
      - name: browser.mobile
      - name: k8s.cluster.name
      - name: k8s.node.name
      - name: k8s.namespace.name
      - name: host.name
      - name: host.type
      - name: container.name
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  pprof:
    endpoint: 0.0.0.0:1777
exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/signoz_traces
    low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
    use_new_schema: true
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  clickhousemetricswrite/prometheus:
    endpoint: tcp://clickhouse:9000/signoz_metrics
  clickhousemetricswritev2:
    dsn: tcp://clickhouse:9000/signoz_metrics
  clickhouselogsexporter:
    dsn: tcp://clickhouse:9000/signoz_logs
    timeout: 10s
    use_new_schema: true
  # debug: {}
service:
  telemetry:
    logs:
      encoding: json
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - pprof
  pipelines:
    traces:
      receivers: [otlp]
      processors: [signozspanmetrics/delta, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
    logs:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhouselogsexporter]
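The commented `# debug: {}` above hints at the collector's stock debug exporter. A minimal sketch of enabling it to troubleshoot a pipeline, assuming the `debug` exporter available in recent OpenTelemetry Collector builds; remove it again after debugging, since it logs every item it receives:

exporters:
  debug:
    verbosity: detailed   # prints full span/metric/log content to the collector log
service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [signozspanmetrics/delta, batch]
      exporters: [clickhousetraces, debug]   # fan out to both exporters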
1
deploy/docker/.env
Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=signoz
3
deploy/docker/clickhouse-setup/.deprecated
Normal file
@@ -0,0 +1,3 @@
This data directory is deprecated and will be removed in the future.
Please use the migration script under `scripts/volume-migration` to migrate data from bind mounts to Docker volumes.
The script also renames the project name to `signoz` and the network name to `signoz-net` (if not already in place).
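The migration this notice refers to replaces host bind mounts with named Docker volumes. A sketch of the shape of that change, using the ClickHouse data mount as it appears elsewhere in this diff (old bind-mount form first, new named-volume form second):

# Before: bind mount under the deprecated data directory
services:
  clickhouse:
    volumes:
      - ./data/clickhouse/:/var/lib/clickhouse/

# After: named volume, as used by the new docker-compose files
services:
  clickhouse:
    volumes:
      - clickhouse:/var/lib/clickhouse/
volumes:
  clickhouse: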
0
deploy/docker/clickhouse-setup/.gitkeep
Normal file
@@ -1,35 +0,0 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
  - name: 'slack-notifications'
    slack_configs:
      - channel: '#alerts'
        send_resolved: true
        icon_url: https://avatars3.githubusercontent.com/u/3380462
        title: |-
          [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
          {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
            {{" "}}(
            {{- with .CommonLabels.Remove .GroupLabels.Names }}
              {{- range $index, $label := .SortedPairs -}}
                {{ if $index }}, {{ end }}
                {{- $label.Name }}="{{ $label.Value -}}"
              {{- end }}
            {{- end -}}
            )
          {{- end }}
        text: >-
          {{ range .Alerts -}}
          *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

          *Description:* {{ .Annotations.description }}

          *Details:*
            {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
            {{ end }}
          {{ end }}
@@ -1,11 +0,0 @@
groups:
  - name: ExampleCPULoadGroup
    rules:
      - alert: HighCpuLoad
        expr: system_cpu_load_average_1m > 0.1
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: High CPU load
          description: "CPU load is > 0.1\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
@@ -1,41 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
  <storage_configuration>
    <disks>
      <default>
        <keep_free_space_bytes>10485760</keep_free_space_bytes>
      </default>
      <s3>
        <type>s3</type>
        <!-- For S3 cold storage,
             if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
             if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
             For GCS cold storage,
             endpoint should be https://storage.googleapis.com/<bucket-name>/data/
        -->
        <endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
        <access_key_id>ACCESS-KEY-ID</access_key_id>
        <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
        <!-- In case of S3, uncomment the configuration below if you want to read
             AWS credentials from environment variables when they exist. -->
        <!-- <use_environment_credentials>true</use_environment_credentials> -->
        <!-- In case of GCS, uncomment the configuration below, since GCS does
             not support batch deletion, which results in error messages in the logs. -->
        <!-- <support_batch_delete>false</support_batch_delete> -->
      </s3>
    </disks>
    <policies>
      <tiered>
        <volumes>
          <default>
            <disk>default</disk>
          </default>
          <s3>
            <disk>s3</disk>
            <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
          </s3>
        </volumes>
      </tiered>
    </policies>
  </storage_configuration>
</clickhouse>
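This storage configuration only takes effect when mounted into the ClickHouse container, and the compose files in this diff keep that mount commented out. A sketch of enabling the tiered (cold) storage policy by uncommenting the mount, exactly as it appears in the compose files here:

services:
  clickhouse:
    volumes:
      # uncomment to activate the tiered storage policy defined above
      - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml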
@@ -1,123 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
    <!-- See also the files in users.d directory where the settings can be overridden. -->

    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing a single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose a random replica from the set of replicas with the minimum number of errors
                 nearest_hostname - from the set of replicas with the minimum number of errors, choose the replica
                  with the minimum number of different symbols between the replica's hostname and the local hostname
                  (Hamming distance).
                 in_order - the first live replica is chosen in the specified order.
                 first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- See also the files in users.d directory where the password can be overridden.

                 Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
                  place its name in 'server' element inside 'ldap' element.
                 Example: <ldap><server>my_ldap_server</server></ldap>

                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
                  place 'kerberos' element instead of 'password' (and similar) elements.
                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
                  whose initiator's realm matches it.
                 Example: <kerberos />
                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>

                 How to generate a decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 The first line will be the password and the second the corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
                 The first line will be the password and the second the corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                    <ip>::/0</ip>

                 To open access only from localhost, specify:
                    <ip>::1</ip>
                    <ip>127.0.0.1</ip>

                 Each element of the list has one of the following forms:
                 <ip> IP address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.clickhouse.com.
                     To check access, a DNS query is performed and all received addresses are compared to the peer address.
                 <host_regexp> Regular expression for host names. Example: ^server\d\d-\d\d-\d\.clickhouse\.com$
                     To check access, a DNS PTR query is performed for the peer address and then the regexp is applied.
                     Then, for the result of the PTR query, another DNS query is performed and all received addresses are compared to the peer address.
                     It is strongly recommended that the regexp ends with $.
                 All results of DNS requests are cached until server restart.
            -->
            <networks>
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for the user. -->
            <profile>default</profile>

            <!-- Quota for the user. -->
            <quota>default</quota>

            <!-- User can create other users and grant rights to them. -->
            <!-- <access_management>1</access_management> -->
        </default>
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>
@@ -1,156 +0,0 @@
version: "2.4"

services:
  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    container_name: signoz-zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    tty: true
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
      - ./user_scripts:/var/lib/clickhouse/user_scripts/
    restart: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "0.0.0.0:8123/ping"
        ]
      interval: 30s
      timeout: 5s
      retries: 3

  alertmanager:
    container_name: signoz-alertmanager
    image: signoz/alertmanager:0.23.5
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.2}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
  otel-collector:
    container_name: signoz-otel-collector
    image: signoz/signoz-otel-collector:0.102.2
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    # user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /:/hostfs:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: [ "all" ]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
@@ -1,67 +0,0 @@
version: "2.4"

services:
  query-service:
    hostname: query-service
    build:
      context: "../../../"
      dockerfile: "./pkg/query-service/Dockerfile"
      args:
        LDFLAGS: ""
        TARGETPLATFORM: "${GOOS}/${GOARCH}"
    container_name: signoz-query-service
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    command:
      [
        "-config=/root/config/prometheus.yml",
        # "--prefer-delta=true"
      ]
    ports:
      - "6060:6060"
      - "8080:8080"
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      clickhouse:
        condition: service_healthy

  frontend:
    build:
      context: "../../../frontend"
      dockerfile: "./Dockerfile"
      args:
        TARGETOS: "${GOOS}"
        TARGETPLATFORM: "${GOARCH}"
    container_name: signoz-frontend
    environment:
      - FRONTEND_API_ENDPOINT=http://query-service:8080
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
@@ -1,308 +0,0 @@
version: "2.4"

x-clickhouse-defaults: &clickhouse-defaults
  restart: on-failure
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  depends_on:
    - zookeeper-1
    # - zookeeper-2
    # - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test:
      [
        "CMD",
        "wget",
        "--spider",
        "-q",
        "0.0.0.0:8123/ping"
      ]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-db-depend: &db-depend
  depends_on:
    clickhouse:
      condition: service_healthy
    otel-collector-migrator:
      condition: service_completed_successfully
    # clickhouse-2:
    #   condition: service_healthy
    # clickhouse-3:
    #   condition: service_healthy

services:

  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    container_name: signoz-zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-2:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-2
  #   hostname: zookeeper-2
  #   user: root
  #   ports:
  #     - "2182:2181"
  #     - "2889:2888"
  #     - "3889:3888"
  #   volumes:
  #     - ./data/zookeeper-2:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=2
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-3:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-3
  #   hostname: zookeeper-3
  #   user: root
  #   ports:
  #     - "2183:2181"
  #     - "2890:2888"
  #     - "3890:3888"
  #   volumes:
  #     - ./data/zookeeper-3:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=3
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    hostname: clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
      - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-2
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-3
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  alertmanager:
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
    container_name: signoz-alertmanager
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.49.1}
    container_name: signoz-query-service
    command:
      [
        "-config=/root/config/prometheus.yml",
        "-gateway-url=https://api.staging.signoz.cloud"
        # "--prefer-delta=true"
      ]
    # ports:
    #   - "6060:6060" # pprof port
    #   - "8080:8080" # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    <<: *db-depend

  frontend:
    image: signoz/frontend:${DOCKER_TAG:-0.49.1}
    container_name: signoz-frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.2}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.2}
    container_name: signoz-otel-collector
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /:/hostfs:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - DOCKER_MULTI_NODE_CLUSTER=false
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: [ "all" ]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
@@ -1,307 +0,0 @@
version: "2.4"

x-clickhouse-defaults: &clickhouse-defaults
  restart: on-failure
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  depends_on:
    - zookeeper-1
    # - zookeeper-2
    # - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test:
      [
        "CMD",
        "wget",
        "--spider",
        "-q",
        "0.0.0.0:8123/ping"
      ]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-db-depend: &db-depend
  depends_on:
    clickhouse:
      condition: service_healthy
    otel-collector-migrator:
      condition: service_completed_successfully
    # clickhouse-2:
    #   condition: service_healthy
    # clickhouse-3:
    #   condition: service_healthy

services:

  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    container_name: signoz-zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-2:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-2
  #   hostname: zookeeper-2
  #   user: root
  #   ports:
  #     - "2182:2181"
  #     - "2889:2888"
  #     - "3889:3888"
  #   volumes:
  #     - ./data/zookeeper-2:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=2
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-3:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-3
  #   hostname: zookeeper-3
  #   user: root
  #   ports:
  #     - "2183:2181"
  #     - "2890:2888"
  #     - "3890:3888"
  #   volumes:
  #     - ./data/zookeeper-3:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=3
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    hostname: clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
      - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-2
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-3
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  alertmanager:
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
    container_name: signoz-alertmanager
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.49.1}
    container_name: signoz-query-service
    command:
      [
        "-config=/root/config/prometheus.yml"
        # "--prefer-delta=true"
      ]
    # ports:
    #   - "6060:6060" # pprof port
    #   - "8080:8080" # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    <<: *db-depend

  frontend:
    image: signoz/frontend:${DOCKER_TAG:-0.49.1}
    container_name: signoz-frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.2}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.2}
    container_name: signoz-otel-collector
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /:/hostfs:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - DOCKER_MULTI_NODE_CLUSTER=false
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: [ "all" ]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
@@ -1,64 +0,0 @@
<clickhouse>
    <logger>
        <!-- Possible levels [1]:

             - none (turns off logging)
             - fatal
             - critical
             - error
             - warning
             - notice
             - information
             - debug
             - trace

             [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
        -->
        <level>information</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
        <!-- Rotation policy
             See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
        -->
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
    </logger>

    <listen_host>0.0.0.0</listen_host>
    <max_connections>4096</max_connections>

    <keeper_server>
        <tcp_port>9181</tcp_port>

        <!-- Must be unique among all keeper servers -->
        <server_id>1</server_id>

        <log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>
        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <min_session_timeout_ms>10000</min_session_timeout_ms>
            <session_timeout_ms>100000</session_timeout_ms>
            <raft_logs_level>information</raft_logs_level>
            <compress_logs>false</compress_logs>
            <!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h -->
        </coordination_settings>

        <!-- enable sanity hostname checks for cluster configuration (e.g. if localhost is used with remote endpoints) -->
        <hostname_checks_enabled>true</hostname_checks_enabled>
        <raft_configuration>
            <server>
                <id>1</id>

                <!-- Internal port and hostname -->
                <hostname>clickhouses-keeper-1</hostname>
                <port>9234</port>
            </server>

            <!-- Add more servers here -->

        </raft_configuration>
    </keeper_server>
</clickhouse>
@@ -1,189 +0,0 @@
receivers:
  tcplog/docker:
    listen_address: "0.0.0.0:2255"
    operators:
      - type: regex_parser
        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
        timestamp:
          parse_from: attributes.timestamp
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
      - type: move
        from: attributes["body"]
        to: body
      - type: remove
        field: attributes.timestamp
      # please remove names from below if you want to collect logs from them
      - type: filter
        id: signoz_logs_filter
        expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 30s
    root_path: /hostfs
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        # otel-collector internal metrics
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector

processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  signozspanmetrics/cumulative:
    metrics_exporter: clickhousemetricswrite
    metrics_flush_interval: 60s
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
    dimensions_cache_size: 100000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
      # This is added to ensure the uniqueness of the timeseries.
      # Otherwise, identical timeseries produced by multiple replicas of
      # collectors result in incorrect APM metrics.
      - name: signoz.collector.id
      - name: service.version
      - name: browser.platform
      - name: browser.mobile
      - name: k8s.cluster.name
      - name: k8s.node.name
      - name: k8s.namespace.name
      - name: host.name
      - name: host.type
      - name: container.name
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
    timeout: 2s
  signozspanmetrics/delta:
    metrics_exporter: clickhousemetricswrite
    metrics_flush_interval: 60s
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
    dimensions_cache_size: 100000
    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
    enable_exp_histogram: true
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
      # This is added to ensure the uniqueness of the timeseries.
      # Otherwise, identical timeseries produced by multiple replicas of
      # collectors result in incorrect APM metrics.
      - name: signoz.collector.id
      - name: service.version
      - name: browser.platform
      - name: browser.mobile
      - name: k8s.cluster.name
      - name: k8s.node.name
      - name: k8s.namespace.name
      - name: host.name
      - name: host.type
      - name: container.name

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/signoz_traces
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
    low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  clickhousemetricswrite/prometheus:
    endpoint: tcp://clickhouse:9000/signoz_metrics
  clickhouselogsexporter:
    dsn: tcp://clickhouse:9000/signoz_logs
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
    timeout: 10s
  # logging: {}

service:
  telemetry:
    logs:
      encoding: json
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/generic:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite/prometheus]
    logs:
      receivers: [otlp, tcplog/docker]
      processors: [batch]
      exporters: [clickhouselogsexporter]
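The `signoz_logs_filter` operator above drops logs from SigNoz's own containers; the accompanying comment notes that you can remove names from the regex to start collecting them. A sketch of the edited operator, keeping `clickhouse` logs by removing it from the exclusion list (illustrative only):

- type: filter
  id: signoz_logs_filter
  # "clickhouse" removed from the alternation, so its container logs are kept
  expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|zookeeper)"'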
@@ -1 +0,0 @@
server_endpoint: ws://query-service:4320/v1/opamp
@@ -1,25 +0,0 @@
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []

remote_read:
  - url: tcp://clickhouse:9000/signoz_metrics
2
deploy/docker/clickhouse-setup/user_scripts/.deprecated
Normal file
@@ -0,0 +1,2 @@
This directory is deprecated and will be removed in the future.
Please use the new directory for Clickhouse setup scripts: `scripts/clickhouse` instead.
@@ -1,16 +0,0 @@
from locust import HttpUser, task, between

class UserTasks(HttpUser):
    wait_time = between(5, 15)

    @task
    def rachel(self):
        self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")

    @task
    def trom(self):
        self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")

    @task
    def japanese(self):
        self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")

    @task
    def coffee(self):
        self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")
299
deploy/docker/docker-compose.ha.yaml
Normal file
@@ -0,0 +1,299 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  restart: on-failure
  logging:
    options:
      max-size: 50m
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9363"
    signoz.io/path: "/metrics"
  depends_on:
    init-clickhouse:
      condition: service_completed_successfully
    zookeeper-1:
      condition: service_healthy
    zookeeper-2:
      condition: service_healthy
    zookeeper-3:
      condition: service_healthy
  healthcheck:
    test:
      - CMD
      - wget
      - --spider
      - -q
      - 0.0.0.0:8123/ping
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: bitnami/zookeeper:3.7.1
  user: root
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9141"
    signoz.io/path: "/metrics"
  healthcheck:
    test:
      - CMD-SHELL
      - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
    interval: 30s
    timeout: 5s
    retries: 3
x-db-depend: &db-depend
  !!merge <<: *common
  depends_on:
    clickhouse:
      condition: service_healthy
    schema-migrator-sync:
      condition: service_completed_successfully
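# Note (illustrative, not part of the original file): the `!!merge <<: *anchor`
# entries below use YAML's merge-key tag to splice the shared mappings defined
# above into each service. For example, a hypothetical service written as
#
#   my-service:
#     !!merge <<: *common
#     image: example/image:latest
#
# behaves as if x-common's keys (networks, restart, logging) were written
# inline next to `image`.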
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-init-clickhouse
    command:
      - bash
      - -c
      - |
        version="v0.0.1"
        node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
        node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
        echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
        cd /tmp
        wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
        tar -xvzf histogram-quantile.tar.gz
        mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
    volumes:
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
  zookeeper-1:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-1
    # ports:
    #   - "2181:2181"
    #   - "2888:2888"
    #   - "3888:3888"
    volumes:
      - zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  zookeeper-2:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-2
    # ports:
    #   - "2182:2181"
    #   - "2889:2888"
    #   - "3889:3888"
    volumes:
      - zookeeper-2:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=2
      - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  zookeeper-3:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-3
    # ports:
    #   - "2183:2181"
    #   - "2890:2888"
    #   - "3890:3888"
    volumes:
      - zookeeper-3:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=3
      - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  clickhouse:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    #   - "9181:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  clickhouse-2:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse-2
    # ports:
    #   - "9001:9000"
    #   - "8124:8123"
    #   - "9182:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse-2:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  clickhouse-3:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse-3
    # ports:
    #   - "9002:9000"
    #   - "8125:8123"
    #   - "9183:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse-3:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
  query-service:
    !!merge <<: *db-depend
    image: signoz/query-service:${DOCKER_TAG:-0.70.1}
    container_name: signoz-query-service
    command:
      - --config=/root/config/prometheus.yml
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "3301:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - localhost:8080/api/v1/health
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:${DOCKER_TAG:-0.70.1}
    container_name: signoz-frontend
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.25}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
      - --copy-path=/var/tmp/collector-config.yaml
      - --feature-gates=-pkg.translator.prometheus.NormalizeName
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
condition: service_completed_successfully
|
||||
query-service:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
|
||||
container_name: schema-migrator-sync
|
||||
command:
|
||||
- sync
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
schema-migrator-async:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
|
||||
container_name: schema-migrator-async
|
||||
command:
|
||||
- async
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
networks:
|
||||
signoz-net:
|
||||
name: signoz-net
|
||||
volumes:
|
||||
alertmanager:
|
||||
name: signoz-alertmanager
|
||||
clickhouse:
|
||||
name: signoz-clickhouse
|
||||
clickhouse-2:
|
||||
name: signoz-clickhouse-2
|
||||
clickhouse-3:
|
||||
name: signoz-clickhouse-3
|
||||
sqlite:
|
||||
name: signoz-sqlite
|
||||
zookeeper-1:
|
||||
name: signoz-zookeeper-1
|
||||
zookeeper-2:
|
||||
name: signoz-zookeeper-2
|
||||
zookeeper-3:
|
||||
name: signoz-zookeeper-3
|
||||
221 deploy/docker/docker-compose.testing.yaml Normal file
@@ -0,0 +1,221 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  restart: on-failure
  logging:
    options:
      max-size: 50m
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9363"
    signoz.io/path: "/metrics"
  depends_on:
    init-clickhouse:
      condition: service_completed_successfully
    zookeeper-1:
      condition: service_healthy
  healthcheck:
    test:
      - CMD
      - wget
      - --spider
      - -q
      - 0.0.0.0:8123/ping
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: bitnami/zookeeper:3.7.1
  user: root
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9141"
    signoz.io/path: "/metrics"
  healthcheck:
    test:
      - CMD-SHELL
      - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
    interval: 30s
    timeout: 5s
    retries: 3
x-db-depend: &db-depend
  !!merge <<: *common
  depends_on:
    clickhouse:
      condition: service_healthy
    schema-migrator-sync:
      condition: service_completed_successfully
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-init-clickhouse
    command:
      - bash
      - -c
      - |
        version="v0.0.1"
        node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
        node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
        echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
        cd /tmp
        wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
        tar -xvzf histogram-quantile.tar.gz
        mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
    volumes:
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
  zookeeper-1:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-1
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  clickhouse:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
  query-service:
    !!merge <<: *db-depend
    image: signoz/query-service:${DOCKER_TAG:-0.70.1}
    container_name: signoz-query-service
    command:
      - --config=/root/config/prometheus.yml
      - --gateway-url=https://api.staging.signoz.cloud
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "8080:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
      - KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - localhost:8080/api/v1/health
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:${DOCKER_TAG:-0.70.1}
    container_name: signoz-frontend
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.25}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
      - --copy-path=/var/tmp/collector-config.yaml
      - --feature-gates=-pkg.translator.prometheus.NormalizeName
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
    depends_on:
      query-service:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-sync
    command:
      - sync
      - --dsn=tcp://clickhouse:9000
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-async
    command:
      - async
      - --dsn=tcp://clickhouse:9000
      - --up=
networks:
  signoz-net:
    name: signoz-net
volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  sqlite:
    name: signoz-sqlite
  zookeeper-1:
    name: signoz-zookeeper-1
219 deploy/docker/docker-compose.yaml Normal file
@@ -0,0 +1,219 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  restart: on-failure
  logging:
    options:
      max-size: 50m
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9363"
    signoz.io/path: "/metrics"
  depends_on:
    init-clickhouse:
      condition: service_completed_successfully
    zookeeper-1:
      condition: service_healthy
  healthcheck:
    test:
      - CMD
      - wget
      - --spider
      - -q
      - 0.0.0.0:8123/ping
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: bitnami/zookeeper:3.7.1
  user: root
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9141"
    signoz.io/path: "/metrics"
  healthcheck:
    test:
      - CMD-SHELL
      - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
    interval: 30s
    timeout: 5s
    retries: 3
x-db-depend: &db-depend
  !!merge <<: *common
  depends_on:
    clickhouse:
      condition: service_healthy
    schema-migrator-sync:
      condition: service_completed_successfully
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-init-clickhouse
    command:
      - bash
      - -c
      - |
        version="v0.0.1"
        node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
        node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
        echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
        cd /tmp
        wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
        tar -xvzf histogram-quantile.tar.gz
        mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
    volumes:
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
  zookeeper-1:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-1
    # ports:
    #   - "2181:2181"
    #   - "2888:2888"
    #   - "3888:3888"
    volumes:
      - zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  clickhouse:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    #   - "9181:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
  query-service:
    !!merge <<: *db-depend
    image: signoz/query-service:${DOCKER_TAG:-0.70.1}
    container_name: signoz-query-service
    command:
      - --config=/root/config/prometheus.yml
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "3301:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - localhost:8080/api/v1/health
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:${DOCKER_TAG:-0.70.1}
    container_name: signoz-frontend
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.25}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
      - --copy-path=/var/tmp/collector-config.yaml
      - --feature-gates=-pkg.translator.prometheus.NormalizeName
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
    depends_on:
      query-service:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-sync
    command:
      - sync
      - --dsn=tcp://clickhouse:9000
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-async
    command:
      - async
      - --dsn=tcp://clickhouse:9000
      - --up=
networks:
  signoz-net:
    name: signoz-net
volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  sqlite:
    name: signoz-sqlite
  zookeeper-1:
    name: signoz-zookeeper-1
39 deploy/docker/generator/hotrod/docker-compose.yaml Normal file
@@ -0,0 +1,39 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  extra_hosts:
    - host.docker.internal:host-gateway
  logging:
    options:
      max-size: 50m
      max-file: "3"
  restart: on-failure
services:
  hotrod:
    <<: *common
    image: jaegertracing/example-hotrod:1.61.0
    container_name: hotrod
    command: [ "all" ]
    environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318 # In case of external SigNoz or cloud, update the endpoint and access token
      # - OTEL_OTLP_HEADERS=signoz-access-token=<your-access-token>
  load-hotrod:
    <<: *common
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../../../common/locust-scripts:/locust

networks:
  signoz-net:
    name: signoz-net
    external: true
43 deploy/docker/generator/infra/docker-compose.yaml Normal file
@@ -0,0 +1,43 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  extra_hosts:
    - host.docker.internal:host-gateway
  logging:
    options:
      max-size: 50m
      max-file: "3"
  restart: on-failure
services:
  otel-agent:
    <<: *common
    image: otel/opentelemetry-collector-contrib:0.111.0
    command:
      - --config=/etc/otel-collector-config.yaml
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - /:/hostfs:ro
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux # Replace signoz-host with the actual hostname
      # - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
    # Before exposing the ports, make sure the ports are not used by other services
    # ports:
    #   - "4317:4317"
    #   - "4318:4318"
  logspout:
    <<: *common
    image: "gliderlabs/logspout:v3.2.14"
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-agent:2255
    depends_on:
      - otel-agent

networks:
  signoz-net:
    name: signoz-net
    external: true
139 deploy/docker/generator/infra/otel-collector-config.yaml Normal file
@@ -0,0 +1,139 @@
receivers:
  hostmetrics:
    collection_interval: 30s
    root_path: /hostfs
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector
        # For Docker daemon metrics to be scraped, it must be configured to expose
        # Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
        # - job_name: docker-daemon
        #   static_configs:
        #     - targets:
        #         - host.docker.internal:9323
        #       labels:
        #         job_name: docker-daemon
        - job_name: docker-container
          docker_sd_configs:
            - host: unix:///var/run/docker.sock
          relabel_configs:
            - action: keep
              regex: true
              source_labels:
                - __meta_docker_container_label_signoz_io_scrape
            - regex: true
              source_labels:
                - __meta_docker_container_label_signoz_io_path
              target_label: __metrics_path__
            - regex: (.+)
              source_labels:
                - __meta_docker_container_label_signoz_io_path
              target_label: __metrics_path__
            - separator: ":"
              source_labels:
                - __meta_docker_network_ip
                - __meta_docker_container_label_signoz_io_port
              target_label: __address__
            - regex: '/(.*)'
              replacement: '$1'
              source_labels:
                - __meta_docker_container_name
              target_label: container_name
            - regex: __meta_docker_container_label_signoz_io_(.+)
              action: labelmap
              replacement: $1
  tcplog/docker:
    listen_address: "0.0.0.0:2255"
    operators:
      - type: regex_parser
        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
        timestamp:
          parse_from: attributes.timestamp
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
      - type: move
        from: attributes["body"]
        to: body
      - type: remove
        field: attributes.timestamp
      # please remove names from below if you want to collect logs from them
      - type: filter
        id: signoz_logs_filter
        expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors:
      # - ec2
      # - gcp
      # - azure
      - env
      - system
    timeout: 2s
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  pprof:
    endpoint: 0.0.0.0:1777
exporters:
  otlp:
    endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
    tls:
      insecure: true
    headers:
      signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
  # debug: {}
service:
  telemetry:
    logs:
      encoding: json
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - pprof
  pipelines:
    traces:
      receivers: [otlp]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    metrics:
      receivers: [otlp]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    logs:
      receivers: [otlp, tcplog/docker]
      processors: [resourcedetection, batch]
      exporters: [otlp]
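The tcplog/docker regex above is dense, so a quick sanity check helps. The following standalone Go sketch (an illustration, not part of the repo) compiles the same pattern and runs it against a hypothetical logspout-style syslog line; the sample line is an assumption based on the fields the config expects.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the tcplog/docker regex_parser operator above.
	re := regexp.MustCompile(`^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?`)

	// Hypothetical line in the shape logspout emits over syslog+tcp.
	line := `<14>1 2024-01-01T10:00:00.000Z 8a1f2c3d4e5f signoz-clickhouse 1234 - - Ok.`

	m := re.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	// Print only the named groups the operators downstream rely on.
	for i, name := range re.SubexpNames() {
		if name != "" {
			fmt.Printf("%s=%s\n", name, m[i])
		}
	}
}
```

A line that matches yields `timestamp`, `container_id`, `container_name`, and `body`, which is exactly what the `move`, `remove`, and `filter` operators above consume.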
101 deploy/docker/otel-collector-config.yaml Normal file
@@ -0,0 +1,101 @@
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system]
    timeout: 2s
  signozspanmetrics/delta:
    metrics_exporter: clickhousemetricswrite
    metrics_flush_interval: 60s
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
    dimensions_cache_size: 100000
    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
    enable_exp_histogram: true
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
      # This is added to ensure the uniqueness of the timeseries
      # Otherwise, identical timeseries produced by multiple replicas of
      # collectors result in incorrect APM metrics
      - name: signoz.collector.id
      - name: service.version
      - name: browser.platform
      - name: browser.mobile
      - name: k8s.cluster.name
      - name: k8s.node.name
      - name: k8s.namespace.name
      - name: host.name
      - name: host.type
      - name: container.name
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  pprof:
    endpoint: 0.0.0.0:1777
exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/signoz_traces
    low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
    use_new_schema: true
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  clickhousemetricswrite/prometheus:
    endpoint: tcp://clickhouse:9000/signoz_metrics
  clickhousemetricswritev2:
    dsn: tcp://clickhouse:9000/signoz_metrics
  clickhouselogsexporter:
    dsn: tcp://clickhouse:9000/signoz_logs
    timeout: 10s
    use_new_schema: true
  # debug: {}
service:
  telemetry:
    logs:
      encoding: json
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - pprof
  pipelines:
    traces:
      receivers: [otlp]
      processors: [signozspanmetrics/delta, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
    logs:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhouselogsexporter]
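Since this collector exposes the OTLP HTTP receiver on 4318, a minimal smoke test is to POST one span to /v1/traces. The sketch below is an assumption-laden illustration: the endpoint path and JSON shape follow the standard OTLP/HTTP conventions, and the IDs and timestamps are made-up values, none of it taken from this repo.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// One span in OTLP/HTTP JSON form; trace/span IDs are hex strings,
	// timestamps are nanoseconds since the epoch encoded as strings.
	payload := []byte(`{"resourceSpans":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"smoke-test"}}]},"scopeSpans":[{"spans":[{"traceId":"5b8efff798038103d269b633813fc60c","spanId":"eee19b7ec3c1b174","name":"ping","kind":1,"startTimeUnixNano":"1700000000000000000","endTimeUnixNano":"1700000000500000000"}]}]}]}`)

	resp, err := http.Post("http://localhost:4318/v1/traces", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("collector replied:", resp.Status) // expect 200 OK once the traces pipeline accepts the span
}
```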
@@ -2,6 +2,11 @@
set -o errexit

+# Variables
+BASE_DIR="$(dirname "$(readlink -f "$0")")"
+DOCKER_STANDALONE_DIR="docker"
+DOCKER_SWARM_DIR="docker-swarm" # TODO: Add docker swarm support
+
# Regular Colors
Black='\033[0;30m' # Black
Red='\[\e[0;31m\]' # Red
@@ -32,6 +37,11 @@ has_cmd() {
    command -v "$1" > /dev/null 2>&1
}

+# Check if docker compose plugin is present
+has_docker_compose_plugin() {
+    docker compose version > /dev/null 2>&1
+}
+
is_mac() {
    [[ $OSTYPE == darwin* ]]
}
@@ -183,9 +193,7 @@ install_docker() {
        $sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
        echo "Installing docker"
        $yum_cmd install docker-ce docker-ce-cli containerd.io
-
    fi
-
}

compose_version () {
@@ -222,17 +230,11 @@ start_docker() {
    echo -e "🐳 Starting Docker ...\n"
    if [[ $os == "Mac" ]]; then
        open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
    else
        if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
            echo "Starting docker service"
            $sudo_cmd systemctl start docker.service
        fi
-        # if [[ -z $sudo_cmd ]]; then
-        #     docker ps > /dev/null && true
-        #     if [[ $? -ne 0 ]]; then
-        #         request_sudo
-        #     fi
-        # fi
+        if [[ -z $sudo_cmd ]]; then
+            if ! docker ps > /dev/null && true; then
+                request_sudo
@@ -260,12 +262,15 @@ wait_for_containers_start() {
}

bye() {  # Prints a friendly good bye message and exits the script.
+    # Switch back to the original directory
+    popd > /dev/null 2>&1
    if [[ "$?" -ne 0 ]]; then
        set +o errexit

        echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
        echo ""
-        echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
+        echo -e "cd ${DOCKER_STANDALONE_DIR}"
+        echo -e "$sudo_cmd $docker_compose_cmd ps -a"

        echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
        echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
@@ -296,11 +301,6 @@ request_sudo() {
    if (( $EUID != 0 )); then
        sudo_cmd="sudo"
        echo -e "Please enter your sudo password, if prompted."
-        # $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
-        # if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
-        #     echo "Need sudo privileges to proceed with the installation."
-        #     exit 1;
-        # fi
        if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
            echo "Need sudo privileges to proceed with the installation."
            exit 1;
@@ -317,6 +317,7 @@
echo -e "👋 Thank you for trying out SigNoz! "
echo ""

sudo_cmd=""
+docker_compose_cmd=""

# Check sudo permissions
if (( $EUID != 0 )); then
@@ -362,28 +363,8 @@ else
    SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
fi

-# echo ""
-
-# echo -e "👉 ${RED}Two ways to go forward\n"
-# echo -e "${RED}1) ClickHouse as database (default)\n"
-# read -p "⚙️ Enter your preference (1/2):" choice_setup
-
-# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
-# do
-#     # echo $choice_setup
-#     echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
-#     read -p "⚙️ Enter your preference (1/2): " choice_setup
-#     # echo $choice_setup
-# done
-
-# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
-#     setup_type='clickhouse'
-# fi

setup_type='clickhouse'

-# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"

# Run bye if failure happens
trap bye EXIT
@@ -455,8 +436,6 @@ if [[ $desired_os -eq 0 ]]; then
    send_event "os_not_supported"
fi

-# check_ports_occupied

# Check is Docker daemon is installed and available. If not, the install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
if ! is_command_present docker; then
@@ -486,27 +465,42 @@ if ! is_command_present docker; then
    fi
fi

+if has_docker_compose_plugin; then
+    echo "docker compose plugin is present, using it"
+    docker_compose_cmd="docker compose"
-# Install docker-compose
-if ! is_command_present docker-compose; then
-    request_sudo
-    install_docker_compose
+else
+    docker_compose_cmd="docker-compose"
+    if ! is_command_present docker-compose; then
+        request_sudo
+        install_docker_compose
+    fi
fi

start_docker

-# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
+# Switch to the Docker Standalone directory
+pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1

# check for open ports, if signoz is not installed
if is_command_present docker-compose; then
    if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
        echo "SigNoz already installed, skipping the occupied ports check"
    else
        check_ports_occupied
    fi
fi

echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
-$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
+$sudo_cmd $docker_compose_cmd pull

echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
-# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
+# The $docker_compose_cmd command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do it's thing.
-$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
+$sudo_cmd $docker_compose_cmd up --detach --remove-orphans || true

wait_for_containers_start 60
echo ""
@@ -516,7 +510,14 @@ if [[ $status_code -ne 200 ]]; then
    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""

-    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
+    echo "cd ${DOCKER_STANDALONE_DIR}"
+    echo "$sudo_cmd $docker_compose_cmd ps -a"
    echo ""

+    echo "Try bringing down the containers and retrying the installation"
+    echo "cd ${DOCKER_STANDALONE_DIR}"
+    echo "$sudo_cmd $docker_compose_cmd down -v"
+    echo ""

    echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
    echo "or reach us on SigNoz for support https://signoz.io/slack"
@@ -534,10 +535,13 @@ else
    echo ""
    echo -e "🟢 Your frontend is running on http://localhost:3301"
    echo ""
    echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
    echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"

-    echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
+    echo "ℹ️ To bring down SigNoz and clean volumes:"
+    echo ""
+    echo "cd ${DOCKER_STANDALONE_DIR}"
+    echo "$sudo_cmd $docker_compose_cmd down -v"

    echo ""
    echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
@@ -552,7 +556,7 @@ else
    do
        read -rp 'Email: ' email
    done

    send_event "identify_successful_installation"
fi

@@ -1,5 +1,5 @@
# use a minimal alpine image
-FROM alpine:3.18.6
+FROM alpine:3.20.3

# Add Maintainer Info
LABEL maintainer="signoz"
@@ -23,6 +23,9 @@ COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service

+# Copy frontend
+COPY frontend/build/ /etc/signoz/web/
+
# run the binary
ENTRYPOINT ["./query-service"]
@@ -16,6 +16,10 @@ const (
	SeasonalityWeekly Seasonality = "weekly"
)

+func (s Seasonality) String() string {
+	return string(s)
+}
+
var (
	oneWeekOffset = 24 * 7 * time.Hour.Milliseconds()
	oneDayOffset  = 24 * time.Hour.Milliseconds()
@@ -55,7 +59,7 @@ type anomalyQueryParams struct {
	// The results obtained from this query are used to compare with predicted values
	// and to detect anomalies
	CurrentPeriodQuery *v3.QueryRangeParamsV3
-	// PastPeriodQuery is the query range params for past seasonal period
+	// PastPeriodQuery is the query range params for past period of seasonality
	// Example: For weekly seasonality, (now-1w-5m, now-1w)
	//        : For daily seasonality, (now-1d-5m, now-1d)
	//        : For hourly seasonality, (now-1h-5m, now-1h)
@@ -70,7 +74,6 @@ type anomalyQueryParams struct {
	//        : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
	//        : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
	PastSeasonQuery *v3.QueryRangeParamsV3
-
	// Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
	// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
	//        : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
@@ -140,13 +143,13 @@ func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonality
	switch seasonality {
	case SeasonalityWeekly:
		currentGrowthPeriodStart = start - oneWeekOffset
-		currentGrowthPeriodEnd = end
+		currentGrowthPeriodEnd = start
	case SeasonalityDaily:
		currentGrowthPeriodStart = start - oneDayOffset
-		currentGrowthPeriodEnd = end
+		currentGrowthPeriodEnd = start
	case SeasonalityHourly:
		currentGrowthPeriodStart = start - oneHourOffset
-		currentGrowthPeriodEnd = end
+		currentGrowthPeriodEnd = start
	}

	currentGrowthQuery := &v3.QueryRangeParamsV3{
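All three cases above make the same correction: the current-growth window now ends at start rather than end, so it spans exactly one seasonal offset instead of one offset plus the evaluation range. A standalone sketch of the corrected window arithmetic (variable names simplified from the diff, values hypothetical):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	oneWeekOffset := 24 * 7 * time.Hour.Milliseconds()

	// start/end of the range being evaluated, in epoch milliseconds.
	end := time.Now().UnixMilli()
	start := end - 30*time.Minute.Milliseconds()
	_ = end

	// Before the fix the growth window was (start-1w, end); after the fix
	// it is (start-1w, start), i.e. exactly one seasonal period long.
	growthStart := start - oneWeekOffset
	growthEnd := start

	fmt.Println("growth window ms:", growthEnd-growthStart) // == oneWeekOffset
}
```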
@@ -67,6 +67,7 @@ func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomaly
	}

+	zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
	currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
	if err != nil {
		return nil, err
@@ -77,6 +78,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
		return nil, err
	}

+	zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
	pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
	if err != nil {
		return nil, err
@@ -87,6 +89,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
		return nil, err
	}

+	zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
	currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
	if err != nil {
		return nil, err
@@ -97,6 +100,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
		return nil, err
	}

+	zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
	pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
	if err != nil {
		return nil, err
@@ -107,6 +111,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
		return nil, err
	}

+	zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
	past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
	if err != nil {
		return nil, err
@@ -117,6 +122,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
		return nil, err
	}

+	zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
	past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
	if err != nil {
		return nil, err
@@ -184,14 +190,15 @@ func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSize
		return 0
	}
	if startIdx >= len(series.Points)-movingAvgWindowSize {
-		startIdx = len(series.Points) - movingAvgWindowSize
+		startIdx = int(math.Max(0, float64(len(series.Points)-movingAvgWindowSize)))
	}
	var sum float64
	points := series.Points[startIdx:]
-	for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
+	windowSize := int(math.Min(float64(movingAvgWindowSize), float64(len(points))))
+	for i := 0; i < windowSize; i++ {
		sum += points[i].Value
	}
-	avg := sum / float64(movingAvgWindowSize)
+	avg := sum / float64(windowSize)
	return avg
}

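The two fixes above guard against short series: the start index can no longer go negative, and the divisor now matches the number of points actually summed rather than the nominal window size. A self-contained sketch of the corrected logic (simplified types and hypothetical values, not the repo's code):

```go
package main

import "fmt"

// movingAvg averages up to window values starting at idx, clamping both
// the start index and the divisor to the data actually available.
func movingAvg(values []float64, window, idx int) float64 {
	if len(values) == 0 {
		return 0
	}
	start := idx
	if start >= len(values)-window {
		start = len(values) - window
		if start < 0 {
			start = 0 // clamp, mirroring math.Max(0, ...) in the diff
		}
	}
	tail := values[start:]
	n := window
	if len(tail) < n {
		n = len(tail) // divide by the points actually summed, not the nominal window
	}
	var sum float64
	for i := 0; i < n; i++ {
		sum += tail[i]
	}
	return sum / float64(n)
}

func main() {
	// Two points with a window of four: the old code divided by 4 and returned 7.5.
	fmt.Println(movingAvg([]float64{10, 20}, 4, 0)) // 15
}
```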
@@ -220,21 +227,25 @@ func (p *BaseSeasonalProvider) getPredictedSeries(
	// plus the average of the current season series
	// minus the mean of the past season series, past2 season series and past3 season series
	for idx, curr := range series.Points {
-		predictedValue :=
-			p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) +
-				p.getAvg(currentSeasonSeries) -
-				p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
+		movingAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
+		avg := p.getAvg(currentSeasonSeries)
+		mean := p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
+		predictedValue := movingAvg + avg - mean

		if predictedValue < 0 {
			// this should not happen (except when the data has extreme outliers)
			// we will use the moving avg of the previous period series in this case
			zap.L().Warn("predictedValue is less than 0", zap.Float64("predictedValue", predictedValue), zap.Any("labels", series.Labels))
			predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
		}

-		zap.L().Info("predictedSeries",
-			zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)),
-			zap.Float64("avg", p.getAvg(currentSeasonSeries)),
-			zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))),
+		zap.L().Debug("predictedSeries",
+			zap.Float64("movingAvg", movingAvg),
+			zap.Float64("avg", avg),
+			zap.Float64("mean", mean),
			zap.Any("labels", series.Labels),
			zap.Float64("predictedValue", predictedValue),
			zap.Float64("curr", curr.Value),
		)
		predictedSeries.Points = append(predictedSeries.Points, v3.Point{
			Timestamp: curr.Timestamp,
|
||||
// moving avg of the previous period series + z score threshold * std dev of the series
|
||||
// moving avg of the previous period series - z score threshold * std dev of the series
|
||||
func (p *BaseSeasonalProvider) getBounds(
|
||||
series, prevSeries, _, _, _, _ *v3.Series,
|
||||
series, predictedSeries *v3.Series,
|
||||
zScoreThreshold float64,
|
||||
) (*v3.Series, *v3.Series) {
|
||||
upperBoundSeries := &v3.Series{
|
||||
@@ -266,8 +277,8 @@ func (p *BaseSeasonalProvider) getBounds(
|
||||
}
|
||||
|
||||
for idx, curr := range series.Points {
|
||||
upperBound := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
|
||||
lowerBound := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
|
||||
upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
|
||||
lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
|
||||
upperBoundSeries.Points = append(upperBoundSeries.Points, v3.Point{
|
||||
Timestamp: curr.Timestamp,
|
||||
Value: upperBound,
|
||||
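The band construction after this change is a z-score band around the prediction: the moving average of the predicted series, plus or minus the threshold times the observed series' standard deviation. A small hedged sketch of that computation over plain slices (illustrative values only):

```go
package main

import (
	"fmt"
	"math"
)

// stdDev is the population standard deviation of xs.
func stdDev(xs []float64) float64 {
	var sum float64
	for _, x := range xs {
		sum += x
	}
	mean := sum / float64(len(xs))
	var sq float64
	for _, x := range xs {
		sq += (x - mean) * (x - mean)
	}
	return math.Sqrt(sq / float64(len(xs)))
}

func main() {
	observed := []float64{100, 110, 90, 105, 95}
	predictedAvg := 101.0 // stand-in for the moving avg of the predicted series at some idx
	z := 3.0              // z-score threshold

	sd := stdDev(observed)
	upper := predictedAvg + z*sd
	lower := predictedAvg - z*sd
	fmt.Printf("band: [%.2f, %.2f]\n", lower, upper) // points outside the band are flagged as anomalies
}
```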
@@ -431,11 +442,7 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomali
	upperBoundSeries, lowerBoundSeries := p.getBounds(
		series,
-		pastPeriodSeries,
-		currentSeasonSeries,
-		pastSeasonSeries,
-		past2SeasonSeries,
-		past3SeasonSeries,
+		predictedSeries,
		zScoreThreshold,
	)
	result.UpperBoundSeries = append(result.UpperBoundSeries, upperBoundSeries)
@@ -12,6 +12,7 @@ import (
	"go.signoz.io/signoz/ee/query-service/license"
	"go.signoz.io/signoz/ee/query-service/usage"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
+	"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
	"go.signoz.io/signoz/pkg/query-service/app/integrations"
	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
	"go.signoz.io/signoz/pkg/query-service/cache"
@@ -25,21 +26,21 @@ type APIHandlerOptions struct {
	DataConnector                 interfaces.DataConnector
	SkipConfig                    *basemodel.SkipConfig
	PreferSpanMetrics             bool
	MaxIdleConns                  int
	MaxOpenConns                  int
	DialTimeout                   time.Duration
	AppDao                        dao.ModelDao
	RulesManager                  *rules.Manager
	UsageManager                  *usage.Manager
	FeatureFlags                  baseint.FeatureLookup
	LicenseManager                *license.Manager
	IntegrationsController        *integrations.Controller
+	CloudIntegrationsController   *cloudintegrations.Controller
	LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
	Cache                         cache.Cache
	Gateway                       *httputil.ReverseProxy
	GatewayUrl                    string
	// Querier Influx Interval
-	FluxInterval     time.Duration
-	UseLogsNewSchema bool
+	FluxInterval      time.Duration
+	UseLogsNewSchema  bool
+	UseTraceNewSchema bool
}
|
||||
type APIHandler struct {
|
||||
@@ -54,17 +55,16 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
|
||||
Reader: opts.DataConnector,
|
||||
SkipConfig: opts.SkipConfig,
|
||||
PreferSpanMetrics: opts.PreferSpanMetrics,
|
||||
MaxIdleConns: opts.MaxIdleConns,
|
||||
MaxOpenConns: opts.MaxOpenConns,
|
||||
DialTimeout: opts.DialTimeout,
|
||||
AppDao: opts.AppDao,
|
||||
RuleManager: opts.RulesManager,
|
||||
FeatureFlags: opts.FeatureFlags,
|
||||
IntegrationsController: opts.IntegrationsController,
|
||||
CloudIntegrationsController: opts.CloudIntegrationsController,
|
||||
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
||||
Cache: opts.Cache,
|
||||
FluxInterval: opts.FluxInterval,
|
||||
UseLogsNewSchema: opts.UseLogsNewSchema,
|
||||
UseTraceNewSchema: opts.UseTraceNewSchema,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
@@ -112,13 +112,6 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
	// note: add ee override methods first

	// routes available only in ee version
-	router.HandleFunc("/api/v1/licenses",
-		am.AdminAccess(ah.listLicenses)).
-		Methods(http.MethodGet)
-
-	router.HandleFunc("/api/v1/licenses",
-		am.AdminAccess(ah.applyLicense)).
-		Methods(http.MethodPost)
-
	router.HandleFunc("/api/v1/featureFlags",
		am.OpenAccess(ah.getFeatureFlags)).
@@ -173,17 +166,33 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
	router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
	router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)

-	router.HandleFunc("/api/v2/licenses",
-		am.ViewAccess(ah.listLicensesV2)).
-		Methods(http.MethodGet)
+	// v3
+	router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.refreshLicensesV3)).Methods(http.MethodPut)
+	router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.getActiveLicenseV3)).Methods(http.MethodGet)

	// v4
	router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)

	// Gateway
-	router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))
+	router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP))

	ah.APIHandler.RegisterRoutes(router, am)

}

+func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
+
+	ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
+
+	router.HandleFunc(
+		"/api/v1/cloud-integrations/{cloudProvider}/accounts/generate-connection-params",
+		am.EditAccess(ah.CloudIntegrationsGenerateConnectionParams),
+	).Methods(http.MethodGet)
+
+}

func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
	version := version.GetVersion()
	versionResponse := basemodel.GetVersionResponse{
425 ee/query-service/app/api/cloudIntegrations.go Normal file
@@ -0,0 +1,425 @@
package api

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/gorilla/mux"
	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/model"
	"go.signoz.io/signoz/pkg/query-service/auth"
	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/dao"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

type CloudIntegrationConnectionParamsResponse struct {
	IngestionUrl string `json:"ingestion_url,omitempty"`
	IngestionKey string `json:"ingestion_key,omitempty"`
	SigNozAPIUrl string `json:"signoz_api_url,omitempty"`
	SigNozAPIKey string `json:"signoz_api_key,omitempty"`
}

func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
	cloudProvider := mux.Vars(r)["cloudProvider"]
	if cloudProvider != "aws" {
		RespondError(w, basemodel.BadRequest(fmt.Errorf(
			"cloud provider not supported: %s", cloudProvider,
		)), nil)
		return
	}

	currentUser, err := auth.GetUserFromRequest(r)
	if err != nil {
		RespondError(w, basemodel.UnauthorizedError(fmt.Errorf(
			"couldn't deduce current user: %w", err,
		)), nil)
		return
	}

	apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), currentUser.OrgId, cloudProvider)
	if apiErr != nil {
		RespondError(w, basemodel.WrapApiError(
			apiErr, "couldn't provision PAT for cloud integration:",
		), nil)
		return
	}

	result := CloudIntegrationConnectionParamsResponse{
		SigNozAPIKey: apiKey,
	}

	license, apiErr := ah.LM().GetRepo().GetActiveLicense(r.Context())
	if apiErr != nil {
		RespondError(w, basemodel.WrapApiError(
			apiErr, "couldn't look for active license",
		), nil)
		return
	}

	if license == nil {
		// Return the API Key (PAT) even if the rest of the params can not be deduced.
		// Params not returned from here will be requested from the user via form inputs.
		// This enables a gracefully degraded but working experience even for non-cloud deployments.
		zap.L().Info("ingestion params and signoz api url can not be deduced since no license was found")
		ah.Respond(w, result)
		return
	}

	ingestionUrl, signozApiUrl, apiErr := getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
	if apiErr != nil {
		RespondError(w, basemodel.WrapApiError(
			apiErr, "couldn't deduce ingestion url and signoz api url",
		), nil)
		return
	}

	result.IngestionUrl = ingestionUrl
	result.SigNozAPIUrl = signozApiUrl

	gatewayUrl := ah.opts.GatewayUrl
	if len(gatewayUrl) > 0 {

		ingestionKey, apiErr := getOrCreateCloudProviderIngestionKey(
			r.Context(), gatewayUrl, license.Key, cloudProvider,
		)
		if apiErr != nil {
			RespondError(w, basemodel.WrapApiError(
				apiErr, "couldn't get or create ingestion key",
			), nil)
			return
		}

		result.IngestionKey = ingestionKey

	} else {
		zap.L().Info("ingestion key can't be deduced since no gateway url has been configured")
	}

	ah.Respond(w, result)
}

func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider string) (
	string, *basemodel.ApiError,
) {
	integrationPATName := fmt.Sprintf("%s integration", cloudProvider)

	integrationUser, apiErr := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
	if apiErr != nil {
		return "", apiErr
	}

	allPats, err := ah.AppDao().ListPATs(ctx)
	if err != nil {
		return "", basemodel.InternalError(fmt.Errorf(
			"couldn't list PATs: %w", err,
		))
	}
	for _, p := range allPats {
		if p.UserID == integrationUser.Id && p.Name == integrationPATName {
			return p.Token, nil
		}
	}

	zap.L().Info(
		"no PAT found for cloud integration, creating a new one",
		zap.String("cloudProvider", cloudProvider),
	)

	newPAT := model.PAT{
		Token:     generatePATToken(),
		UserID:    integrationUser.Id,
		Name:      integrationPATName,
		Role:      baseconstants.ViewerGroup,
		ExpiresAt: 0,
		CreatedAt: time.Now().Unix(),
		UpdatedAt: time.Now().Unix(),
	}
	integrationPAT, err := ah.AppDao().CreatePAT(ctx, newPAT)
	if err != nil {
		return "", basemodel.InternalError(fmt.Errorf(
			"couldn't create cloud integration PAT: %w", err,
		))
	}
	return integrationPAT.Token, nil
}

func (ah *APIHandler) getOrCreateCloudIntegrationUser(
	ctx context.Context, orgId string, cloudProvider string,
) (*basemodel.User, *basemodel.ApiError) {
	cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)

	integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
	if apiErr != nil {
		return nil, basemodel.WrapApiError(apiErr, "couldn't look for integration user")
	}

	if integrationUserResult != nil {
		return &integrationUserResult.User, nil
	}

	zap.L().Info(
		"cloud integration user not found. Attempting to create the user",
		zap.String("cloudProvider", cloudProvider),
	)

	newUser := &basemodel.User{
		Id:        cloudIntegrationUserId,
		Name:      fmt.Sprintf("%s integration", cloudProvider),
		Email:     fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
		CreatedAt: time.Now().Unix(),
		OrgId:     orgId,
	}

	viewerGroup, apiErr := dao.DB().GetGroupByName(ctx, baseconstants.ViewerGroup)
	if apiErr != nil {
		return nil, basemodel.WrapApiError(apiErr, "couldn't get viewer group for creating integration user")
	}
	newUser.GroupId = viewerGroup.Id

	passwordHash, err := auth.PasswordHash(uuid.NewString())
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf(
			"couldn't hash random password for cloud integration user: %w", err,
		))
	}
	newUser.Password = passwordHash

	integrationUser, apiErr := ah.AppDao().CreateUser(ctx, newUser, false)
	if apiErr != nil {
		return nil, basemodel.WrapApiError(apiErr, "couldn't create cloud integration user")
	}

	return integrationUser, nil
}

func getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
	string, string, *basemodel.ApiError,
) {
	url := fmt.Sprintf(
		"%s%s",
		strings.TrimSuffix(constants.ZeusURL, "/"),
		"/v2/deployments/me",
	)

	type deploymentResponse struct {
		Status string `json:"status"`
		Error  string `json:"error"`
		Data   struct {
			Name string `json:"name"`

			ClusterInfo struct {
				Region struct {
					DNS string `json:"dns"`
				} `json:"region"`
			} `json:"cluster"`
		} `json:"data"`
	}

	resp, apiErr := requestAndParseResponse[deploymentResponse](
		ctx, url, map[string]string{"X-Signoz-Cloud-Api-Key": licenseKey}, nil,
	)

	if apiErr != nil {
		return "", "", basemodel.WrapApiError(
			apiErr, "couldn't query for deployment info",
		)
	}

	if resp.Status != "success" {
		return "", "", basemodel.InternalError(fmt.Errorf(
			"couldn't query for deployment info: status: %s, error: %s",
			resp.Status, resp.Error,
		))
	}

	regionDns := resp.Data.ClusterInfo.Region.DNS
	deploymentName := resp.Data.Name

	if len(regionDns) < 1 || len(deploymentName) < 1 {
		// Fail early if actual response structure and expectation here ever diverge
		return "", "", basemodel.InternalError(fmt.Errorf(
			"deployment info response not in expected shape. couldn't determine region dns and deployment name",
		))
	}

	ingestionUrl := fmt.Sprintf("https://ingest.%s", regionDns)

	signozApiUrl := fmt.Sprintf("https://%s.%s", deploymentName, regionDns)

	return ingestionUrl, signozApiUrl, nil
}

type ingestionKey struct {
	Name  string `json:"name"`
	Value string `json:"value"`
	// other attributes from gateway response not included here since they are not being used.
}

type ingestionKeysSearchResponse struct {
	Status string         `json:"status"`
	Data   []ingestionKey `json:"data"`
	Error  string         `json:"error"`
}

type createIngestionKeyResponse struct {
	Status string       `json:"status"`
	Data   ingestionKey `json:"data"`
	Error  string       `json:"error"`
}

func getOrCreateCloudProviderIngestionKey(
	ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider string,
) (string, *basemodel.ApiError) {
	cloudProviderKeyName := fmt.Sprintf("%s-integration", cloudProvider)

	// see if the key already exists
	searchResult, apiErr := requestGateway[ingestionKeysSearchResponse](
		ctx,
		gatewayUrl,
		licenseKey,
		fmt.Sprintf("/v1/workspaces/me/keys/search?name=%s", cloudProviderKeyName),
		nil,
	)

	if apiErr != nil {
		return "", basemodel.WrapApiError(
			apiErr, "couldn't search for cloudprovider ingestion key",
		)
	}

	if searchResult.Status != "success" {
		return "", basemodel.InternalError(fmt.Errorf(
			"couldn't search for cloudprovider ingestion key: status: %s, error: %s",
			searchResult.Status, searchResult.Error,
		))
	}

	for _, k := range searchResult.Data {
		if k.Name == cloudProviderKeyName {
			if len(k.Value) < 1 {
				// Fail early if actual response structure and expectation here ever diverge
				return "", basemodel.InternalError(fmt.Errorf(
					"ingestion keys search response not as expected",
				))
			}

			return k.Value, nil
		}
	}

	zap.L().Info(
		"no existing ingestion key found for cloud integration, creating a new one",
		zap.String("cloudProvider", cloudProvider),
	)
	createKeyResult, apiErr := requestGateway[createIngestionKeyResponse](
		ctx, gatewayUrl, licenseKey, "/v1/workspaces/me/keys",
		map[string]any{
			"name": cloudProviderKeyName,
			"tags": []string{"integration", cloudProvider},
		},
	)
	if apiErr != nil {
		return "", basemodel.WrapApiError(
			apiErr, "couldn't create cloudprovider ingestion key",
		)
	}

	if createKeyResult.Status != "success" {
		return "", basemodel.InternalError(fmt.Errorf(
			"couldn't create cloudprovider ingestion key: status: %s, error: %s",
			createKeyResult.Status, createKeyResult.Error,
		))
	}

	ingestionKey := createKeyResult.Data.Value
	if len(ingestionKey) < 1 {
		// Fail early if actual response structure and expectation here ever diverge
		return "", basemodel.InternalError(fmt.Errorf(
			"ingestion key creation response not as expected",
		))
	}

	return ingestionKey, nil
}

func requestGateway[ResponseType any](
	ctx context.Context, gatewayUrl string, licenseKey string, path string, payload any,
) (*ResponseType, *basemodel.ApiError) {

	baseUrl := strings.TrimSuffix(gatewayUrl, "/")
	reqUrl := fmt.Sprintf("%s%s", baseUrl, path)

	headers := map[string]string{
		"X-Signoz-Cloud-Api-Key": licenseKey,
		"X-Consumer-Username":    "lid:00000000-0000-0000-0000-000000000000",
		"X-Consumer-Groups":      "ns:default",
	}

	return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload)
}

func requestAndParseResponse[ResponseType any](
	ctx context.Context, url string, headers map[string]string, payload any,
) (*ResponseType, *basemodel.ApiError) {

	reqMethod := http.MethodGet
	var reqBody io.Reader
	if payload != nil {
		reqMethod = http.MethodPost

		bodyJson, err := json.Marshal(payload)
		if err != nil {
			return nil, basemodel.InternalError(fmt.Errorf(
				"couldn't serialize request payload to JSON: %w", err,
			))
		}
		reqBody = bytes.NewBuffer(bodyJson)
	}

	req, err := http.NewRequestWithContext(ctx, reqMethod, url, reqBody)
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf(
			"couldn't prepare request: %w", err,
		))
	}

	for k, v := range headers {
		req.Header.Set(k, v)
	}

	client := &http.Client{
		Timeout: 10 * time.Second,
	}

	response, err := client.Do(req)
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf("couldn't make request: %w", err))
	}

	defer response.Body.Close()

	respBody, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf("couldn't read response: %w", err))
	}

	var resp ResponseType

	err = json.Unmarshal(respBody, &resp)
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf(
			"couldn't unmarshal gateway response into %T: %w", resp, err,
		))
	}

	return &resp, nil
}
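A note on the two generic helpers above: requestGateway only fixes the URL and headers and delegates to requestAndParseResponse, so one helper serves every typed gateway response. A minimal, self-contained sketch of the same pattern (the fetchJSON name, the test server, and the sample payload are illustrative, not part of this file):

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"time"
)

// fetchJSON mirrors the requestAndParseResponse pattern above:
// one generic helper, many typed responses.
func fetchJSON[T any](ctx context.Context, url string, headers map[string]string) (*T, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	client := &http.Client{Timeout: 10 * time.Second}
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	body, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	var out T
	if err := json.Unmarshal(body, &out); err != nil {
		return nil, fmt.Errorf("couldn't unmarshal response into %T: %w", out, err)
	}
	return &out, nil
}

func main() {
	// Illustrative stand-in for the gateway: serves a fixed keys-search response.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"status":"success","data":[{"name":"aws-integration","value":"abc"}]}`)
	}))
	defer srv.Close()

	type keysResponse struct {
		Status string `json:"status"`
		Data   []struct {
			Name  string `json:"name"`
			Value string `json:"value"`
		} `json:"data"`
	}

	resp, err := fetchJSON[keysResponse](context.Background(), srv.URL, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, resp.Data[0].Value) // success abc
}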
@@ -9,7 +9,15 @@ import (

func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
	ctx := req.Context()
	if !strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+gateway.AllowedPrefix) {
	validPath := false
	for _, allowedPrefix := range gateway.AllowedPrefix {
		if strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+allowedPrefix) {
			validPath = true
			break
		}
	}

	if !validPath {
		rw.WriteHeader(http.StatusNotFound)
		return
	}
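The loop above replaces the old single-prefix check because gateway.AllowedPrefix becomes a slice later in this compare (see the gateway package change below). A standalone sketch of the resulting allow-list behaviour, with the prefixes copied from that change:

package main

import (
	"fmt"
	"strings"
)

var (
	routePrefix     = "/api/gateway"
	allowedPrefixes = []string{"/v1/workspaces/me", "/v2/profiles/me", "/v2/deployments/me"}
)

// allowed mirrors the ServeGatewayHTTP check above: a path is valid
// if it starts with the route prefix plus any allowed prefix.
func allowed(path string) bool {
	for _, p := range allowedPrefixes {
		if strings.HasPrefix(path, routePrefix+p) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowed("/api/gateway/v1/workspaces/me/keys"))    // true
	fmt.Println(allowed("/api/gateway/v2/deployments/me"))        // true
	fmt.Println(allowed("/api/gateway/v1/workspaces/other/keys")) // false
}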
@@ -1,7 +1,6 @@
package api

import (
	"context"
	"encoding/json"
	"fmt"
	"io"

@@ -9,6 +8,7 @@ import (

	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/model"
	"go.signoz.io/signoz/pkg/http/render"
	"go.uber.org/zap"
)

@@ -59,33 +59,64 @@ type billingDetails struct {
	} `json:"data"`
}

func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
	licenses, apiError := ah.LM().GetLicenses(context.Background())
	if apiError != nil {
		RespondError(w, apiError, nil)
	}
	ah.Respond(w, licenses)
type ApplyLicenseRequest struct {
	LicenseKey string `json:"key"`
}

func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
	var l model.License
func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
	ah.listLicensesV2(w, r)
}

	if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
	activeLicense, err := ah.LM().GetRepo().GetActiveLicenseV3(r.Context())
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}

	// return 404 not found if there is no active license
	if activeLicense == nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)
		return
	}

	// TODO deprecate this when we move away from key for stripe
	activeLicense.Data["key"] = activeLicense.Key
	render.Success(w, http.StatusOK, activeLicense.Data)
}

// this function is called by zeus when inserting licenses in the query-service
func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
	var licenseKey ApplyLicenseRequest

	if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil {
		RespondError(w, model.BadRequest(err), nil)
		return
	}

	if l.Key == "" {
	if licenseKey.LicenseKey == "" {
		RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
		return
	}
	license, apiError := ah.LM().Activate(r.Context(), l.Key)

	_, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
	if apiError != nil {
		RespondError(w, apiError, nil)
		return
	}

	ah.Respond(w, license)
	render.Success(w, http.StatusAccepted, nil)
}

func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) {

	apiError := ah.LM().RefreshLicense(r.Context())
	if apiError != nil {
		RespondError(w, apiError, nil)
		return
	}

	render.Success(w, http.StatusNoContent, nil)
}

func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
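The new applyLicenseV3 handler accepts a JSON body shaped like ApplyLicenseRequest and rejects empty keys before calling ActivateV3. A standalone sketch replaying just that decode-and-validate step (the sample bodies are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// ApplyLicenseRequest mirrors the request type introduced above.
type ApplyLicenseRequest struct {
	LicenseKey string `json:"key"`
}

func main() {
	// The handler decodes {"key": "..."} and rejects empty keys;
	// this replays that decode-and-validate step for two sample bodies.
	for _, body := range []string{`{"key":"my-license-key"}`, `{}`} {
		var req ApplyLicenseRequest
		if err := json.NewDecoder(strings.NewReader(body)).Decode(&req); err != nil {
			fmt.Println("bad request:", err)
			continue
		}
		if req.LicenseKey == "" {
			fmt.Println("bad request: license key is required")
			continue
		}
		fmt.Println("would call ActivateV3 with:", req.LicenseKey)
	}
}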
@@ -154,12 +185,38 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {

	ah.Respond(w, billingResponse.Data)
}

func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {

	licenses, apiError := ah.LM().GetLicenses(context.Background())
	if apiError != nil {
		RespondError(w, apiError, nil)
func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
	licensesV2 := []model.License{}
	for _, l := range licenses {
		planKeyFromPlanName, ok := model.MapOldPlanKeyToNewPlanName[l.PlanName]
		if !ok {
			planKeyFromPlanName = model.Basic
		}
		licenseV2 := model.License{
			Key:               l.Key,
			ActivationId:      "",
			PlanDetails:       "",
			FeatureSet:        l.Features,
			ValidationMessage: "",
			IsCurrent:         l.IsCurrent,
			LicensePlan: model.LicensePlan{
				PlanKey:    planKeyFromPlanName,
				ValidFrom:  l.ValidFrom,
				ValidUntil: l.ValidUntil,
				Status:     l.Status},
		}
		licensesV2 = append(licensesV2, licenseV2)
	}
	return licensesV2
}

func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
	licensesV3, apierr := ah.LM().GetLicensesV3(r.Context())
	if apierr != nil {
		RespondError(w, apierr, nil)
		return
	}
	licenses := convertLicenseV3ToLicenseV2(licensesV3)

	resp := model.Licenses{
		TrialStart: -1,
129
ee/query-service/app/api/queryrange.go
Normal file
@@ -0,0 +1,129 @@
package api

import (
	"bytes"
	"fmt"
	"io"
	"net/http"

	"go.signoz.io/signoz/ee/query-service/anomaly"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
	"go.signoz.io/signoz/pkg/query-service/model"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.uber.org/zap"
)

func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {

	bodyBytes, _ := io.ReadAll(r.Body)
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))

	queryRangeParams, apiErrorObj := baseapp.ParseQueryRangeParams(r)

	if apiErrorObj != nil {
		zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
		RespondError(w, apiErrorObj, nil)
		return
	}
	queryRangeParams.Version = "v4"

	// add temporality for each metric
	temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
	if temporalityErr != nil {
		zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
		return
	}

	anomalyQueryExists := false
	anomalyQuery := &v3.BuilderQuery{}
	if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
		for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
			for _, fn := range query.Functions {
				if fn.Name == v3.FunctionNameAnomaly {
					anomalyQueryExists = true
					anomalyQuery = query
					break
				}
			}
		}
	}

	if anomalyQueryExists {
		// ensure all queries have metric data source, and there should be only one anomaly query
		for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
			// What is query.QueryName == query.Expression doing here?
			// In the current implementation, the way to recognize whether a query is a formula
			// is to check if the expression is the same as the query name. If the expression is
			// different, it is a formula; otherwise, it is a simple builder query.
			if query.DataSource != v3.DataSourceMetrics && query.QueryName == query.Expression {
				RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("all queries must have metric data source")}, nil)
				return
			}
		}

		// get the threshold, and seasonality from the anomaly query
		var seasonality anomaly.Seasonality
		for _, fn := range anomalyQuery.Functions {
			if fn.Name == v3.FunctionNameAnomaly {
				seasonalityStr, ok := fn.NamedArgs["seasonality"].(string)
				if !ok {
					seasonalityStr = "daily"
				}
				if seasonalityStr == "weekly" {
					seasonality = anomaly.SeasonalityWeekly
				} else if seasonalityStr == "daily" {
					seasonality = anomaly.SeasonalityDaily
				} else {
					seasonality = anomaly.SeasonalityHourly
				}
				break
			}
		}
		var provider anomaly.Provider
		switch seasonality {
		case anomaly.SeasonalityWeekly:
			provider = anomaly.NewWeeklyProvider(
				anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
				anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
				anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
				anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
			)
		case anomaly.SeasonalityDaily:
			provider = anomaly.NewDailyProvider(
				anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
				anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
				anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
				anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
			)
		case anomaly.SeasonalityHourly:
			provider = anomaly.NewHourlyProvider(
				anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
				anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
				anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
				anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
			)
		default:
			provider = anomaly.NewDailyProvider(
				anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
				anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
				anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
				anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
			)
		}
		anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
		if err != nil {
			RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
			return
		}
		resp := v3.QueryRangeResponse{
			Result:     anomalies.Results,
			ResultType: "anomaly",
		}
		aH.Respond(w, resp)
	} else {
		r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
		aH.QueryRangeV4(w, r)
	}
}
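One subtlety in the seasonality selection above: a missing or non-string named argument falls back to "daily", but any unrecognized string falls through to hourly. A standalone sketch of that dispatch (pickSeasonality is an illustrative stand-in, not a function in the file):

package main

import "fmt"

// pickSeasonality mirrors the selection logic above: named args arrive as
// map[string]interface{}, so the type assertion and the "daily" default matter.
func pickSeasonality(namedArgs map[string]interface{}) string {
	s, ok := namedArgs["seasonality"].(string)
	if !ok {
		s = "daily"
	}
	switch s {
	case "weekly", "daily":
		return s
	default:
		// any other string (including typos) ends up as hourly
		return "hourly"
	}
}

func main() {
	fmt.Println(pickSeasonality(map[string]interface{}{"seasonality": "weekly"})) // weekly
	fmt.Println(pickSeasonality(map[string]interface{}{}))                        // daily (missing)
	fmt.Println(pickSeasonality(map[string]interface{}{"seasonality": 7}))        // daily (non-string)
	fmt.Println(pickSeasonality(map[string]interface{}{"seasonality": "foo"}))    // hourly
}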
@@ -7,6 +7,7 @@ import (

	"github.com/jmoiron/sqlx"

	"go.signoz.io/signoz/pkg/cache"
	basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
)

@@ -19,19 +20,20 @@ type ClickhouseReader struct {

func NewDataConnector(
	localDB *sqlx.DB,
	ch clickhouse.Conn,
	promConfigPath string,
	lm interfaces.FeatureLookup,
	maxIdleConns int,
	maxOpenConns int,
	dialTimeout time.Duration,
	cluster string,
	useLogsNewSchema bool,
	useTraceNewSchema bool,
	fluxIntervalForTraceDetail time.Duration,
	cache cache.Cache,
) *ClickhouseReader {
	ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
	chReader := basechr.NewReader(localDB, ch, promConfigPath, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
	return &ClickhouseReader{
		conn:             ch.GetConn(),
		conn:             ch,
		appdb:            localDB,
		ClickHouseReader: ch,
		ClickHouseReader: chReader,
	}
}
@@ -11,7 +11,6 @@ import (
	"net"
	"net/http"
	_ "net/http/pprof" // http profiler
	"os"
	"regexp"
	"time"

@@ -29,16 +28,18 @@ import (
	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
	"go.signoz.io/signoz/ee/query-service/interfaces"
	"go.signoz.io/signoz/ee/query-service/rules"
	"go.signoz.io/signoz/pkg/http/middleware"
	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/query-service/migrate"
	"go.signoz.io/signoz/pkg/query-service/model"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/signoz"
	"go.signoz.io/signoz/pkg/web"

	licensepkg "go.signoz.io/signoz/ee/query-service/license"
	"go.signoz.io/signoz/ee/query-service/usage"

	"go.signoz.io/signoz/pkg/query-service/agentConf"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
	baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
	"go.signoz.io/signoz/pkg/query-service/app/integrations"
@@ -62,22 +63,23 @@ import (
const AppDbEngine = "sqlite"

type ServerOptions struct {
	Config            signoz.Config
	SigNoz            *signoz.SigNoz
	PromConfigPath    string
	SkipTopLvlOpsPath string
	HTTPHostPort      string
	PrivateHostPort   string
	// alert specific params
	DisableRules      bool
	RuleRepoURL       string
	PreferSpanMetrics bool
	MaxIdleConns      int
	MaxOpenConns      int
	DialTimeout       time.Duration
	CacheConfigPath   string
	FluxInterval      string
	Cluster           string
	GatewayUrl        string
	UseLogsNewSchema  bool
	DisableRules               bool
	RuleRepoURL                string
	PreferSpanMetrics          bool
	CacheConfigPath            string
	FluxInterval               string
	FluxIntervalForTraceDetail string
	Cluster                    string
	GatewayUrl                 string
	UseLogsNewSchema           bool
	UseTraceNewSchema          bool
}

// Server runs HTTP api service
@@ -108,25 +110,22 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {

// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {

	modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
	modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore.SQLxDB())
	if err != nil {
		return nil, err
	}

	baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)

	if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
	if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
		return nil, err
	}

	localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)

	if err != nil {
	if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
		return nil, err
	}

	localDB.SetMaxOpenConns(10)
	if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
		return nil, err
	}

	gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
	if err != nil {
@@ -134,7 +133,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	}

	// initiate license manager
	lm, err := licensepkg.StartManager("sqlite", localDB)
	lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB())
	if err != nil {
		return nil, err
	}
@@ -143,25 +142,26 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	modelDao.SetFlagProvider(lm)
	readerReady := make(chan bool)

	var reader interfaces.DataConnector
	storage := os.Getenv("STORAGE")
	if storage == "clickhouse" {
		zap.L().Info("Using ClickHouse as datastore ...")
		qb := db.NewDataConnector(
			localDB,
			serverOptions.PromConfigPath,
			lm,
			serverOptions.MaxIdleConns,
			serverOptions.MaxOpenConns,
			serverOptions.DialTimeout,
			serverOptions.Cluster,
			serverOptions.UseLogsNewSchema,
		)
		go qb.Start(readerReady)
		reader = qb
	} else {
		return nil, fmt.Errorf("storage type: %s is not supported in query service", storage)
	fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
	if err != nil {
		return nil, err
	}

	var reader interfaces.DataConnector
	qb := db.NewDataConnector(
		serverOptions.SigNoz.SQLStore.SQLxDB(),
		serverOptions.SigNoz.TelemetryStore.ClickHouseDB(),
		serverOptions.PromConfigPath,
		lm,
		serverOptions.Cluster,
		serverOptions.UseLogsNewSchema,
		serverOptions.UseTraceNewSchema,
		fluxIntervalForTraceDetail,
		serverOptions.SigNoz.Cache,
	)
	go qb.Start(readerReady)
	reader = qb

	skipConfig := &basemodel.SkipConfig{}
	if serverOptions.SkipTopLvlOpsPath != "" {
		// read skip config
@@ -170,45 +170,55 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
			return nil, err
		}
	}
	var c cache.Cache
	if serverOptions.CacheConfigPath != "" {
		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
		if err != nil {
			return nil, err
		}
		c = cache.NewCache(cacheOpts)
	}

	<-readerReady
	rm, err := makeRulesManager(serverOptions.PromConfigPath,
		baseconst.GetAlertManagerApiPrefix(),
		serverOptions.RuleRepoURL,
		localDB,
		serverOptions.SigNoz.SQLStore.SQLxDB(),
		reader,
		c,
		serverOptions.DisableRules,
		lm,
		serverOptions.UseLogsNewSchema,
		serverOptions.UseTraceNewSchema,
	)

	if err != nil {
		return nil, err
	}

	go func() {
		err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
		if err != nil {
			zap.L().Error("error while running clickhouse migrations", zap.Error(err))
		}
	}()

	// initiate opamp
	_, err = opAmpModel.InitDB(localDB)
	_, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
	if err != nil {
		return nil, err
	}

	integrationsController, err := integrations.NewController(localDB)
	integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't create integrations controller: %w", err,
		)
	}

	cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't create cloud provider integrations controller: %w", err,
		)
	}

	// ingestion pipelines manager
	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
		localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
		serverOptions.SigNoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
	)
	if err != nil {
		return nil, err
@@ -216,8 +226,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

	// initiate agent config handler
	agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
		DB:       localDB,
		DBEngine: AppDbEngine,
		DB:            serverOptions.SigNoz.SQLStore.SQLxDB(),
		AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
	})
	if err != nil {
@@ -225,7 +234,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	}

	// start the usagemanager
	usageManager, err := usage.New("sqlite", modelDao, lm.GetRepo(), reader.GetConn())
	usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickHouseDB(), serverOptions.Config.TelemetryStore.ClickHouse.DSN)
	if err != nil {
		return nil, err
	}
@@ -237,17 +246,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	telemetry.GetInstance().SetReader(reader)
	telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)

	var c cache.Cache
	if serverOptions.CacheConfigPath != "" {
		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
		if err != nil {
			return nil, err
		}
		c = cache.NewCache(cacheOpts)
	}

	fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)

	if err != nil {
		return nil, err
	}
@@ -256,20 +255,20 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		DataConnector:                 reader,
		SkipConfig:                    skipConfig,
		PreferSpanMetrics:             serverOptions.PreferSpanMetrics,
		MaxIdleConns:                  serverOptions.MaxIdleConns,
		MaxOpenConns:                  serverOptions.MaxOpenConns,
		DialTimeout:                   serverOptions.DialTimeout,
		AppDao:                        modelDao,
		RulesManager:                  rm,
		UsageManager:                  usageManager,
		FeatureFlags:                  lm,
		LicenseManager:                lm,
		IntegrationsController:        integrationsController,
		CloudIntegrationsController:   cloudIntegrationsController,
		LogsParsingPipelineController: logParsingPipelineController,
		Cache:                         c,
		FluxInterval:                  fluxInterval,
		Gateway:                       gatewayProxy,
		GatewayUrl:                    serverOptions.GatewayUrl,
		UseLogsNewSchema:              serverOptions.UseLogsNewSchema,
		UseTraceNewSchema:             serverOptions.UseTraceNewSchema,
	}

	apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -286,7 +285,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		usageManager: usageManager,
	}

	httpServer, err := s.createPublicServer(apiHandler)
	httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)

	if err != nil {
		return nil, err
@@ -312,10 +311,13 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,

	r := baseapp.NewRouter()

	r.Use(baseapp.LogCommentEnricher)
	r.Use(setTimeoutMiddleware)
	r.Use(s.analyticsMiddleware)
	r.Use(loggingMiddlewarePrivate)
	r.Use(middleware.NewTimeout(zap.L(),
		s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
		s.serverOptions.Config.APIServer.Timeout.Default,
		s.serverOptions.Config.APIServer.Timeout.Max,
	).Wrap)
	r.Use(middleware.NewAnalytics(zap.L()).Wrap)
	r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)

	apiHandler.RegisterPrivateRoutes(r)

@@ -335,7 +337,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
	}, nil
}

func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {

	r := baseapp.NewRouter()

@@ -348,22 +350,27 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
		}

		if user.User.OrgId == "" {
			return nil, model.UnauthorizedError(errors.New("orgId is missing in the claims"))
			return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
		}

		return user, nil
	}
	am := baseapp.NewAuthMiddleware(getUserFromRequest)

	r.Use(baseapp.LogCommentEnricher)
	r.Use(setTimeoutMiddleware)
	r.Use(s.analyticsMiddleware)
	r.Use(loggingMiddleware)
	r.Use(middleware.NewTimeout(zap.L(),
		s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
		s.serverOptions.Config.APIServer.Timeout.Default,
		s.serverOptions.Config.APIServer.Timeout.Max,
	).Wrap)
	r.Use(middleware.NewAnalytics(zap.L()).Wrap)
	r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)

	apiHandler.RegisterRoutes(r, am)
	apiHandler.RegisterLogsRoutes(r, am)
	apiHandler.RegisterIntegrationRoutes(r, am)
	apiHandler.RegisterCloudIntegrationsRoutes(r, am)
	apiHandler.RegisterQueryRangeV3Routes(r, am)
	apiHandler.RegisterInfraMetricsRoutes(r, am)
	apiHandler.RegisterQueryRangeV4Routes(r, am)
	apiHandler.RegisterWebSocketPaths(r, am)
	apiHandler.RegisterMessagingQueuesRoutes(r, am)
@@ -378,36 +385,16 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e

	handler = handlers.CompressHandler(handler)

	err := web.AddToRouter(r)
	if err != nil {
		return nil, err
	}

	return &http.Server{
		Handler: handler,
	}, nil
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddleware is used for logging public api calls
func loggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		route := mux.CurrentRoute(r)
		path, _ := route.GetPathTemplate()
		startTime := time.Now()
		next.ServeHTTP(w, r)
		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path))
	})
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddlewarePrivate is used for logging private api calls
// from internal services like alert manager
func loggingMiddlewarePrivate(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		route := mux.CurrentRoute(r)
		path, _ := route.GetPathTemplate()
		startTime := time.Now()
		next.ServeHTTP(w, r)
		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
	})
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
type loggingResponseWriter struct {
	http.ResponseWriter
@@ -487,32 +474,29 @@ func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}
		zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
	}

	signozMetricsUsed := false
	signozLogsUsed := false
	signozTracesUsed := false
	if postData != nil {
		queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)

		if postData.CompositeQuery != nil {
			data["queryType"] = postData.CompositeQuery.QueryType
			data["panelType"] = postData.CompositeQuery.PanelType

			signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
		}
	}

	if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
		if signozMetricsUsed {
	if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
		if queryInfoResult.MetricsUsed {
			telemetry.GetInstance().AddActiveMetricsUser()
		}
		if signozLogsUsed {
		if queryInfoResult.LogsUsed {
			telemetry.GetInstance().AddActiveLogsUser()
		}
		if signozTracesUsed {
		if queryInfoResult.TracesUsed {
			telemetry.GetInstance().AddActiveTracesUser()
		}
		data["metricsUsed"] = signozMetricsUsed
		data["logsUsed"] = signozLogsUsed
		data["tracesUsed"] = signozTracesUsed
		data["metricsUsed"] = queryInfoResult.MetricsUsed
		data["logsUsed"] = queryInfoResult.LogsUsed
		data["tracesUsed"] = queryInfoResult.TracesUsed
		data["filterApplied"] = queryInfoResult.FilterApplied
		data["groupByApplied"] = queryInfoResult.GroupByApplied
		data["aggregateOperator"] = queryInfoResult.AggregateOperator
		data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
		data["numberOfQueries"] = queryInfoResult.NumberOfQueries
		data["queryType"] = queryInfoResult.QueryType
		data["panelType"] = queryInfoResult.PanelType

		userEmail, err := baseauth.GetEmailFromJwt(r.Context())
		if err == nil {
			// switch case to set data["screen"] based on the referrer
@@ -579,23 +563,6 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
	})
}

// TODO(remove): Implemented at pkg/http/middleware/timeout.go
func setTimeoutMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		var cancel context.CancelFunc
		// check if route is not excluded
		url := r.URL.Path
		if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
			ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout)
			defer cancel()
		}

		r = r.WithContext(ctx)
		next.ServeHTTP(w, r)
	})
}

// initListeners initialises listeners of the server
func (s *Server) initListeners() error {
	// listen on public port
@@ -732,9 +699,11 @@ func makeRulesManager(
	ruleRepoURL string,
	db *sqlx.DB,
	ch baseint.Reader,
	cache cache.Cache,
	disableRules bool,
	fm baseint.FeatureLookup,
	useLogsNewSchema bool) (*baserules.Manager, error) {
	useLogsNewSchema bool,
	useTraceNewSchema bool) (*baserules.Manager, error) {

	// create engine
	pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -756,14 +725,17 @@ func makeRulesManager(
		RepoURL: ruleRepoURL,
		DBConn:  db,
		Context: context.Background(),
		Logger:  nil,
		Logger:  zap.L(),
		DisableRules: disableRules,
		FeatureFlags: fm,
		Reader:       ch,
		Cache:        cache,
		EvalDelay:    baseconst.GetEvalDelay(),

		PrepareTaskFunc:  rules.PrepareTaskFunc,
		UseLogsNewSchema: useLogsNewSchema,
		PrepareTaskFunc:     rules.PrepareTaskFunc,
		UseLogsNewSchema:    useLogsNewSchema,
		UseTraceNewSchema:   useTraceNewSchema,
		PrepareTestRuleFunc: rules.TestNotification,
	}

	// create Manager
@@ -14,6 +14,9 @@ var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")

// this is set via build time variable
var ZeusURL = "https://api.signoz.cloud"

func GetOrDefaultEnv(key string, fallback string) string {
	v := os.Getenv(key)
	if len(v) == 0 {
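Since ZeusURL is a plain package-level string variable rather than an env lookup, the "set via build time variable" comment refers to the Go linker's -X flag. A hypothetical invocation (the override value and build path are illustrative):

go build -ldflags "-X go.signoz.io/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud" ./ee/query-service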
@@ -1,18 +1,10 @@
package dao

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	"go.signoz.io/signoz/ee/query-service/dao/sqlite"
)

func InitDao(engine, path string) (ModelDao, error) {

	switch engine {
	case "sqlite":
		return sqlite.InitDB(path)
	default:
		return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
	}

func InitDao(inputDB *sqlx.DB) (ModelDao, error) {
	return sqlite.InitDB(inputDB)
}
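The dao layer now receives an already-open *sqlx.DB instead of an engine name and path. A minimal sketch of the new wiring, assuming the mattn/go-sqlite3 driver (the database path is illustrative, not the repo's exact call site):

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

func main() {
	// The caller now owns connection setup; the dao just consumes the handle.
	db, err := sqlx.Open("sqlite3", "/var/lib/signoz/signoz.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	db.SetMaxOpenConns(10)

	// initDao(db) would replace the old initDao("sqlite", path) call.
}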
@@ -7,7 +7,6 @@ import (
	basedao "go.signoz.io/signoz/pkg/query-service/dao"
	basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
	"go.uber.org/zap"
)

type modelDao struct {
@@ -29,113 +28,15 @@ func (m *modelDao) checkFeature(key string) error {
	return m.flags.CheckFeature(key)
}

func columnExists(db *sqlx.DB, tableName, columnName string) bool {
	query := fmt.Sprintf("PRAGMA table_info(%s);", tableName)
	rows, err := db.Query(query)
	if err != nil {
		zap.L().Error("Failed to query table info", zap.Error(err))
		return false
	}
	defer rows.Close()

	var (
		cid        int
		name       string
		ctype      string
		notnull    int
		dflt_value *string
		pk         int
	)
	for rows.Next() {
		err := rows.Scan(&cid, &name, &ctype, &notnull, &dflt_value, &pk)
		if err != nil {
			zap.L().Error("Failed to scan table info", zap.Error(err))
			return false
		}
		if name == columnName {
			return true
		}
	}
	err = rows.Err()
	if err != nil {
		zap.L().Error("Failed to scan table info", zap.Error(err))
		return false
	}
	return false
}

// InitDB creates and extends base model DB repository
func InitDB(dataSourceName string) (*modelDao, error) {
	dao, err := basedsql.InitDB(dataSourceName)
func InitDB(inputDB *sqlx.DB) (*modelDao, error) {
	dao, err := basedsql.InitDB(inputDB)
	if err != nil {
		return nil, err
	}
	// set package variable so dependent base methods (e.g. AuthCache) will work
	basedao.SetDB(dao)
	m := &modelDao{ModelDaoSqlite: dao}

	table_schema := `
	PRAGMA foreign_keys = ON;
	CREATE TABLE IF NOT EXISTS org_domains(
		id TEXT PRIMARY KEY,
		org_id TEXT NOT NULL,
		name VARCHAR(50) NOT NULL UNIQUE,
		created_at INTEGER NOT NULL,
		updated_at INTEGER,
		data TEXT NOT NULL,
		FOREIGN KEY(org_id) REFERENCES organizations(id)
	);
	CREATE TABLE IF NOT EXISTS personal_access_tokens (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		role TEXT NOT NULL,
		user_id TEXT NOT NULL,
		token TEXT NOT NULL UNIQUE,
		name TEXT NOT NULL,
		created_at INTEGER NOT NULL,
		expires_at INTEGER NOT NULL,
		updated_at INTEGER NOT NULL,
		last_used INTEGER NOT NULL,
		revoked BOOLEAN NOT NULL,
		updated_by_user_id TEXT NOT NULL,
		FOREIGN KEY(user_id) REFERENCES users(id)
	);
	`

	_, err = m.DB().Exec(table_schema)
	if err != nil {
		return nil, fmt.Errorf("error in creating tables: %v", err.Error())
	}

	if !columnExists(m.DB(), "personal_access_tokens", "role") {
		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN role TEXT NOT NULL DEFAULT 'ADMIN';")
		if err != nil {
			return nil, fmt.Errorf("error in adding column: %v", err.Error())
		}
	}
	if !columnExists(m.DB(), "personal_access_tokens", "updated_at") {
		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_at INTEGER NOT NULL DEFAULT 0;")
		if err != nil {
			return nil, fmt.Errorf("error in adding column: %v", err.Error())
		}
	}
	if !columnExists(m.DB(), "personal_access_tokens", "last_used") {
		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;")
		if err != nil {
			return nil, fmt.Errorf("error in adding column: %v", err.Error())
		}
	}
	if !columnExists(m.DB(), "personal_access_tokens", "revoked") {
		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN revoked BOOLEAN NOT NULL DEFAULT FALSE;")
		if err != nil {
			return nil, fmt.Errorf("error in adding column: %v", err.Error())
		}
	}
	if !columnExists(m.DB(), "personal_access_tokens", "updated_by_user_id") {
		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_by_user_id TEXT NOT NULL DEFAULT '';")
		if err != nil {
			return nil, fmt.Errorf("error in adding column: %v", err.Error())
		}
	}
	return m, nil
}
@@ -8,9 +8,9 @@ import (
	"strings"
)

const (
	RoutePrefix   string = "/api/gateway"
	AllowedPrefix string = "/v1/workspaces/me"
var (
	RoutePrefix   string   = "/api/gateway"
	AllowedPrefix []string = []string{"/v1/workspaces/me", "/v2/profiles/me", "/v2/deployments/me"}
)

type proxy struct {
@@ -2,14 +2,7 @@ package signozio

type status string

type ActivationResult struct {
	Status    status              `json:"status"`
	Data      *ActivationResponse `json:"data,omitempty"`
	ErrorType string              `json:"errorType,omitempty"`
	Error     string              `json:"error,omitempty"`
}

type ActivationResponse struct {
	ActivationId string `json:"ActivationId"`
	PlanDetails  string `json:"PlanDetails"`
type ValidateLicenseResponse struct {
	Status status                 `json:"status"`
	Data   map[string]interface{} `json:"data"`
}
@@ -7,9 +7,9 @@ import (
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/pkg/errors"
	"go.uber.org/zap"

	"go.signoz.io/signoz/ee/query-service/constants"
	"go.signoz.io/signoz/ee/query-service/model"
@@ -23,12 +23,14 @@ const (
)

type Client struct {
	Prefix string
	Prefix     string
	GatewayUrl string
}

func New() *Client {
	return &Client{
		Prefix: constants.LicenseSignozIo,
		Prefix:     constants.LicenseSignozIo,
		GatewayUrl: constants.ZeusURL,
	}
}
@@ -36,82 +38,56 @@ func init() {
	C = New()
}

// ActivateLicense sends key to license.signoz.io and gets activation data
func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
	licenseReq := map[string]string{
		"key":    key,
		"siteId": siteId,
func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {

	// Creating an HTTP client with a timeout for better control
	client := &http.Client{
		Timeout: 10 * time.Second,
	}

	reqString, _ := json.Marshal(licenseReq)
	httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))

	req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
	if err != nil {
		zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
		return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
		return nil, model.BadRequest(errors.Wrap(err, "failed to create request"))
	}

	httpBody, err := io.ReadAll(httpResponse.Body)
	// Setting the custom header
	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)

	response, err := client.Do(req)
	if err != nil {
		zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
		return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
	}

	defer httpResponse.Body.Close()

	// read api request result
	result := ActivationResult{}
	err = json.Unmarshal(httpBody, &result)
	if err != nil {
		zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
		return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
	}

	switch httpResponse.StatusCode {
	case 200, 201:
		return result.Data, nil
	case 400, 401:
		return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
	default:
		return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
	}

}

// ValidateLicense validates the license key
func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
	validReq := map[string]string{
		"activationId": activationId,
	}

	reqString, _ := json.Marshal(validReq)
	response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))

	if err != nil {
		return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
		return nil, model.BadRequest(errors.Wrap(err, "failed to make post request"))
	}

	body, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
		return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read validation response from %v", C.GatewayUrl)))
	}

	defer response.Body.Close()

	switch response.StatusCode {
	case 200, 201:
		a := ActivationResult{}
	case 200:
		a := ValidateLicenseResponse{}
		err = json.Unmarshal(body, &a)
		if err != nil {
			return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
		}
		return a.Data, nil
	case 400, 401:

		license, err := model.NewLicenseV3(a.Data)
		if err != nil {
			return nil, model.BadRequest(errors.Wrap(err, "failed to generate new license v3"))
		}

		return license, nil
	case 400:
		return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
			"bad request error received from license.signoz.io"))
			fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
	case 401:
		return nil, model.Unauthorized(errors.Wrap(fmt.Errorf(string(body)),
			fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
	default:
		return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
			"internal error received from license.signoz.io"))
			fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
	}

}
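A side note on the error formatting in this file: fmt.Sprintf does not understand the %w verb, which is only meaningful inside fmt.Errorf, so wrap messages should stay plain strings. A standalone sketch of the difference:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")

	// %w in fmt.Sprintf is not supported; it renders as %!w(...) noise.
	fmt.Println(fmt.Sprintf("failed: %w", err)) // failed: %!w(*errors.errorString=&{boom})

	// %w belongs in fmt.Errorf, where it wraps the error so that
	// errors.Is / errors.As can unwrap it later.
	wrapped := fmt.Errorf("failed: %w", err)
	fmt.Println(errors.Is(wrapped, err)) // true
}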
@@ -3,12 +3,13 @@ package license
import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"time"

	"github.com/jmoiron/sqlx"
	"github.com/mattn/go-sqlite3"

	"go.signoz.io/signoz/ee/query-service/license/sqlite"
	"go.signoz.io/signoz/ee/query-service/model"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	"go.uber.org/zap"
@@ -26,103 +27,138 @@ func NewLicenseRepo(db *sqlx.DB) Repo {
    }
}

func (r *Repo) InitDB(engine string) error {
    switch engine {
    case "sqlite3", "sqlite":
        return sqlite.InitDB(r.db)
    default:
        return fmt.Errorf("unsupported db")
    }
}

func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
    licensesData := []model.LicenseDB{}
    licenseV3Data := []*model.LicenseV3{}

func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
    licenses := []model.License{}
    query := "SELECT id,key,data FROM licenses_v3"

    query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"

    err := r.db.Select(&licenses, query)
    err := r.db.Select(&licensesData, query)
    if err != nil {
        return nil, fmt.Errorf("failed to get licenses from db: %v", err)
    }

    return licenses, nil
    for _, l := range licensesData {
        var licenseData map[string]interface{}
        err := json.Unmarshal([]byte(l.Data), &licenseData)
        if err != nil {
            return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
        }

        license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
        if err != nil {
            return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
        }
        licenseV3Data = append(licenseV3Data, license)
    }

    return licenseV3Data, nil
}

// GetActiveLicense fetches the latest active license from DB.
// If the license is not present, expect a nil license and a nil error in the output.
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
    var err error
    licenses := []model.License{}
    activeLicenseV3, err := r.GetActiveLicenseV3(ctx)
    if err != nil {
        return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
    }

    query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"
    if activeLicenseV3 == nil {
        return nil, nil
    }
    activeLicenseV2 := model.ConvertLicenseV3ToLicenseV2(activeLicenseV3)
    return activeLicenseV2, nil
}

func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error) {
    var err error
    licenses := []model.LicenseDB{}

    query := "SELECT id,key,data FROM licenses_v3"

    err = r.db.Select(&licenses, query)
    if err != nil {
        return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
    }

    var active *model.License
    var active *model.LicenseV3
    for _, l := range licenses {
        l.ParsePlan()
        var licenseData map[string]interface{}
        err := json.Unmarshal([]byte(l.Data), &licenseData)
        if err != nil {
            return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
        }

        license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
        if err != nil {
            return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
        }

        if active == nil &&
            (l.ValidFrom != 0) &&
            (l.ValidUntil == -1 || l.ValidUntil > time.Now().Unix()) {
            active = &l
            (license.ValidFrom != 0) &&
            (license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
            active = license
        }
        if active != nil &&
            l.ValidFrom > active.ValidFrom &&
            (l.ValidUntil == -1 || l.ValidUntil > time.Now().Unix()) {
            active = &l
            license.ValidFrom > active.ValidFrom &&
            (license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
            active = license
        }
    }

    return active, nil
}

// InsertLicense inserts a new license in db
func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
// InsertLicenseV3 inserts a new license v3 in db
func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {

    if l.Key == "" {
        return fmt.Errorf("insert license failed: license key is required")
    query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`

    // license is the entity of zeus so putting the entire license here without defining schema
    licenseData, err := json.Marshal(l.Data)
    if err != nil {
        return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
    }

    query := `INSERT INTO licenses
        (key, planDetails, activationId, validationmessage)
        VALUES ($1, $2, $3, $4)`

    _, err := r.db.ExecContext(ctx,
    _, err = r.db.ExecContext(ctx,
        query,
        l.ID,
        l.Key,
        l.PlanDetails,
        l.ActivationId,
        l.ValidationMessage)
        string(licenseData),
    )

    if err != nil {
        if sqliteErr, ok := err.(sqlite3.Error); ok {
            if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
                zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
                return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
            }
        }
        zap.L().Error("error in inserting license data: ", zap.Error(err))
        return fmt.Errorf("failed to insert license in db: %v", err)
        return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
    }

    return nil
}

// UpdatePlanDetails writes new plan details to the db
func (r *Repo) UpdatePlanDetails(ctx context.Context,
    key,
    planDetails string) error {
// UpdateLicenseV3 updates an existing license v3 record in db
func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {

    if key == "" {
        return fmt.Errorf("update plan details failed: license key is required")
    // the key and id for the license can't change so only update the data here!
    query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`

    license, err := json.Marshal(l.Data)
    if err != nil {
        return fmt.Errorf("update license failed: license marshal error")
    }

    query := `UPDATE licenses
        SET planDetails = $1,
        updatedAt = $2
        WHERE key = $3`

    _, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)
    _, err = r.db.ExecContext(ctx,
        query,
        license,
        l.ID,
    )

    if err != nil {
        zap.L().Error("error in updating license: ", zap.Error(err))
        zap.L().Error("error in updating license data: ", zap.Error(err))
        return fmt.Errorf("failed to update license in db: %v", err)
    }

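Note on the hunk above: reading only the v3 lines, GetActiveLicenseV3 treats a license as active when ValidFrom is set and ValidUntil is either -1 (no expiry) or still in the future, and among the candidates the one with the latest ValidFrom wins. A minimal standalone sketch of that rule; the lic type and pickActive helper are illustrative stand-ins, not names from the repo:

package main

import (
    "fmt"
    "time"
)

// lic is a stand-in for model.LicenseV3; only the fields the
// selection rule needs are reproduced here.
type lic struct {
    ID         string
    ValidFrom  int64
    ValidUntil int64 // -1 means no expiry
}

// pickActive mirrors the loop in GetActiveLicenseV3: keep the
// currently valid license with the latest ValidFrom.
func pickActive(all []*lic, now int64) *lic {
    var active *lic
    for _, l := range all {
        valid := l.ValidFrom != 0 && (l.ValidUntil == -1 || l.ValidUntil > now)
        if !valid {
            continue
        }
        if active == nil || l.ValidFrom > active.ValidFrom {
            active = l
        }
    }
    return active
}

func main() {
    now := time.Now().Unix()
    got := pickActive([]*lic{
        {ID: "old", ValidFrom: now - 1000, ValidUntil: -1},
        {ID: "new", ValidFrom: now - 10, ValidUntil: now + 1000},
        {ID: "expired", ValidFrom: now - 5, ValidUntil: now - 1},
    }, now)
    fmt.Println(got.ID) // prints "new"
}
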
@@ -2,11 +2,11 @@ package license

import (
    "context"
    "fmt"
    "sync/atomic"
    "time"

    "github.com/jmoiron/sqlx"
    "github.com/pkg/errors"

    "sync"
@@ -26,57 +26,43 @@ var LM *Manager
var validationFrequency = 24 * 60 * time.Minute

type Manager struct {
    repo  *Repo
    mutex sync.Mutex

    repo             *Repo
    mutex            sync.Mutex
    validatorRunning bool

    // end the license validation; this is important to gracefully
    // stop validation and protect against inconsistent updates
    done chan struct{}

    // terminated waits for the validate go routine to end
    terminated chan struct{}

    // last time the license was validated
    lastValidated int64

    // keep track of validation failure attempts
    failedAttempts uint64

    // keep track of active license and features
    activeLicense  *model.License
    activeFeatures basemodel.FeatureSet
    activeLicenseV3 *model.LicenseV3
    activeFeatures  basemodel.FeatureSet
}

func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
func StartManager(db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
    if LM != nil {
        return LM, nil
    }

    repo := NewLicenseRepo(db)
    err := repo.InitDB(dbType)

    if err != nil {
        return nil, fmt.Errorf("failed to initiate license repo: %v", err)
    }

    m := &Manager{
        repo: &repo,
    }

    if err := m.start(features...); err != nil {
        return m, err
    }

    LM = m
    return m, nil
}

// start loads active license in memory and initiates validator
func (lm *Manager) start(features ...basemodel.Feature) error {
    err := lm.LoadActiveLicense(features...)

    return err
    return lm.LoadActiveLicenseV3(features...)
}

func (lm *Manager) Stop() {
@@ -84,7 +70,7 @@ func (lm *Manager) Stop() {
    <-lm.terminated
}

func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
func (lm *Manager) SetActiveV3(l *model.LicenseV3, features ...basemodel.Feature) {
    lm.mutex.Lock()
    defer lm.mutex.Unlock()

@@ -92,8 +78,8 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
        return
    }

    lm.activeLicense = l
    lm.activeFeatures = append(l.FeatureSet, features...)
    lm.activeLicenseV3 = l
    lm.activeFeatures = append(l.Features, features...)
    // set default features
    setDefaultFeatures(lm)

@@ -105,7 +91,7 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
        // we want to make sure only one validator runs,
        // we already have lock() so good to go
        lm.validatorRunning = true
        go lm.Validator(context.Background())
        go lm.ValidatorV3(context.Background())
    }

}
@@ -114,14 +100,14 @@ func setDefaultFeatures(lm *Manager) {
    lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...)
}

// LoadActiveLicense loads the most recent active license
func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
    active, err := lm.repo.GetActiveLicense(context.Background())
func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
    active, err := lm.repo.GetActiveLicenseV3(context.Background())
    if err != nil {
        return err
    }

    if active != nil {
        lm.SetActive(active, features...)
        lm.SetActiveV3(active, features...)
    } else {
        zap.L().Info("No active license found, defaulting to basic plan")
        // if no active license is found, we default to basic(free) plan with all default features
@@ -137,40 +123,37 @@ func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
    return nil
}

func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.LicenseV3, apiError *model.ApiError) {

    licenses, err := lm.repo.GetLicenses(ctx)
    licenses, err := lm.repo.GetLicensesV3(ctx)
    if err != nil {
        return nil, model.InternalError(err)
    }

    for _, l := range licenses {
        l.ParsePlan()

        if lm.activeLicense != nil && l.Key == lm.activeLicense.Key {
        if lm.activeLicenseV3 != nil && l.Key == lm.activeLicenseV3.Key {
            l.IsCurrent = true
        }

        if l.ValidUntil == -1 {
            // for subscriptions, there is no end-date as such
            // but for showing the user some validity we default to a one-year timespan
            l.ValidUntil = l.ValidFrom + 31556926
        }

        response = append(response, l)
    }

    return
    return response, nil
}

// Validator validates license after an epoch of time
func (lm *Manager) Validator(ctx context.Context) {
func (lm *Manager) ValidatorV3(ctx context.Context) {
    zap.L().Info("ValidatorV3 started!")
    defer close(lm.terminated)

    tick := time.NewTicker(validationFrequency)
    defer tick.Stop()

    lm.Validate(ctx)

    lm.ValidateV3(ctx)
    for {
        select {
        case <-lm.done:
@@ -180,17 +163,33 @@ func (lm *Manager) Validator(ctx context.Context) {
        case <-lm.done:
            return
        case <-tick.C:
            lm.Validate(ctx)
            lm.ValidateV3(ctx)
        }
    }

}
}

// Validate validates the current active license
func (lm *Manager) Validate(ctx context.Context) (reterr error) {
func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {

    license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
    if apiError != nil {
        zap.L().Error("failed to validate license", zap.Error(apiError.Err))
        return apiError
    }

    err := lm.repo.UpdateLicenseV3(ctx, license)
    if err != nil {
        return model.BadRequest(errors.Wrap(err, "failed to update the new license"))
    }
    lm.SetActiveV3(license)

    return nil
}

func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
    zap.L().Info("License validation started")
    if lm.activeLicense == nil {
    if lm.activeLicenseV3 == nil {
        return nil
    }

@@ -200,62 +199,42 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
        lm.lastValidated = time.Now().Unix()
        if reterr != nil {
            zap.L().Error("License validation completed with error", zap.Error(reterr))

            atomic.AddUint64(&lm.failedAttempts, 1)
            // default to basic plan if validation fails for three consecutive times
            if atomic.LoadUint64(&lm.failedAttempts) > 3 {
                zap.L().Error("License validation completed with error for three consecutive times, defaulting to basic plan", zap.String("license_id", lm.activeLicenseV3.ID), zap.Bool("license_validation", false))
                lm.activeLicenseV3 = nil
                lm.activeFeatures = model.BasicPlan
                setDefaultFeatures(lm)
                err := lm.InitFeatures(lm.activeFeatures)
                if err != nil {
                    zap.L().Error("Couldn't initialize features", zap.Error(err))
                }
                lm.done <- struct{}{}
                lm.validatorRunning = false
            }

            telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
                map[string]interface{}{"err": reterr.Error()}, "", true, false)
        } else {
            // reset the failed attempts counter
            atomic.StoreUint64(&lm.failedAttempts, 0)
            zap.L().Info("License validation completed with no errors")
        }

        lm.mutex.Unlock()
    }()

    response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
    if apiError != nil {
        zap.L().Error("failed to validate license", zap.Error(apiError.Err))
        return apiError.Err
    err := lm.RefreshLicense(ctx)

    if err != nil {
        return err
    }

    if response.PlanDetails == lm.activeLicense.PlanDetails {
        // license plan hasn't changed, nothing to do
        return nil
    }

    if response.PlanDetails != "" {

        // copy and replace the active license record
        l := model.License{
            Key:               lm.activeLicense.Key,
            CreatedAt:         lm.activeLicense.CreatedAt,
            PlanDetails:       response.PlanDetails,
            ValidationMessage: lm.activeLicense.ValidationMessage,
            ActivationId:      lm.activeLicense.ActivationId,
        }

        if err := l.ParsePlan(); err != nil {
            zap.L().Error("failed to parse updated license", zap.Error(err))
            return err
        }

        // updated plan is parsable, check if plan has changed
        if lm.activeLicense.PlanDetails != response.PlanDetails {
            err := lm.repo.UpdatePlanDetails(ctx, lm.activeLicense.Key, response.PlanDetails)
            if err != nil {
                // unexpected db write issue but we can let the user continue
                // and wait for update to work in next cycle.
                zap.L().Error("failed to validate license", zap.Error(err))
            }
        }

        // activate the updated license plan
        lm.SetActive(&l)
    }

    return nil
}

// Activate activates a license key with signoz server
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
    defer func() {
        if errResponse != nil {
            userEmail, err := auth.GetEmailFromJwt(ctx)
@@ -266,36 +245,22 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
        }
    }()

    response, apiError := validate.ActivateLicense(key, "")
    license, apiError := validate.ValidateLicenseV3(licenseKey)
    if apiError != nil {
        zap.L().Error("failed to activate license", zap.Error(apiError.Err))
        zap.L().Error("failed to get the license", zap.Error(apiError.Err))
        return nil, apiError
    }

    l := &model.License{
        Key:          key,
        ActivationId: response.ActivationId,
        PlanDetails:  response.PlanDetails,
    }

    // parse validity and features from the plan details
    err := l.ParsePlan()

    // insert the new license into the sqlite db
    err := lm.repo.InsertLicenseV3(ctx, license)
    if err != nil {
        zap.L().Error("failed to activate license", zap.Error(err))
        return nil, model.InternalError(err)
    }

    // store the license before activating it
    err = lm.repo.InsertLicense(ctx, l)
    if err != nil {
        zap.L().Error("failed to activate license", zap.Error(err))
        return nil, model.InternalError(err)
        return nil, err
    }

    // license is valid, activate it
    lm.SetActive(l)
    return l, nil
    lm.SetActiveV3(license)
    return license, nil
}

// CheckFeature will be internally used by backend routines

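Note on the manager hunks above: the new ValidateV3 path defers a closure that counts consecutive failures with sync/atomic and drops to the basic plan once the count exceeds three, resetting to zero on any success. A self-contained sketch of just that counter rule; the guard type and record method are invented for illustration:

package main

import (
    "fmt"
    "sync/atomic"
)

// guard mirrors the fallback rule in ValidateV3: after more than
// three consecutive validation failures, signal a fallback.
type guard struct{ failed uint64 }

func (g *guard) record(err error) (fallback bool) {
    if err != nil {
        atomic.AddUint64(&g.failed, 1)
        return atomic.LoadUint64(&g.failed) > 3
    }
    // a successful validation resets the counter, as in the diff
    atomic.StoreUint64(&g.failed, 0)
    return false
}

func main() {
    g := &guard{}
    errs := []error{fmt.Errorf("x"), fmt.Errorf("x"), fmt.Errorf("x"), fmt.Errorf("x"), nil}
    for i, e := range errs {
        fmt.Println(i+1, g.record(e))
    }
    // prints: 1 false, 2 false, 3 false, 4 true, 5 false
}
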
@@ -1,52 +0,0 @@
package sqlite

import (
    "fmt"

    "github.com/jmoiron/sqlx"
)

func InitDB(db *sqlx.DB) error {
    var err error
    if db == nil {
        return fmt.Errorf("invalid db connection")
    }

    table_schema := `CREATE TABLE IF NOT EXISTS licenses(
        key TEXT PRIMARY KEY,
        createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updatedAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        planDetails TEXT,
        activationId TEXT,
        validationMessage TEXT,
        lastValidated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );

    CREATE TABLE IF NOT EXISTS sites(
        uuid TEXT PRIMARY KEY,
        alias VARCHAR(180) DEFAULT 'PROD',
        url VARCHAR(300),
        createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    `

    _, err = db.Exec(table_schema)
    if err != nil {
        return fmt.Errorf("error in creating licenses table: %s", err.Error())
    }

    table_schema = `CREATE TABLE IF NOT EXISTS feature_status (
        name TEXT PRIMARY KEY,
        active bool,
        usage INTEGER DEFAULT 0,
        usage_limit INTEGER DEFAULT 0,
        route TEXT
    );`

    _, err = db.Exec(table_schema)
    if err != nil {
        return fmt.Errorf("error in creating feature_status table: %s", err.Error())
    }

    return nil
}

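The deleted file above removed the legacy licenses/sites DDL. Creation of the v3 table happens in the ee sqlite package that Repo.InitDB now delegates to (sqlite.InitDB(r.db) in the repo hunk), and its DDL is not part of this diff. A guessed sketch of the licenses_v3 shape, inferred from the repo queries (SELECT id,key,data FROM licenses_v3; INSERT INTO licenses_v3 (id, key, data)); the UNIQUE constraint on key is an assumption based on the ErrConstraintUnique handling in InsertLicenseV3:

package sqlite

// Sketch only: the actual licenses_v3 migration is not shown in
// this diff. Columns are inferred from the queries in the repo
// layer; UNIQUE on key is a guess, not confirmed by the source.
const licensesV3Schema = `
CREATE TABLE IF NOT EXISTS licenses_v3 (
    id   TEXT PRIMARY KEY,
    key  TEXT UNIQUE NOT NULL,
    data TEXT
);`
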
@@ -13,13 +13,18 @@ import (
    "go.opentelemetry.io/otel/sdk/resource"
    semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
    "go.signoz.io/signoz/ee/query-service/app"
    "go.signoz.io/signoz/pkg/config"
    "go.signoz.io/signoz/pkg/config/envprovider"
    "go.signoz.io/signoz/pkg/config/fileprovider"
    "go.signoz.io/signoz/pkg/query-service/auth"
    baseconst "go.signoz.io/signoz/pkg/query-service/constants"
    "go.signoz.io/signoz/pkg/query-service/migrate"
    "go.signoz.io/signoz/pkg/query-service/version"
    "go.signoz.io/signoz/pkg/signoz"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    prommodel "github.com/prometheus/common/model"

    zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
    zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"

@@ -77,6 +82,10 @@ func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
    return logger
}

func init() {
    prommodel.NameValidationScheme = prommodel.UTF8Validation
}

func main() {
    var promConfigPath, skipTopLvlOpsPath string

@@ -88,7 +97,8 @@ func main() {
    var cluster string

    var useLogsNewSchema bool
    var cacheConfigPath, fluxInterval string
    var useTraceNewSchema bool
    var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string
    var enableQueryServiceLogOTLPExport bool
    var preferSpanMetrics bool

@@ -96,8 +106,10 @@ func main() {
    var maxOpenConns int
    var dialTimeout time.Duration
    var gatewayUrl string
    var useLicensesV3 bool

    flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
    flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
    flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
    flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
    flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
@@ -108,10 +120,11 @@ func main() {
    flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
    flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
    flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
    flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
    flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
    flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
    flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")

    flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
    flag.Parse()

    loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
@@ -121,22 +134,43 @@ func main() {

    version.PrintVersion()

    config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
        Uris: []string{"env:"},
        ProviderFactories: []config.ProviderFactory{
            envprovider.NewFactory(),
            fileprovider.NewFactory(),
        },
    }, signoz.DeprecatedFlags{
        MaxIdleConns: maxIdleConns,
        MaxOpenConns: maxOpenConns,
        DialTimeout:  dialTimeout,
    })
    if err != nil {
        zap.L().Fatal("Failed to create config", zap.Error(err))
    }

    signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
    if err != nil {
        zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
    }

    serverOptions := &app.ServerOptions{
        HTTPHostPort:      baseconst.HTTPHostPort,
        PromConfigPath:    promConfigPath,
        SkipTopLvlOpsPath: skipTopLvlOpsPath,
        PreferSpanMetrics: preferSpanMetrics,
        PrivateHostPort:   baseconst.PrivateHostPort,
        DisableRules:      disableRules,
        RuleRepoURL:       ruleRepoURL,
        MaxIdleConns:      maxIdleConns,
        MaxOpenConns:      maxOpenConns,
        DialTimeout:       dialTimeout,
        CacheConfigPath:   cacheConfigPath,
        FluxInterval:      fluxInterval,
        Cluster:           cluster,
        GatewayUrl:        gatewayUrl,
        UseLogsNewSchema:  useLogsNewSchema,
        Config:            config,
        SigNoz:            signoz,
        HTTPHostPort:      baseconst.HTTPHostPort,
        PromConfigPath:    promConfigPath,
        SkipTopLvlOpsPath: skipTopLvlOpsPath,
        PreferSpanMetrics: preferSpanMetrics,
        PrivateHostPort:   baseconst.PrivateHostPort,
        DisableRules:      disableRules,
        RuleRepoURL:       ruleRepoURL,
        CacheConfigPath:   cacheConfigPath,
        FluxInterval:      fluxInterval,
        FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
        Cluster:                    cluster,
        GatewayUrl:                 gatewayUrl,
        UseLogsNewSchema:           useLogsNewSchema,
        UseTraceNewSchema:          useTraceNewSchema,
    }

    // Read the jwt secret key
@@ -148,12 +182,6 @@ func main() {
        zap.L().Info("JWT secret key set successfully.")
    }

    if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
        zap.L().Error("Failed to migrate", zap.Error(err))
    } else {
        zap.L().Info("Migration successful")
    }

    server, err := app.NewServer(serverOptions)
    if err != nil {
        zap.L().Fatal("Failed to create server", zap.Error(err))

@@ -46,6 +46,13 @@ func BadRequest(err error) *ApiError {
    }
}

func Unauthorized(err error) *ApiError {
    return &ApiError{
        Typ: basemodel.ErrorUnauthorized,
        Err: err,
    }
}

// BadRequestStr returns an ApiError object of bad request for string input
func BadRequestStr(s string) *ApiError {
    return &ApiError{

@@ -1,8 +1,9 @@
package model

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "reflect"
    "time"

    "github.com/pkg/errors"
@@ -59,37 +60,6 @@ type LicensePlan struct {
    Status string `json:"status"`
}

func (l *License) ParsePlan() error {
    l.LicensePlan = LicensePlan{}

    planData, err := base64.StdEncoding.DecodeString(l.PlanDetails)
    if err != nil {
        return err
    }

    plan := LicensePlan{}
    err = json.Unmarshal([]byte(planData), &plan)
    if err != nil {
        l.ValidationMessage = "failed to parse plan from license"
        return errors.Wrap(err, "failed to parse plan from license")
    }

    l.LicensePlan = plan
    l.ParseFeatures()
    return nil
}

func (l *License) ParseFeatures() {
    switch l.PlanKey {
    case Pro:
        l.FeatureSet = ProPlan
    case Enterprise:
        l.FeatureSet = EnterprisePlan
    default:
        l.FeatureSet = BasicPlan
    }
}

type Licenses struct {
    TrialStart int64 `json:"trialStart"`
    TrialEnd   int64 `json:"trialEnd"`
@@ -104,3 +74,165 @@ type SubscriptionServerResp struct {
    Status string   `json:"status"`
    Data   Licenses `json:"data"`
}

type Plan struct {
    Name string `json:"name"`
}

type LicenseDB struct {
    ID   string `json:"id"`
    Key  string `json:"key"`
    Data string `json:"data"`
}

type LicenseV3 struct {
    ID         string
    Key        string
    Data       map[string]interface{}
    PlanName   string
    Features   basemodel.FeatureSet
    Status     string
    IsCurrent  bool
    ValidFrom  int64
    ValidUntil int64
}

func extractKeyFromMapStringInterface[T any](data map[string]interface{}, key string) (T, error) {
    var zeroValue T
    if val, ok := data[key]; ok {
        if value, ok := val.(T); ok {
            return value, nil
        }
        return zeroValue, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zeroValue))
    }
    return zeroValue, fmt.Errorf("%s key is missing", key)
}

func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
    var features basemodel.FeatureSet

    // extract id from data
    licenseID, err := extractKeyFromMapStringInterface[string](data, "id")
    if err != nil {
        return nil, err
    }
    delete(data, "id")

    // extract key from data
    licenseKey, err := extractKeyFromMapStringInterface[string](data, "key")
    if err != nil {
        return nil, err
    }
    delete(data, "key")

    // extract status from data
    status, err := extractKeyFromMapStringInterface[string](data, "status")
    if err != nil {
        return nil, err
    }

    planMap, err := extractKeyFromMapStringInterface[map[string]any](data, "plan")
    if err != nil {
        return nil, err
    }

    planName, err := extractKeyFromMapStringInterface[string](planMap, "name")
    if err != nil {
        return nil, err
    }
    // if license status is invalid then default it to basic
    if status == LicenseStatusInvalid {
        planName = PlanNameBasic
    }

    featuresFromZeus := basemodel.FeatureSet{}
    if _features, ok := data["features"]; ok {
        featuresData, err := json.Marshal(_features)
        if err != nil {
            return nil, errors.Wrap(err, "failed to marshal features data")
        }

        if err := json.Unmarshal(featuresData, &featuresFromZeus); err != nil {
            return nil, errors.Wrap(err, "failed to unmarshal features data")
        }
    }

    switch planName {
    case PlanNameTeams:
        features = append(features, ProPlan...)
    case PlanNameEnterprise:
        features = append(features, EnterprisePlan...)
    case PlanNameBasic:
        features = append(features, BasicPlan...)
    default:
        features = append(features, BasicPlan...)
    }

    if len(featuresFromZeus) > 0 {
        for _, feature := range featuresFromZeus {
            exists := false
            for i, existingFeature := range features {
                if existingFeature.Name == feature.Name {
                    features[i] = feature // Replace existing feature
                    exists = true
                    break
                }
            }
            if !exists {
                features = append(features, feature) // Append if it doesn't exist
            }
        }
    }
    data["features"] = features

    _validFrom, err := extractKeyFromMapStringInterface[float64](data, "valid_from")
    if err != nil {
        _validFrom = 0
    }
    validFrom := int64(_validFrom)

    _validUntil, err := extractKeyFromMapStringInterface[float64](data, "valid_until")
    if err != nil {
        _validUntil = 0
    }
    validUntil := int64(_validUntil)

    return &LicenseV3{
        ID:         licenseID,
        Key:        licenseKey,
        Data:       data,
        PlanName:   planName,
        Features:   features,
        ValidFrom:  validFrom,
        ValidUntil: validUntil,
        Status:     status,
    }, nil

}

func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}) (*LicenseV3, error) {
    licenseDataWithIdAndKey := data
    licenseDataWithIdAndKey["id"] = id
    licenseDataWithIdAndKey["key"] = key
    return NewLicenseV3(licenseDataWithIdAndKey)
}

func ConvertLicenseV3ToLicenseV2(l *LicenseV3) *License {
    planKeyFromPlanName, ok := MapOldPlanKeyToNewPlanName[l.PlanName]
    if !ok {
        planKeyFromPlanName = Basic
    }
    return &License{
        Key:               l.Key,
        ActivationId:      "",
        PlanDetails:       "",
        FeatureSet:        l.Features,
        ValidationMessage: "",
        IsCurrent:         l.IsCurrent,
        LicensePlan: LicensePlan{
            PlanKey:    planKeyFromPlanName,
            ValidFrom:  l.ValidFrom,
            ValidUntil: l.ValidUntil,
            Status:     l.Status},
    }

}

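All of the field extraction in NewLicenseV3 above goes through the generic extractKeyFromMapStringInterface helper. A self-contained usage sketch; the payload literal is invented for illustration, and the helper body is copied verbatim from the hunk above:

package main

import (
    "fmt"
    "reflect"
)

// Copy of the generic helper from the hunk above: returns the value
// under key if it has dynamic type T, else a descriptive error.
func extractKeyFromMapStringInterface[T any](data map[string]interface{}, key string) (T, error) {
    var zeroValue T
    if val, ok := data[key]; ok {
        if value, ok := val.(T); ok {
            return value, nil
        }
        return zeroValue, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zeroValue))
    }
    return zeroValue, fmt.Errorf("%s key is missing", key)
}

func main() {
    payload := map[string]interface{}{"id": "lic-123", "valid_from": 1730899309.0}

    id, _ := extractKeyFromMapStringInterface[string](payload, "id")
    fmt.Println(id) // lic-123

    // JSON numbers decode as float64, which is why the diff extracts
    // valid_from/valid_until as float64 and then casts to int64.
    vf, _ := extractKeyFromMapStringInterface[float64](payload, "valid_from")
    fmt.Println(int64(vf)) // 1730899309

    // Wrong type: returns the zero value and a descriptive error.
    _, err := extractKeyFromMapStringInterface[string](payload, "valid_from")
    fmt.Println(err) // valid_from key is not a valid string
}
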
ee/query-service/model/license_test.go (new file, 170 lines)
@@ -0,0 +1,170 @@
package model

import (
    "encoding/json"
    "testing"

    "github.com/pkg/errors"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "go.signoz.io/signoz/pkg/query-service/model"
)

func TestNewLicenseV3(t *testing.T) {
    testCases := []struct {
        name     string
        data     []byte
        pass     bool
        expected *LicenseV3
        error    error
    }{
        {
            name:  "Error for missing license id",
            data:  []byte(`{}`),
            pass:  false,
            error: errors.New("id key is missing"),
        },
        {
            name:  "Error for license id not being a valid string",
            data:  []byte(`{"id": 10}`),
            pass:  false,
            error: errors.New("id key is not a valid string"),
        },
        {
            name:  "Error for missing license key",
            data:  []byte(`{"id":"does-not-matter"}`),
            pass:  false,
            error: errors.New("key key is missing"),
        },
        {
            name:  "Error for invalid string license key",
            data:  []byte(`{"id":"does-not-matter","key":10}`),
            pass:  false,
            error: errors.New("key key is not a valid string"),
        },
        {
            name:  "Error for missing license status",
            data:  []byte(`{"id":"does-not-matter", "key": "does-not-matter","category":"FREE"}`),
            pass:  false,
            error: errors.New("status key is missing"),
        },
        {
            name:  "Error for invalid string license status",
            data:  []byte(`{"id":"does-not-matter","key": "does-not-matter", "category":"FREE", "status":10}`),
            pass:  false,
            error: errors.New("status key is not a valid string"),
        },
        {
            name:  "Error for missing license plan",
            data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE"}`),
            pass:  false,
            error: errors.New("plan key is missing"),
        },
        {
            name:  "Error for invalid json license plan",
            data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":10}`),
            pass:  false,
            error: errors.New("plan key is not a valid map[string]interface {}"),
        },
        {
            name:  "Error for invalid license plan",
            data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{}}`),
            pass:  false,
            error: errors.New("name key is missing"),
        },
        {
            name: "Parse the entire license properly",
            data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
            pass: true,
            expected: &LicenseV3{
                ID:  "does-not-matter",
                Key: "does-not-matter-key",
                Data: map[string]interface{}{
                    "plan": map[string]interface{}{
                        "name": "TEAMS",
                    },
                    "category":    "FREE",
                    "status":      "ACTIVE",
                    "valid_from":  float64(1730899309),
                    "valid_until": float64(-1),
                },
                PlanName:   PlanNameTeams,
                ValidFrom:  1730899309,
                ValidUntil: -1,
                Status:     "ACTIVE",
                IsCurrent:  false,
                Features:   model.FeatureSet{},
            },
        },
        {
            name: "Fallback to basic plan if license status is inactive",
            data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
            pass: true,
            expected: &LicenseV3{
                ID:  "does-not-matter",
                Key: "does-not-matter-key",
                Data: map[string]interface{}{
                    "plan": map[string]interface{}{
                        "name": "TEAMS",
                    },
                    "category":    "FREE",
                    "status":      "INACTIVE",
                    "valid_from":  float64(1730899309),
                    "valid_until": float64(-1),
                },
                PlanName:   PlanNameBasic,
                ValidFrom:  1730899309,
                ValidUntil: -1,
                Status:     "INACTIVE",
                IsCurrent:  false,
                Features:   model.FeatureSet{},
            },
        },
        {
            name: "fallback states for validFrom and validUntil",
            data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`),
            pass: true,
            expected: &LicenseV3{
                ID:  "does-not-matter",
                Key: "does-not-matter-key",
                Data: map[string]interface{}{
                    "plan": map[string]interface{}{
                        "name": "TEAMS",
                    },
                    "valid_from":  1234.456,
                    "valid_until": 5678.567,
                    "category":    "FREE",
                    "status":      "ACTIVE",
                },
                PlanName:   PlanNameTeams,
                ValidFrom:  1234,
                ValidUntil: 5678,
                Status:     "ACTIVE",
                IsCurrent:  false,
                Features:   model.FeatureSet{},
            },
        },
    }

    for _, tc := range testCases {
        var licensePayload map[string]interface{}
        err := json.Unmarshal(tc.data, &licensePayload)
        require.NoError(t, err)
        license, err := NewLicenseV3(licensePayload)
        if license != nil {
            license.Features = make(model.FeatureSet, 0)
            delete(license.Data, "features")
        }

        if tc.pass {
            require.NoError(t, err)
            require.NotNil(t, license)
            assert.Equal(t, tc.expected, license)
        } else {
            require.Error(t, err)
            assert.EqualError(t, err, tc.error.Error())
            require.Nil(t, license)
        }

    }
}

@@ -1,6 +1,7 @@
package model

import (
    "go.signoz.io/signoz/pkg/query-service/constants"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

@@ -8,12 +9,26 @@ const SSO = "SSO"
const Basic = "BASIC_PLAN"
const Pro = "PRO_PLAN"
const Enterprise = "ENTERPRISE_PLAN"

var (
    PlanNameEnterprise = "ENTERPRISE"
    PlanNameTeams      = "TEAMS"
    PlanNameBasic      = "BASIC"
)

var (
    MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
)

var (
    LicenseStatusInvalid = "INVALID"
)

const DisableUpsell = "DISABLE_UPSELL"
const Onboarding = "ONBOARDING"
const ChatSupport = "CHAT_SUPPORT"
const Gateway = "GATEWAY"
const PremiumSupport = "PREMIUM_SUPPORT"
const QueryBuilderSearchV2 = "QUERY_BUILDER_SEARCH_V2"

var BasicPlan = basemodel.FeatureSet{
    basemodel.Feature{
@@ -129,12 +144,19 @@ var BasicPlan = basemodel.FeatureSet{
        Route: "",
    },
    basemodel.Feature{
        Name:       QueryBuilderSearchV2,
        Name:       basemodel.AnomalyDetection,
        Active:     false,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.HostsInfraMonitoring,
        Active:     constants.EnableHostsInfraMonitoring(),
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
}

var ProPlan = basemodel.FeatureSet{
@@ -244,8 +266,15 @@ var ProPlan = basemodel.FeatureSet{
        Route: "",
    },
    basemodel.Feature{
        Name:       QueryBuilderSearchV2,
        Active:     false,
        Name:       basemodel.AnomalyDetection,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.HostsInfraMonitoring,
        Active:     constants.EnableHostsInfraMonitoring(),
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
@@ -373,8 +402,15 @@ var EnterprisePlan = basemodel.FeatureSet{
        Route: "",
    },
    basemodel.Feature{
        Name:       QueryBuilderSearchV2,
        Active:     false,
        Name:       basemodel.AnomalyDetection,
        Active:     true,
        Usage:      0,
        UsageLimit: -1,
        Route:      "",
    },
    basemodel.Feature{
        Name:       basemodel.HostsInfraMonitoring,
        Active:     constants.EnableHostsInfraMonitoring(),
        Usage:      0,
        UsageLimit: -1,
        Route:      "",

Some files were not shown because too many files have changed in this diff.