Compare commits
913 Commits
.github/CODEOWNERS (vendored): 6 changes

@@ -5,6 +5,6 @@
 /frontend/ @YounixM
 /frontend/src/container/MetricsApplication @srikanthccv
 /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
-/deploy/ @prashant-shahi
-/sample-apps/ @prashant-shahi
-.github @prashant-shahi
+/deploy/ @SigNoz/devops
+/sample-apps/ @SigNoz/devops
+.github @SigNoz/devops
.github/ISSUE_TEMPLATE/request_dashboard.md (vendored, new file): 49 additions

@@ -0,0 +1,49 @@
+---
+name: Request Dashboard
+about: Request a new dashboard for the SigNoz Dashboards repository
+title: '[Dashboard Request] '
+labels: 'dashboard-template'
+assignees: ''
+
+---
+
+<!-- Use this template to request a new dashboard for the SigNoz Dashboards repository. Providing detailed information will help us understand your needs better and speed up the dashboard creation process. -->
+
+## Dashboard Name
+
+<!-- Provide the name for the requested dashboard. Be specific (e.g., "MySQL Monitoring Dashboard"). -->
+
+## Expected Dashboard Sections and Panels
+
+(Can be tweaked (add or remove panels/sections) according to available metrics)
+
+### Section Name
+
+<!-- Brief description of what this section should display (e.g., "Resource usage metrics for MySQL database"). -->
+
+### Panel Name
+
+<!-- Description of the panel (e.g., "Displays current CPU usage, memory usage, etc."). -->
+
+<!-- - **Example:**
+  - **Section**: Resource Metrics
+  - **Panel**: CPU Usage - Displays the current CPU usage across all database instances.
+  - **Panel**: Memory Usage - Displays the total memory used by the MySQL process. -->
+
+<!-- Repeat this format for any additional sections or panels. -->
+
+## Expected Dashboard Variables
+
+<!-- List any dashboard variables that should be included in the dashboard. Examples could be `deployment.environment`, `hostname`, `region`, etc. -->
+
+## Additional Comments or Requirements
+
+<!-- Include any other details, special requirements, or specific visualizations you'd like to request for this dashboard. -->
+
+## References or Screenshots
+
+<!-- Add any references or screenshots of requested dashboard if available. -->
+
+## 📋 Notes
+
+Please review the [CONTRIBUTING.md](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md) for guidelines on dashboard structure, naming conventions, and how to submit a pull request.
.github/workflows/build.yaml (vendored): 8 changes

@@ -8,6 +8,13 @@ on:
       - release/v*

 jobs:
+  check-no-ee-references:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Run check
+        run: make check-no-ee-references
+
   build-frontend:
     runs-on: ubuntu-latest
     steps:
@@ -36,7 +43,6 @@ jobs:
         run: |
           echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
           echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
-          echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
       - name: Install dependencies
         run: cd frontend && yarn install
       - name: Run ESLint
.github/workflows/dependency-review.yml (vendored): 2 changes

@@ -19,4 +19,4 @@ jobs:
       - name: 'Dependency Review'
         with:
           fail-on-severity: high
-        uses: actions/dependency-review-action@v2
+        uses: actions/dependency-review-action@v3
.github/workflows/e2e-k3s.yaml (vendored): 11 changes

@@ -15,6 +15,11 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4

+      - name: Setup golang
+        uses: actions/setup-go@v4
+        with:
+          go-version: "1.21"
+
       - name: Build query-service image
         env:
           DEV_BUILD: 1
@@ -65,9 +70,9 @@ jobs:
       - name: Kick off a sample-app workload
         run: |
           # start the locust swarm
-          kubectl -n sample-application run strzal --image=djbingham/curl \
-            --restart='OnFailure' -i --rm --command -- curl -X POST -F \
-            'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
+          kubectl --namespace sample-application run strzal --image=djbingham/curl \
+            --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
+            'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm

       - name: Get short commit SHA, display tunnel URL and IP Address of the worker node
         id: get-subdomain
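The last hunk tracks Locust's 1.0 API rename: the `/swarm` endpoint's form fields `locust_count` and `hatch_rate` became `user_count` and `spawn_rate`. A minimal sketch of the same call issued directly, assuming the `locust-master` service is reachable on port 8089:

```bash
# start a swarm of 6 simulated users, spawning 2 per second (Locust >= 1.0 field names)
curl -X POST -F 'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
```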
.github/workflows/jest-coverage-changes.yml (vendored, new file): 31 additions

@@ -0,0 +1,31 @@
+name: Jest Coverage - changed files
+
+on:
+  pull_request:
+    branches: develop
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          ref: "refs/heads/develop"
+          token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
+
+      - name: Fetch branch
+        run: git fetch origin ${{ github.event.pull_request.head.ref }}
+
+      - run: |
+          git checkout ${{ github.event.pull_request.head.sha }}
+
+      - uses: actions/setup-node@v4
+        with:
+          node-version: lts/*
+
+      - name: Install dependencies
+        run: cd frontend && npm install -g yarn && yarn
+
+      - name: npm run test:changedsince
+        run: cd frontend && npm run i18n:generate-hash && npm run test:changedsince
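The final step runs a repo-defined npm script whose body is not shown in this diff; judging by the name, it likely wraps Jest's `--changedSince` flag, which runs only the tests affected by files changed since the given branch. A hypothetical equivalent invocation:

```bash
# run only tests affected by commits not on develop, and collect coverage for them
yarn jest --changedSince=origin/develop --coverage
```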
.github/workflows/push.yaml (vendored): 29 changes

@@ -9,7 +9,6 @@ on:
       - v*

 jobs:
-
   image-build-and-push-query-service:
     runs-on: ubuntu-latest
     steps:
@@ -20,13 +19,13 @@ jobs:
         with:
           go-version: "1.21"
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -64,13 +63,13 @@ jobs:
         with:
           go-version: "1.21"
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -115,11 +114,11 @@ jobs:
         run: npm run lint
         continue-on-error: true
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -151,7 +150,13 @@ jobs:
         run: |
           echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
           echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
-          echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
+          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
+          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
+          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
+          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
+          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
       - name: Install dependencies
         working-directory: frontend
         run: yarn install
@@ -164,11 +169,11 @@ jobs:
         run: npm run lint
         continue-on-error: true
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
.github/workflows/staging-deployment.yaml (vendored): 71 changes

@@ -9,34 +9,47 @@ jobs:
     name: Deploy latest develop branch to staging
     runs-on: ubuntu-latest
     environment: staging
+    permissions:
+      contents: 'read'
+      id-token: 'write'
     steps:
-      - name: Executing remote ssh commands using ssh key
-        uses: appleboy/ssh-action@v0.1.8
-        env:
-          GITHUB_BRANCH: develop
-          GITHUB_SHA: ${{ github.sha }}
-        with:
-          host: ${{ secrets.HOST_DNS }}
-          username: ${{ secrets.USERNAME }}
-          key: ${{ secrets.SSH_KEY }}
-          envs: GITHUB_BRANCH,GITHUB_SHA
-          command_timeout: 60m
-          script: |
-            echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
-            echo "GITHUB_SHA: ${GITHUB_SHA}"
-            export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
-            export OTELCOL_TAG="main"
-            export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-            docker system prune --force
-            docker pull signoz/signoz-otel-collector:main
-            docker pull signoz/signoz-schema-migrator:main
-            cd ~/signoz
-            git status
-            git add .
-            git stash push -m "stashed on $(date --iso-8601=seconds)"
-            git fetch origin
-            git checkout ${GITHUB_BRANCH}
-            git pull
-            make build-ee-query-service-amd64
-            make build-frontend-amd64
-            make run-signoz
+      - id: 'auth'
+        uses: 'google-github-actions/auth@v2'
+        with:
+          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
+          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
+
+      - name: 'sdk'
+        uses: 'google-github-actions/setup-gcloud@v2'
+
+      - name: 'ssh'
+        shell: bash
+        env:
+          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
+          GITHUB_SHA: ${{ github.sha }}
+          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
+          GCP_ZONE: ${{ secrets.GCP_ZONE }}
+          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
+          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
+        run: |
+          read -r -d '' COMMAND <<EOF || true
+          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
+          echo "GITHUB_SHA: ${GITHUB_SHA}"
+          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
+          export OTELCOL_TAG="main"
+          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
+          docker system prune --force
+          docker pull signoz/signoz-otel-collector:main
+          docker pull signoz/signoz-schema-migrator:main
+          cd ~/signoz
+          git status
+          git add .
+          git stash push -m "stashed on $(date --iso-8601=seconds)"
+          git fetch origin
+          git checkout ${GITHUB_BRANCH}
+          git pull
+          make build-ee-query-service-amd64
+          make build-frontend-amd64
+          make run-testing
+          EOF
+          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
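Both deployment workflows now assemble the remote script as one string with a bash heredoc and execute it over IAP-tunneled SSH instead of a long-lived SSH key. Note the `|| true`: `read -r -d ''` returns non-zero when it reaches end of input, which would otherwise fail the step. A minimal standalone sketch of the pattern, assuming `gcloud` is already authenticated and the `GCP_*` variables are set:

```bash
# capture a multi-line command into one variable; read exits non-zero at EOF, hence || true
read -r -d '' COMMAND <<EOF || true
echo "deploying ${GITHUB_SHA:0:7}"
uptime
EOF

# execute it on the VM through an IAP tunnel, with a short-lived SSH key
gcloud beta compute ssh "${GCP_INSTANCE}" \
  --zone "${GCP_ZONE}" \
  --project "${GCP_PROJECT}" \
  --tunnel-through-iap \
  --ssh-key-expire-after=15m \
  --command "${COMMAND}"
```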
.github/workflows/testing-deployment.yaml (vendored): 66 changes

@@ -9,32 +9,48 @@ jobs:
     runs-on: ubuntu-latest
     environment: testing
     if: ${{ github.event.label.name == 'testing-deploy' }}
+    permissions:
+      contents: 'read'
+      id-token: 'write'
     steps:
-      - name: Executing remote ssh commands using ssh key
-        uses: appleboy/ssh-action@v0.1.8
-        env:
-          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
-          GITHUB_SHA: ${{ github.sha }}
-        with:
-          host: ${{ secrets.HOST_DNS }}
-          username: ${{ secrets.USERNAME }}
-          key: ${{ secrets.SSH_KEY }}
-          envs: GITHUB_BRANCH,GITHUB_SHA
-          command_timeout: 60m
-          script: |
-            echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
-            echo "GITHUB_SHA: ${GITHUB_SHA}"
-            export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
-            export DEV_BUILD="1"
-            export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-            docker system prune --force
-            cd ~/signoz
-            git status
-            git add .
-            git stash push -m "stashed on $(date --iso-8601=seconds)"
-            git fetch origin
-            git checkout ${GITHUB_BRANCH}
-            git pull
-            make build-ee-query-service-amd64
-            make build-frontend-amd64
-            make run-signoz
+      - id: 'auth'
+        uses: 'google-github-actions/auth@v2'
+        with:
+          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
+          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
+
+      - name: 'sdk'
+        uses: 'google-github-actions/setup-gcloud@v2'
+
+      - name: 'ssh'
+        shell: bash
+        env:
+          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
+          GITHUB_SHA: ${{ github.sha }}
+          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
+          GCP_ZONE: ${{ secrets.GCP_ZONE }}
+          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
+          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
+        run: |
+          read -r -d '' COMMAND <<EOF || true
+          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
+          echo "GITHUB_SHA: ${GITHUB_SHA}"
+          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
+          export DEV_BUILD="1"
+          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
+          docker system prune --force
+          cd ~/signoz
+          git status
+          git add .
+          git stash push -m "stashed on $(date --iso-8601=seconds)"
+          git fetch origin
+          git checkout develop
+          git pull
+          # This is added to include the scenario when a new commit in the PR is force-pushed
+          git branch -D ${GITHUB_BRANCH}
+          git checkout --track origin/${GITHUB_BRANCH}
+          make build-ee-query-service-amd64
+          make build-frontend-amd64
+          make run-testing
+          EOF
+          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
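The checkout sequence changed for the reason called out in the new comment: after a force-push, a stale local branch would still point at the old commits, so the script now deletes and re-tracks it. The same idiom in isolation, with `BRANCH` as a hypothetical PR branch name:

```bash
BRANCH="feature/my-pr"                    # hypothetical branch name

git fetch origin
git checkout develop                      # move off the branch before deleting it
git branch -D "${BRANCH}" || true         # drop any stale local copy (ignore if absent)
git checkout --track "origin/${BRANCH}"   # re-create it from the freshly fetched remote ref
```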
.gitignore (vendored): 10 changes

@@ -47,6 +47,7 @@ ee/query-service/signoz.db
 ee/query-service/tests/test-deploy/data/

 # local data
+*.backup
 *.db
 /deploy/docker/clickhouse-setup/data/
 /deploy/docker-swarm/clickhouse-setup/data/
@@ -61,4 +62,11 @@ e2e/test-results/
 e2e/playwright-report/
 e2e/blob-report/
 e2e/playwright/.cache/
 e2e/.auth
+
+# go
+vendor/
+**/main/**
+
+# git-town
+.git-branches.toml
.scripts/commentLinesForSetup.sh (deleted): 7 deletions

@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# It Comments out the Line Query-Service & Frontend Section of deploy/docker/clickhouse-setup/docker-compose.yaml
-# Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml chnages.
-# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz
-
-sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
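For readers decoding the deleted one-liner: `sed -i 38,62's/.*/# &/'` edits the file in place and, for every line in the range 38-62, substitutes the whole line (`.*`) with itself prefixed by `# ` (`&` expands to the matched text), i.e. it comments the block out. A cleaner standalone form of the same command:

```bash
# comment out lines 38-62 in place; '&' is the matched line
sed -i '38,62 s/.*/# &/' deploy/docker/clickhouse-setup/docker-compose.yaml
```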
CONTRIBUTING.md:

@@ -30,6 +30,7 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s
   - [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
 - [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
   - [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
+- [Contribute to Dashboards](#6-contribute-to-dashboards-)
 - [Other Ways to Contribute](#other-ways-to-contribute)

 # 1. General Instructions 📝
@@ -37,7 +38,7 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s
 ## 1.1 For Creating Issue(s)
 Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.

-**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
+**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)

 #### Details like these are incredibly useful:
@@ -56,7 +57,7 @@ Before making any significant changes and before filing a new issue, please chec
 Discussing your proposed changes ahead of time will make the contribution
 process smooth for everyone 🙌.

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -97,13 +98,14 @@ GitHub provides additional document on [forking a repository](https://help.githu
 stability and quality of the component.


-You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [SLACK](https://signoz.io/slack).
+You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack community](https://signoz.io/slack).

 ### Pointers:
 - If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
 - If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
 - If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
 - If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
+- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).

 <hr>
@@ -117,7 +119,7 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be

 - Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -127,14 +129,13 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be

 - [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
 - [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
+- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz)

 Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.

-**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
+**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it.

-⚠️ If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
-
-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -188,7 +189,7 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
 ### Important Notes:
 The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 ## 3.2 Contribute to Frontend without installing SigNoz backend
@@ -209,7 +210,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi

 **Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -309,7 +310,7 @@ Click the button below. A workspace with all required environments will be creat

 > To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 <hr>
@@ -347,7 +348,7 @@ curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-
 ```bash
 kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
- 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
+ 'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
 ```

 **5.1.3 To stop the load generation:**
@@ -365,10 +366,21 @@ curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-
 | HOTROD_NAMESPACE=sample-application bash
 ```

-**[`^top^`](#)**
+**[`^top^`](#contributing-guidelines)**

 ---

+# 6. Contribute to Dashboards 📈
+
+**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)**
+
+To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief:
+
+1. Create a dashboard JSON file.
+2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed.
+3. Include screenshots of the dashboard in the `assets/` directory.
+4. Submit a pull request for review.
+
 ## Other Ways to Contribute

 There are many other ways to get involved with the community and to participate in this project:
@@ -379,7 +391,6 @@ There are many other ways to get involved with the community and to participate
 - Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
 - Tell others about the project on Twitter, your blog, etc.

-
 Again, Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

 Thank You!
Makefile: 13 changes

@@ -156,6 +156,9 @@ pull-signoz:
 run-signoz:
 	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d

+run-testing:
+	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.testing.yaml up --build -d
+
 down-signoz:
 	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v

@@ -175,6 +178,15 @@ clear-swarm-ch:
 	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
 		sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"

+check-no-ee-references:
+	@echo "Checking for 'ee' package references in 'pkg' directory..."
+	@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
+		echo "Error: Found references to 'ee' packages in 'pkg' directory"; \
+		exit 1; \
+	else \
+		echo "No references to 'ee' packages found in 'pkg' directory"; \
+	fi
+
 test:
 	go test ./pkg/query-service/app/metrics/...
 	go test ./pkg/query-service/cache/...
@@ -185,3 +197,4 @@ test:
 	go test ./pkg/query-service/tests/integration/...
 	go test ./pkg/query-service/rules/...
 	go test ./pkg/query-service/collectorsimulator/...
+	go test ./pkg/query-service/postprocess/...
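The new `check-no-ee-references` target (wired into build.yaml above) is a grep-based guard: grep exits 0 when it finds a match, so any open-source `pkg/` file that mentions an enterprise `ee/` import path fails the build. The same check run by hand from the repo root:

```bash
# exits non-zero, failing CI, if any Go file under pkg/ references an /ee/ path
if grep -R --include="*.go" '.*/ee/.*' pkg/; then
  echo "Error: Found references to 'ee' packages in 'pkg' directory"
  exit 1
fi
```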
README.md:

@@ -108,7 +108,7 @@ We support [OpenTelemetry](https://opentelemetry.io) as the library which you ca

 - Java
 - Python
-- NodeJS
+- Node.js
 - Go
 - PHP
 - .NET
@@ -198,14 +198,14 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu

 #### Frontend

-- [Palash Gupta](https://github.com/palashgdev)
 - [Yunus M](https://github.com/YounixM)
-- [Rajat Dabade](https://github.com/Rajat-Dabade)
+- [Vikrant Gupta](https://github.com/vikrantgupta25)
+- [Sagar Rajput](https://github.com/SagarRajput-7)

 #### DevOps

 - [Prashant Shahi](https://github.com/prashant-shahi)
-- [Dhawal Sanghvi](https://github.com/dhawal1248)
+- [Vibhu Pandey](https://github.com/grandwizard28)

 <br /><br />
|||||||
@@ -23,6 +23,9 @@
|
|||||||
[1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
|
[1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
|
||||||
-->
|
-->
|
||||||
<level>information</level>
|
<level>information</level>
|
||||||
|
<formatting>
|
||||||
|
<type>json</type>
|
||||||
|
</formatting>
|
||||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
||||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
||||||
<!-- Rotation policy
|
<!-- Rotation policy
|
||||||
@@ -649,12 +652,12 @@
|
|||||||
|
|
||||||
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
|
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
|
||||||
-->
|
-->
|
||||||
<!--
|
|
||||||
<macros>
|
<macros>
|
||||||
<shard>01</shard>
|
<shard>01</shard>
|
||||||
<replica>example01-01-1</replica>
|
<replica>example01-01-1</replica>
|
||||||
</macros>
|
</macros>
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
|
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
|
||||||
|
|||||||
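The added `<formatting>` block switches the ClickHouse server log from plain text to structured JSON, which is friendlier to log pipelines. A quick way to confirm the switch after a restart, assuming the default log path from this config and `jq` installed:

```bash
# each log line should now parse as a self-contained JSON object
tail -n 1 /var/log/clickhouse-server/clickhouse-server.log | jq .
```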
@@ -1,7 +1,7 @@
|
|||||||
version: "3.9"
|
version: "3.9"
|
||||||
|
|
||||||
x-clickhouse-defaults: &clickhouse-defaults
|
x-clickhouse-defaults: &clickhouse-defaults
|
||||||
image: clickhouse/clickhouse-server:23.11.1-alpine
|
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||||
tty: true
|
tty: true
|
||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
@@ -22,7 +22,7 @@ x-clickhouse-defaults: &clickhouse-defaults
|
|||||||
"wget",
|
"wget",
|
||||||
"--spider",
|
"--spider",
|
||||||
"-q",
|
"-q",
|
||||||
"localhost:8123/ping"
|
"0.0.0.0:8123/ping"
|
||||||
]
|
]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
@@ -133,7 +133,7 @@ services:
   #   - ./data/clickhouse-3/:/var/lib/clickhouse/

   alertmanager:
-    image: signoz/alertmanager:0.23.4
+    image: signoz/alertmanager:0.23.7
     volumes:
       - ./data/alertmanager:/data
     command:

@@ -146,11 +146,11 @@ services:
         condition: on-failure

   query-service:
-    image: signoz/query-service:0.37.1
+    image: signoz/query-service:0.56.0
     command:
       [
         "-config=/root/config/prometheus.yml",
-        # "--prefer-delta=true"
+        "--use-logs-new-schema=true"
       ]
     # ports:
     #   - "6060:6060" # pprof port

@@ -160,7 +160,7 @@ services:
       - ../dashboards:/root/config/dashboards
       - ./data/signoz/:/var/lib/signoz/
     environment:
-      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
+      - ClickHouseUrl=tcp://clickhouse:9000
       - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
       - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
       - DASHBOARDS_PATH=/root/config/dashboards
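The ClickHouseUrl change above, and the exporter DSN changes later in this diff, move the database name out of the ?database= query parameter and into the URL path. A small Go sketch of how the two shapes parse differently; this is illustration only, not the parsing code SigNoz itself uses:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// Extracts the database from either DSN shape seen in this diff.
func database(dsn string) string {
	u, err := url.Parse(dsn)
	if err != nil {
		return ""
	}
	if db := u.Query().Get("database"); db != "" {
		return db // old style: tcp://clickhouse:9000/?database=signoz_traces
	}
	return strings.TrimPrefix(u.Path, "/") // new style: tcp://clickhouse:9000/signoz_traces
}

func main() {
	fmt.Println(database("tcp://clickhouse:9000/?database=signoz_traces"))
	fmt.Println(database("tcp://clickhouse:9000/signoz_traces"))
}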
@@ -186,7 +186,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:0.37.1
+    image: signoz/frontend:0.56.0
     deploy:
       restart_policy:
         condition: on-failure

@@ -199,7 +199,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/signoz-otel-collector:0.88.8
+    image: signoz/signoz-otel-collector:0.102.12
     command:
       [
         "--config=/etc/otel-collector-config.yaml",

@@ -211,6 +211,7 @@ services:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
+      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
       - DOCKER_MULTI_NODE_CLUSTER=false

@@ -237,7 +238,7 @@ services:
       - query-service

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.88.8
+    image: signoz/signoz-schema-migrator:0.102.12
     deploy:
       restart_policy:
         condition: on-failure
@@ -36,6 +36,7 @@ receivers:
   #   endpoint: 0.0.0.0:6832
   hostmetrics:
     collection_interval: 30s
+    root_path: /hostfs
     scrapers:
      cpu: {}
      load: {}
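This root_path setting pairs with the new `- /:/hostfs:ro` volume added to the collector services in this diff: with the host filesystem mounted read-only at /hostfs, the hostmetrics receiver resolves /proc, /sys, and friends under that prefix, so it reports the host's metrics rather than the container's. A sketch of the idea in Go:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Sketch of what a root_path-style setting does: every host path is
// resolved under the mount point before it is read.
func readHostFile(rootPath, hostPath string) ([]byte, error) {
	return os.ReadFile(filepath.Join(rootPath, hostPath))
}

func main() {
	// Inside the collector container this would read /hostfs/proc/meminfo,
	// i.e. the host's memory stats, not the container's.
	data, err := readHostFile("/hostfs", "/proc/meminfo")
	if err != nil {
		fmt.Println("not running inside such a container:", err)
		return
	}
	fmt.Printf("%.80s\n", data)
}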
@@ -77,7 +78,16 @@ processors:
       # This is added to ensure the uniqueness of the timeseries
       # Otherwise, identical timeseries produced by multiple replicas of
       # collectors result in incorrect APM metrics
-      - name: 'signoz.collector.id'
+      - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name
   # memory_limiter:
   #   # 80% of maximum memory up to 2G
   #   limit_mib: 1500

@@ -98,6 +108,7 @@ processors:
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
     aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
+    enable_exp_histogram: true
     dimensions:
       - name: service.namespace
         default: default

@@ -107,31 +118,33 @@ processors:
       # Otherwise, identical timeseries produced by multiple replicas of
       # collectors result in incorrect APM metrics
       - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name

 exporters:
   clickhousetraces:
-    datasource: tcp://clickhouse:9000/?database=signoz_traces
+    datasource: tcp://clickhouse:9000/signoz_traces
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
     low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
   clickhousemetricswrite:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
+    endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
       enabled: true
   clickhousemetricswrite/prometheus:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
+    endpoint: tcp://clickhouse:9000/signoz_metrics
   # logging: {}
   clickhouselogsexporter:
-    dsn: tcp://clickhouse:9000/
+    dsn: tcp://clickhouse:9000/signoz_logs
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
-    timeout: 5s
-    sending_queue:
-      queue_size: 100
-    retry_on_failure:
-      enabled: true
-      initial_interval: 5s
-      max_interval: 30s
-      max_elapsed_time: 300s
+    timeout: 10s
+    use_new_schema: true

 extensions:
   health_check:
     endpoint: 0.0.0.0:13133
@@ -142,6 +155,8 @@ extensions:

 service:
   telemetry:
+    logs:
+      encoding: json
     metrics:
       address: 0.0.0.0:8888
   extensions: [health_check, zpages, pprof]
@@ -22,4 +22,4 @@ rule_files:
 scrape_configs: []

 remote_read:
-  - url: tcp://clickhouse:9000/?database=signoz_metrics
+  - url: tcp://clickhouse:9000/signoz_metrics
@@ -23,6 +23,9 @@
     [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
     -->
     <level>information</level>
+    <formatting>
+        <type>json</type>
+    </formatting>
     <log>/var/log/clickhouse-server/clickhouse-server.log</log>
     <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
     <!-- Rotation policy

@@ -649,12 +652,12 @@

     See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
     -->
-    <!--
     <macros>
         <shard>01</shard>
         <replica>example01-01-1</replica>
     </macros>
-    -->


     <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
@@ -1,5 +1,8 @@
 version: "2.4"

+include:
+  - test-app-docker-compose.yaml
+
 services:
   zookeeper-1:
     image: bitnami/zookeeper:3.7.1

@@ -19,7 +22,7 @@ services:
       - ZOO_AUTOPURGE_INTERVAL=1

   clickhouse:
-    image: clickhouse/clickhouse-server:23.7.3-alpine
+    image: clickhouse/clickhouse-server:24.1.2-alpine
     container_name: signoz-clickhouse
     # ports:
     #   - "9000:9000"

@@ -46,7 +49,7 @@ services:
         "wget",
         "--spider",
         "-q",
-        "localhost:8123/ping"
+        "0.0.0.0:8123/ping"
       ]
       interval: 30s
       timeout: 5s

@@ -54,7 +57,7 @@ services:

   alertmanager:
     container_name: signoz-alertmanager
-    image: signoz/alertmanager:0.23.4
+    image: signoz/alertmanager:0.23.7
     volumes:
       - ./data/alertmanager:/data
     depends_on:

@@ -66,7 +69,7 @@ services:
       - --storage.path=/data

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.8}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.12}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"

@@ -81,7 +84,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.88.8
+    image: signoz/signoz-otel-collector:0.102.12
     command:
       [
         "--config=/etc/otel-collector-config.yaml",

@@ -93,6 +96,8 @@ services:
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
+      - /var/lib/docker/containers:/var/lib/docker/containers:ro
+      - /:/hostfs:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
     ports:

@@ -126,29 +131,3 @@ services:
     depends_on:
       - otel-collector
     restart: on-failure
-
-  hotrod:
-    image: jaegertracing/example-hotrod:1.30
-    container_name: hotrod
-    logging:
-      options:
-        max-size: 50m
-        max-file: "3"
-    command: [ "all" ]
-    environment:
-      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
-
-  load-hotrod:
-    image: "signoz/locust:1.2.3"
-    container_name: load-hotrod
-    hostname: load-hotrod
-    environment:
-      ATTACKED_HOST: http://hotrod:8080
-      LOCUST_MODE: standalone
-      NO_PROXY: standalone
-      TASK_DELAY_FROM: 5
-      TASK_DELAY_TO: 30
-      QUIET_MODE: "${QUIET_MODE:-false}"
-      LOCUST_OPTS: "--headless -u 10 -r 1"
-    volumes:
-      - ../common/locust-scripts:/locust
@@ -25,7 +25,7 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        # "--prefer-delta=true"
+        "--use-logs-new-schema=true"
       ]
     ports:
       - "6060:6060"
deploy/docker/clickhouse-setup/docker-compose-minimal.yaml (new file, 279 lines)
@@ -0,0 +1,279 @@
x-clickhouse-defaults: &clickhouse-defaults
  restart: on-failure
  # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  depends_on:
    - zookeeper-1
    # - zookeeper-2
    # - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test:
      [
        "CMD",
        "wget",
        "--spider",
        "-q",
        "0.0.0.0:8123/ping"
      ]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-db-depend: &db-depend
  depends_on:
    clickhouse:
      condition: service_healthy
    otel-collector-migrator:
      condition: service_completed_successfully
    # clickhouse-2:
    #   condition: service_healthy
    # clickhouse-3:
    #   condition: service_healthy

services:

  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    container_name: signoz-zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-2:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-2
  #   hostname: zookeeper-2
  #   user: root
  #   ports:
  #     - "2182:2181"
  #     - "2889:2888"
  #     - "3889:3888"
  #   volumes:
  #     - ./data/zookeeper-2:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=2
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-3:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-3
  #   hostname: zookeeper-3
  #   user: root
  #   ports:
  #     - "2183:2181"
  #     - "2890:2888"
  #     - "3890:3888"
  #   volumes:
  #     - ./data/zookeeper-3:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=3
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    hostname: clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
      - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-2
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-3
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  alertmanager:
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.56.0}
    container_name: signoz-query-service
    command:
      [
        "-config=/root/config/prometheus.yml",
        "--use-logs-new-schema=true"
      ]
    # ports:
    #   - "6060:6060" # pprof port
    #   - "8080:8080" # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    <<: *db-depend

  frontend:
    image: signoz/frontend:${DOCKER_TAG:-0.56.0}
    container_name: signoz-frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.12}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.12}
    container_name: signoz-otel-collector
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /:/hostfs:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - DOCKER_MULTI_NODE_CLUSTER=false
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure
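The file above leans on YAML anchors and merge keys (&clickhouse-defaults, <<: *db-depend) to share service defaults. A small Go sketch of how a merge key expands on decode, assuming gopkg.in/yaml.v3 (which resolves merge keys) is available:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3" // assumption: yaml.v3 is on the module path; it resolves << merge keys on decode
)

const doc = `
x-defaults: &defaults
  restart: on-failure
  tty: true
services:
  clickhouse:
    <<: *defaults
    hostname: clickhouse
`

func main() {
	var out struct {
		Services map[string]map[string]any `yaml:"services"`
	}
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	// The merged service carries both its own keys and the anchored defaults.
	fmt.Println(out.Services["clickhouse"]["restart"], out.Services["clickhouse"]["hostname"])
}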
deploy/docker/clickhouse-setup/docker-compose.testing.yaml (new file, 285 lines)
@@ -0,0 +1,285 @@
version: "2.4"

include:
  - test-app-docker-compose.yaml

x-clickhouse-defaults: &clickhouse-defaults
  restart: on-failure
  # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  depends_on:
    - zookeeper-1
    # - zookeeper-2
    # - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test:
      [
        "CMD",
        "wget",
        "--spider",
        "-q",
        "0.0.0.0:8123/ping"
      ]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-db-depend: &db-depend
  depends_on:
    clickhouse:
      condition: service_healthy
    otel-collector-migrator:
      condition: service_completed_successfully
    # clickhouse-2:
    #   condition: service_healthy
    # clickhouse-3:
    #   condition: service_healthy

services:

  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    container_name: signoz-zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-2:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-2
  #   hostname: zookeeper-2
  #   user: root
  #   ports:
  #     - "2182:2181"
  #     - "2889:2888"
  #     - "3889:3888"
  #   volumes:
  #     - ./data/zookeeper-2:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=2
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-3:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-3
  #   hostname: zookeeper-3
  #   user: root
  #   ports:
  #     - "2183:2181"
  #     - "2890:2888"
  #     - "3890:3888"
  #   volumes:
  #     - ./data/zookeeper-3:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=3
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    hostname: clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
      - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-2
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-3
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  alertmanager:
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.56.0}
    container_name: signoz-query-service
    command:
      [
        "-config=/root/config/prometheus.yml",
        "-gateway-url=https://api.staging.signoz.cloud",
        "--use-logs-new-schema=true"
      ]
    # ports:
    #   - "6060:6060" # pprof port
    #   - "8080:8080" # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    <<: *db-depend

  frontend:
    image: signoz/frontend:${DOCKER_TAG:-0.56.0}
    container_name: signoz-frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.12}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.12}
    container_name: signoz-otel-collector
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /:/hostfs:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - DOCKER_MULTI_NODE_CLUSTER=false
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure
@@ -1,306 +1,3 @@
-version: "2.4"
-
-x-clickhouse-defaults: &clickhouse-defaults
-  restart: on-failure
-  # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
-  image: clickhouse/clickhouse-server:23.11.1-alpine
-  tty: true
-  depends_on:
-    - zookeeper-1
-    # - zookeeper-2
-    # - zookeeper-3
-  logging:
-    options:
-      max-size: 50m
-      max-file: "3"
-  healthcheck:
-    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-    test:
-      [
-        "CMD",
-        "wget",
-        "--spider",
-        "-q",
-        "localhost:8123/ping"
-      ]
-    interval: 30s
-    timeout: 5s
-    retries: 3
-  ulimits:
-    nproc: 65535
-    nofile:
-      soft: 262144
-      hard: 262144
-
-x-db-depend: &db-depend
-  depends_on:
-    clickhouse:
-      condition: service_healthy
-    otel-collector-migrator:
-      condition: service_completed_successfully
-    # clickhouse-2:
-    #   condition: service_healthy
-    # clickhouse-3:
-    #   condition: service_healthy
-
-services:
-
-  zookeeper-1:
-    image: bitnami/zookeeper:3.7.1
-    container_name: signoz-zookeeper-1
-    hostname: zookeeper-1
-    user: root
-    ports:
-      - "2181:2181"
-      - "2888:2888"
-      - "3888:3888"
-    volumes:
-      - ./data/zookeeper-1:/bitnami/zookeeper
-    environment:
-      - ZOO_SERVER_ID=1
-      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
-      - ALLOW_ANONYMOUS_LOGIN=yes
-      - ZOO_AUTOPURGE_INTERVAL=1
-
-  # zookeeper-2:
-  #   image: bitnami/zookeeper:3.7.0
-  #   container_name: signoz-zookeeper-2
-  #   hostname: zookeeper-2
-  #   user: root
-  #   ports:
-  #     - "2182:2181"
-  #     - "2889:2888"
-  #     - "3889:3888"
-  #   volumes:
-  #     - ./data/zookeeper-2:/bitnami/zookeeper
-  #   environment:
-  #     - ZOO_SERVER_ID=2
-  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
-  #     - ALLOW_ANONYMOUS_LOGIN=yes
-  #     - ZOO_AUTOPURGE_INTERVAL=1
-
-  # zookeeper-3:
-  #   image: bitnami/zookeeper:3.7.0
-  #   container_name: signoz-zookeeper-3
-  #   hostname: zookeeper-3
-  #   user: root
-  #   ports:
-  #     - "2183:2181"
-  #     - "2890:2888"
-  #     - "3890:3888"
-  #   volumes:
-  #     - ./data/zookeeper-3:/bitnami/zookeeper
-  #   environment:
-  #     - ZOO_SERVER_ID=3
-  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
-  #     - ALLOW_ANONYMOUS_LOGIN=yes
-  #     - ZOO_AUTOPURGE_INTERVAL=1
-
-  clickhouse:
-    <<: *clickhouse-defaults
-    container_name: signoz-clickhouse
-    hostname: clickhouse
-    ports:
-      - "9000:9000"
-      - "8123:8123"
-      - "9181:9181"
-    volumes:
-      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
-      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
-      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
-      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
-      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
-      - ./data/clickhouse/:/var/lib/clickhouse/
-      - ./user_scripts:/var/lib/clickhouse/user_scripts/
-
-  # clickhouse-2:
-  #   <<: *clickhouse-defaults
-  #   container_name: signoz-clickhouse-2
-  #   hostname: clickhouse-2
-  #   ports:
-  #     - "9001:9000"
-  #     - "8124:8123"
-  #     - "9182:9181"
-  #   volumes:
-  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
-  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
-  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
-  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
-  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
-  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
-  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/
-
-  # clickhouse-3:
-  #   <<: *clickhouse-defaults
-  #   container_name: signoz-clickhouse-3
-  #   hostname: clickhouse-3
-  #   ports:
-  #     - "9002:9000"
-  #     - "8125:8123"
-  #     - "9183:9181"
-  #   volumes:
-  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
-  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
-  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
-  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
-  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
-  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
-  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/
-
-  alertmanager:
-    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.4}
-    container_name: signoz-alertmanager
-    volumes:
-      - ./data/alertmanager:/data
-    depends_on:
-      query-service:
-        condition: service_healthy
-    restart: on-failure
-    command:
-      - --queryService.url=http://query-service:8085
-      - --storage.path=/data
-
-  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
-
-  query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.37.1}
-    container_name: signoz-query-service
-    command:
-      [
-        "-config=/root/config/prometheus.yml",
-        # "--prefer-delta=true"
-      ]
-    # ports:
-    #   - "6060:6060" # pprof port
-    #   - "8080:8080" # query-service port
-    volumes:
-      - ./prometheus.yml:/root/config/prometheus.yml
-      - ../dashboards:/root/config/dashboards
-      - ./data/signoz/:/var/lib/signoz/
-    environment:
-      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
-      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
-      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
-      - DASHBOARDS_PATH=/root/config/dashboards
-      - STORAGE=clickhouse
-      - GODEBUG=netdns=go
-      - TELEMETRY_ENABLED=true
-      - DEPLOYMENT_TYPE=docker-standalone-amd
-    restart: on-failure
-    healthcheck:
-      test:
-        [
-          "CMD",
-          "wget",
-          "--spider",
-          "-q",
-          "localhost:8080/api/v1/health"
-        ]
-      interval: 30s
-      timeout: 5s
-      retries: 3
-    <<: *db-depend
-
-  frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.37.1}
-    container_name: signoz-frontend
-    restart: on-failure
-    depends_on:
-      - alertmanager
-      - query-service
-    ports:
-      - "3301:3301"
-    volumes:
-      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
-
-  otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.8}
-    container_name: otel-migrator
-    command:
-      - "--dsn=tcp://clickhouse:9000"
-    depends_on:
-      clickhouse:
-        condition: service_healthy
-      # clickhouse-2:
-      #   condition: service_healthy
-      # clickhouse-3:
-      #   condition: service_healthy
-
-  otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.8}
-    container_name: signoz-otel-collector
-    command:
-      [
-        "--config=/etc/otel-collector-config.yaml",
-        "--manager-config=/etc/manager-config.yaml",
-        "--copy-path=/var/tmp/collector-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
-    user: root # required for reading docker container logs
-    volumes:
-      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
-      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
-      - /var/lib/docker/containers:/var/lib/docker/containers:ro
-    environment:
-      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
-      - DOCKER_MULTI_NODE_CLUSTER=false
-      - LOW_CARDINAL_EXCEPTION_GROUPING=false
-    ports:
-      # - "1777:1777" # pprof extension
-      - "4317:4317" # OTLP gRPC receiver
-      - "4318:4318" # OTLP HTTP receiver
-      # - "8888:8888" # OtelCollector internal metrics
-      # - "8889:8889" # signoz spanmetrics exposed by the agent
-      # - "9411:9411" # Zipkin port
-      # - "13133:13133" # health check extension
-      # - "14250:14250" # Jaeger gRPC
-      # - "14268:14268" # Jaeger thrift HTTP
-      # - "55678:55678" # OpenCensus receiver
-      # - "55679:55679" # zPages extension
-    restart: on-failure
-    depends_on:
-      clickhouse:
-        condition: service_healthy
-      otel-collector-migrator:
-        condition: service_completed_successfully
-      query-service:
-        condition: service_healthy
-
-  logspout:
-    image: "gliderlabs/logspout:v3.2.14"
-    container_name: signoz-logspout
-    volumes:
-      - /etc/hostname:/etc/host_hostname:ro
-      - /var/run/docker.sock:/var/run/docker.sock
-    command: syslog+tcp://otel-collector:2255
-    depends_on:
-      - otel-collector
-    restart: on-failure
-
-  hotrod:
-    image: jaegertracing/example-hotrod:1.30
-    container_name: hotrod
-    logging:
-      options:
-        max-size: 50m
-        max-file: "3"
-    command: [ "all" ]
-    environment:
-      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
-
-  load-hotrod:
-    image: "signoz/locust:1.2.3"
-    container_name: load-hotrod
-    hostname: load-hotrod
-    environment:
-      ATTACKED_HOST: http://hotrod:8080
-      LOCUST_MODE: standalone
-      NO_PROXY: standalone
-      TASK_DELAY_FROM: 5
-      TASK_DELAY_TO: 30
-      QUIET_MODE: "${QUIET_MODE:-false}"
-      LOCUST_OPTS: "--headless -u 10 -r 1"
-    volumes:
-      - ../common/locust-scripts:/locust
+include:
+  - test-app-docker-compose.yaml
+  - docker-compose-minimal.yaml
deploy/docker/clickhouse-setup/keeper_config.xml (new file, 64 lines)
@@ -0,0 +1,64 @@
<clickhouse>
    <logger>
        <!-- Possible levels [1]:

        - none (turns off logging)
        - fatal
        - critical
        - error
        - warning
        - notice
        - information
        - debug
        - trace

        [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
        -->
        <level>information</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
        <!-- Rotation policy
             See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
          -->
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
    </logger>

    <listen_host>0.0.0.0</listen_host>
    <max_connections>4096</max_connections>

    <keeper_server>
        <tcp_port>9181</tcp_port>

        <!-- Must be unique among all keeper serves -->
        <server_id>1</server_id>

        <log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>
        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <min_session_timeout_ms>10000</min_session_timeout_ms>
            <session_timeout_ms>100000</session_timeout_ms>
            <raft_logs_level>information</raft_logs_level>
            <compress_logs>false</compress_logs>
            <!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h -->
        </coordination_settings>

        <!-- enable sanity hostname checks for cluster configuration (e.g. if localhost is used with remote endpoints) -->
        <hostname_checks_enabled>true</hostname_checks_enabled>
        <raft_configuration>
            <server>
                <id>1</id>

                <!-- Internal port and hostname -->
                <hostname>clickhouses-keeper-1</hostname>
                <port>9234</port>
            </server>

            <!-- Add more servers here -->

        </raft_configuration>
    </keeper_server>
</clickhouse>
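ClickHouse Keeper is ZooKeeper-compatible, so the <tcp_port>9181</tcp_port> above should answer the classic four-letter admin commands. A sketch of probing it with "ruok" from Go; treat this as an assumption to verify, since the allowed four-letter-word set can depend on the Keeper version and configuration:

package main

import (
	"fmt"
	"io"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "localhost:9181", 3*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	conn.SetDeadline(time.Now().Add(3 * time.Second))
	// Send the ZooKeeper-style liveness command; Keeper closes the
	// connection after writing its reply.
	if _, err := conn.Write([]byte("ruok")); err != nil {
		panic(err)
	}
	reply, _ := io.ReadAll(conn)
	fmt.Println(string(reply)) // expected: imok
}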
@@ -36,6 +36,7 @@ receivers:
   #   endpoint: 0.0.0.0:6832
   hostmetrics:
     collection_interval: 30s
+    root_path: /hostfs
     scrapers:
       cpu: {}
       load: {}

@@ -75,7 +76,16 @@ processors:
       # This is added to ensure the uniqueness of the timeseries
       # Otherwise, identical timeseries produced by multiple replicas of
       # collectors result in incorrect APM metrics
-      - name: 'signoz.collector.id'
+      - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name
   # memory_limiter:
   #   # 80% of maximum memory up to 2G
   #   limit_mib: 1500

@@ -101,6 +111,7 @@ processors:
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
     aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
+    enable_exp_histogram: true
     dimensions:
       - name: service.namespace
         default: default

@@ -110,6 +121,15 @@ processors:
       # Otherwise, identical timeseries produced by multiple replicas of
       # collectors result in incorrect APM metrics
       - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name

 extensions:
   health_check:

@@ -121,31 +141,26 @@ extensions:

 exporters:
   clickhousetraces:
-    datasource: tcp://clickhouse:9000/?database=signoz_traces
+    datasource: tcp://clickhouse:9000/signoz_traces
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
     low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
   clickhousemetricswrite:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
+    endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
       enabled: true
   clickhousemetricswrite/prometheus:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
-  # logging: {}
+    endpoint: tcp://clickhouse:9000/signoz_metrics
   clickhouselogsexporter:
-    dsn: tcp://clickhouse:9000/
+    dsn: tcp://clickhouse:9000/signoz_logs
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
-    timeout: 5s
-    sending_queue:
-      queue_size: 100
-    retry_on_failure:
-      enabled: true
-      initial_interval: 5s
-      max_interval: 30s
-      max_elapsed_time: 300s
+    timeout: 10s
+    use_new_schema: true
+  # logging: {}

 service:
   telemetry:
+    logs:
+      encoding: json
     metrics:
       address: 0.0.0.0:8888
   extensions:
@@ -22,4 +22,4 @@ rule_files:
 scrape_configs: []

 remote_read:
-  - url: tcp://clickhouse:9000/?database=signoz_metrics
+  - url: tcp://clickhouse:9000/signoz_metrics
deploy/docker/clickhouse-setup/test-app-docker-compose.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
services:
  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: [ "all" ]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
@@ -1,3 +1,8 @@
+map $http_upgrade $connection_upgrade {
+    default upgrade;
+    '' close;
+}
+
 server {
     listen 3301;
     server_name _;

@@ -42,6 +47,14 @@ server {
         proxy_read_timeout 600s;
     }

+    location /ws {
+        proxy_pass http://query-service:8080/ws;
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade "websocket";
+        proxy_set_header Connection "upgrade";
+        proxy_read_timeout 86400;
+    }
+
     # redirect server error pages to the static page /50x.html
     #
     error_page 500 502 503 504 /50x.html;
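The new /ws location forwards WebSocket traffic to query-service, which in nginx requires HTTP/1.1 plus explicit Upgrade/Connection headers. For comparison, Go's standard reverse proxy has passed WebSocket upgrades through automatically since Go 1.12, so an equivalent proxy needs no special header handling; the hostnames below are the ones used in the compose files above:

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

// Rough Go analogue of the /ws location block: proxy /ws to query-service.
// httputil.ReverseProxy handles the WebSocket Upgrade handshake natively.
func main() {
	target, err := url.Parse("http://query-service:8080")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)
	http.Handle("/ws", proxy)
	log.Fatal(http.ListenAndServe(":3301", nil))
}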
@@ -389,7 +389,7 @@ trap bye EXIT

 URL="https://api.segment.io/v1/track"
 HEADER_1="Content-Type: application/json"
-HEADER_2="Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6"
+HEADER_2="Authorization: Basic OWtScko3b1BDR1BFSkxGNlFqTVBMdDVibGpGaFJRQnI="

 send_event() {
   error=""
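The Authorization value above is standard HTTP Basic auth: Segment's track API takes the write key as the username with an empty password, so the header is base64("WRITE_KEY:"). A sketch with a placeholder key (not either of the real tokens in this diff):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	writeKey := "YOUR_WRITE_KEY" // placeholder, not a real key
	token := base64.StdEncoding.EncodeToString([]byte(writeKey + ":"))
	fmt.Println("Authorization: Basic " + token)
}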
@@ -1,5 +1,5 @@
 # use a minimal alpine image
-FROM alpine:3.18.5
+FROM alpine:3.18.6

 # Add Maintainer Info
 LABEL maintainer="signoz"
ee/query-service/anomaly/daily.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package anomaly

import (
	"context"

	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type DailyProvider struct {
	BaseSeasonalProvider
}

var _ BaseProvider = (*DailyProvider)(nil)

func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
	return &dp.BaseSeasonalProvider
}

// NewDailyProvider uses the same generic option type
func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
	dp := &DailyProvider{
		BaseSeasonalProvider: BaseSeasonalProvider{},
	}

	for _, opt := range opts {
		opt(dp)
	}

	dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
		Reader:        dp.reader,
		Cache:         dp.cache,
		KeyGenerator:  queryBuilder.NewKeyGenerator(),
		FluxInterval:  dp.fluxInterval,
		FeatureLookup: dp.ff,
	})

	return dp
}

func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
	req.Seasonality = SeasonalityDaily
	return p.getAnomalies(ctx, req)
}
ee/query-service/anomaly/hourly.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package anomaly

import (
    "context"

    querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
    "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type HourlyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*HourlyProvider)(nil)

func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &hp.BaseSeasonalProvider
}

// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
    hp := &HourlyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(hp)
    }

    hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
        Reader:        hp.reader,
        Cache:         hp.cache,
        KeyGenerator:  queryBuilder.NewKeyGenerator(),
        FluxInterval:  hp.fluxInterval,
        FeatureLookup: hp.ff,
    })

    return hp
}

func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    req.Seasonality = SeasonalityHourly
    return p.getAnomalies(ctx, req)
}
ee/query-service/anomaly/params.go (new file, 248 lines)
@@ -0,0 +1,248 @@
package anomaly

import (
    "math"
    "time"

    "go.signoz.io/signoz/pkg/query-service/common"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

type Seasonality string

const (
    SeasonalityHourly Seasonality = "hourly"
    SeasonalityDaily  Seasonality = "daily"
    SeasonalityWeekly Seasonality = "weekly"
)

func (s Seasonality) String() string {
    return string(s)
}

var (
    oneWeekOffset = 24 * 7 * time.Hour.Milliseconds()
    oneDayOffset  = 24 * time.Hour.Milliseconds()
    oneHourOffset = time.Hour.Milliseconds()
    fiveMinOffset = 5 * time.Minute.Milliseconds()
)

func (s Seasonality) IsValid() bool {
    switch s {
    case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
        return true
    default:
        return false
    }
}

type GetAnomaliesRequest struct {
    Params      *v3.QueryRangeParamsV3
    Seasonality Seasonality
}

type GetAnomaliesResponse struct {
    Results []*v3.Result
}

// anomalyQueryParams holds the params for anomaly detection
// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
//
//                    ^                            ^
//                    |                            |
//      (rounded value for past period)   +   (seasonal growth)
//
// score = abs(value - prediction) / stddev(current_season_query)
type anomalyQueryParams struct {
    // CurrentPeriodQuery is the query range params for the period the user is looking at, i.e. the eval window
    // Example: (now-5m, now), (now-30m, now), (now-1h, now)
    // The results obtained from this query are compared against the predicted values
    // to detect anomalies
    CurrentPeriodQuery *v3.QueryRangeParamsV3
    // PastPeriodQuery is the query range params for the same period one season earlier
    // Example: For weekly seasonality, (now-1w-5m, now-1w)
    //        : For daily seasonality, (now-1d-5m, now-1d)
    //        : For hourly seasonality, (now-1h-5m, now-1h)
    PastPeriodQuery *v3.QueryRangeParamsV3
    // CurrentSeasonQuery is the query range params for the current season
    // Example: For weekly seasonality, the query range params for (now-1w-5m, now)
    //        : For daily seasonality, the query range params for (now-1d-5m, now)
    //        : For hourly seasonality, the query range params for (now-1h-5m, now)
    CurrentSeasonQuery *v3.QueryRangeParamsV3
    // PastSeasonQuery is the query range params for one season before the current season
    // Example: For weekly seasonality, the query range params for (now-2w-5m, now-1w)
    //        : For daily seasonality, the query range params for (now-2d-5m, now-1d)
    //        : For hourly seasonality, the query range params for (now-2h-5m, now-1h)
    PastSeasonQuery *v3.QueryRangeParamsV3

    // Past2SeasonQuery is the query range params for two seasons before the current season
    // Example: For weekly seasonality, the query range params for (now-3w-5m, now-2w)
    //        : For daily seasonality, the query range params for (now-3d-5m, now-2d)
    //        : For hourly seasonality, the query range params for (now-3h-5m, now-2h)
    Past2SeasonQuery *v3.QueryRangeParamsV3
    // Past3SeasonQuery is the query range params for three seasons before the current season
    // Example: For weekly seasonality, the query range params for (now-4w-5m, now-3w)
    //        : For daily seasonality, the query range params for (now-4d-5m, now-3d)
    //        : For hourly seasonality, the query range params for (now-4h-5m, now-3h)
    Past3SeasonQuery *v3.QueryRangeParamsV3
}

func updateStepInterval(req *v3.QueryRangeParamsV3) {
    start := req.Start
    end := req.End

    req.Step = int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60))
    for _, q := range req.CompositeQuery.BuilderQueries {
        // If the step interval is less than the minimum allowed step interval, set it to the minimum allowed step interval
        if minStep := common.MinAllowedStepInterval(start, end); q.StepInterval < minStep {
            q.StepInterval = minStep
        }
    }
}

func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonality) *anomalyQueryParams {
    start := req.Start
    end := req.End

    currentPeriodQuery := &v3.QueryRangeParamsV3{
        Start:          start,
        End:            end,
        CompositeQuery: req.CompositeQuery.Clone(),
        Variables:      make(map[string]interface{}, 0),
        NoCache:        false,
    }
    updateStepInterval(currentPeriodQuery)

    var pastPeriodStart, pastPeriodEnd int64

    switch seasonality {
    // for weekly seasonality, we fetch the data from the past week with a 5 min offset
    case SeasonalityWeekly:
        pastPeriodStart = start - oneWeekOffset - fiveMinOffset
        pastPeriodEnd = end - oneWeekOffset
    // for daily seasonality, we fetch the data from the past day with a 5 min offset
    case SeasonalityDaily:
        pastPeriodStart = start - oneDayOffset - fiveMinOffset
        pastPeriodEnd = end - oneDayOffset
    // for hourly seasonality, we fetch the data from the past hour with a 5 min offset
    case SeasonalityHourly:
        pastPeriodStart = start - oneHourOffset - fiveMinOffset
        pastPeriodEnd = end - oneHourOffset
    }

    pastPeriodQuery := &v3.QueryRangeParamsV3{
        Start:          pastPeriodStart,
        End:            pastPeriodEnd,
        CompositeQuery: req.CompositeQuery.Clone(),
        Variables:      make(map[string]interface{}, 0),
        NoCache:        false,
    }
    updateStepInterval(pastPeriodQuery)

    // seasonality growth trend
    var currentGrowthPeriodStart, currentGrowthPeriodEnd int64
    switch seasonality {
    case SeasonalityWeekly:
        currentGrowthPeriodStart = start - oneWeekOffset
        currentGrowthPeriodEnd = end
    case SeasonalityDaily:
        currentGrowthPeriodStart = start - oneDayOffset
        currentGrowthPeriodEnd = end
    case SeasonalityHourly:
        currentGrowthPeriodStart = start - oneHourOffset
        currentGrowthPeriodEnd = end
    }

    currentGrowthQuery := &v3.QueryRangeParamsV3{
        Start:          currentGrowthPeriodStart,
        End:            currentGrowthPeriodEnd,
        CompositeQuery: req.CompositeQuery.Clone(),
        Variables:      make(map[string]interface{}, 0),
        NoCache:        false,
    }
    updateStepInterval(currentGrowthQuery)

    var pastGrowthPeriodStart, pastGrowthPeriodEnd int64
    switch seasonality {
    case SeasonalityWeekly:
        pastGrowthPeriodStart = start - 2*oneWeekOffset
        pastGrowthPeriodEnd = start - 1*oneWeekOffset
    case SeasonalityDaily:
        pastGrowthPeriodStart = start - 2*oneDayOffset
        pastGrowthPeriodEnd = start - 1*oneDayOffset
    case SeasonalityHourly:
        pastGrowthPeriodStart = start - 2*oneHourOffset
        pastGrowthPeriodEnd = start - 1*oneHourOffset
    }

    pastGrowthQuery := &v3.QueryRangeParamsV3{
        Start:          pastGrowthPeriodStart,
        End:            pastGrowthPeriodEnd,
        CompositeQuery: req.CompositeQuery.Clone(),
        Variables:      make(map[string]interface{}, 0),
        NoCache:        false,
    }
    updateStepInterval(pastGrowthQuery)

    var past2GrowthPeriodStart, past2GrowthPeriodEnd int64
    switch seasonality {
    case SeasonalityWeekly:
        past2GrowthPeriodStart = start - 3*oneWeekOffset
        past2GrowthPeriodEnd = start - 2*oneWeekOffset
    case SeasonalityDaily:
        past2GrowthPeriodStart = start - 3*oneDayOffset
        past2GrowthPeriodEnd = start - 2*oneDayOffset
    case SeasonalityHourly:
        past2GrowthPeriodStart = start - 3*oneHourOffset
        past2GrowthPeriodEnd = start - 2*oneHourOffset
    }

    past2GrowthQuery := &v3.QueryRangeParamsV3{
        Start:          past2GrowthPeriodStart,
        End:            past2GrowthPeriodEnd,
        CompositeQuery: req.CompositeQuery.Clone(),
        Variables:      make(map[string]interface{}, 0),
        NoCache:        false,
    }
    updateStepInterval(past2GrowthQuery)

    var past3GrowthPeriodStart, past3GrowthPeriodEnd int64
    switch seasonality {
    case SeasonalityWeekly:
        past3GrowthPeriodStart = start - 4*oneWeekOffset
        past3GrowthPeriodEnd = start - 3*oneWeekOffset
    case SeasonalityDaily:
        past3GrowthPeriodStart = start - 4*oneDayOffset
        past3GrowthPeriodEnd = start - 3*oneDayOffset
    case SeasonalityHourly:
        past3GrowthPeriodStart = start - 4*oneHourOffset
        past3GrowthPeriodEnd = start - 3*oneHourOffset
    }

    past3GrowthQuery := &v3.QueryRangeParamsV3{
        Start:          past3GrowthPeriodStart,
        End:            past3GrowthPeriodEnd,
        CompositeQuery: req.CompositeQuery.Clone(),
        Variables:      make(map[string]interface{}, 0),
        NoCache:        false,
    }
    updateStepInterval(past3GrowthQuery)

    return &anomalyQueryParams{
        CurrentPeriodQuery: currentPeriodQuery,
        PastPeriodQuery:    pastPeriodQuery,
        CurrentSeasonQuery: currentGrowthQuery,
        PastSeasonQuery:    pastGrowthQuery,
        Past2SeasonQuery:   past2GrowthQuery,
        Past3SeasonQuery:   past3GrowthQuery,
    }
}

type anomalyQueryResults struct {
    CurrentPeriodResults []*v3.Result
    PastPeriodResults    []*v3.Result
    CurrentSeasonResults []*v3.Result
    PastSeasonResults    []*v3.Result
    Past2SeasonResults   []*v3.Result
    Past3SeasonResults   []*v3.Result
}
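To make the window arithmetic concrete, here is a small self-contained sketch (illustrative only, not part of the change) that prints the six ranges prepareAnomalyQueryParams derives for a hypothetical 30-minute request with weekly seasonality; all values are epoch milliseconds, matching QueryRangeParamsV3. Note that in the code only the past-period window gets the extra 5-minute offset; the season windows are plain multiples of the season length, even though the struct comments above suggest a 5-minute offset there as well.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical eval window: the last 30 minutes, in epoch milliseconds.
	end := time.Now().UnixMilli()
	start := end - 30*time.Minute.Milliseconds()

	week := 7 * 24 * time.Hour.Milliseconds()
	fiveMin := 5 * time.Minute.Milliseconds()

	// Mirrors the SeasonalityWeekly branches above.
	fmt.Println("current period :", start, "-", end)
	fmt.Println("past period    :", start-week-fiveMin, "-", end-week)
	fmt.Println("current season :", start-week, "-", end)
	fmt.Println("past season    :", start-2*week, "-", start-week)
	fmt.Println("past2 season   :", start-3*week, "-", start-2*week)
	fmt.Println("past3 season   :", start-4*week, "-", start-3*week)
}
```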
ee/query-service/anomaly/provider.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package anomaly

import (
    "context"
)

type Provider interface {
    GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
}
ee/query-service/anomaly/seasonal.go (new file, 466 lines)
@@ -0,0 +1,466 @@
package anomaly

import (
    "context"
    "math"
    "time"

    "go.signoz.io/signoz/pkg/query-service/cache"
    "go.signoz.io/signoz/pkg/query-service/interfaces"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    "go.signoz.io/signoz/pkg/query-service/postprocess"
    "go.signoz.io/signoz/pkg/query-service/utils/labels"
    "go.uber.org/zap"
)

var (
    // TODO(srikanthccv): make this configurable?
    movingAvgWindowSize = 7
)

// BaseProvider is an interface that includes common methods for all provider types
type BaseProvider interface {
    GetBaseSeasonalProvider() *BaseSeasonalProvider
}

// GenericProviderOption is a generic type for provider options
type GenericProviderOption[T BaseProvider] func(T)

func WithCache[T BaseProvider](cache cache.Cache) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().cache = cache
    }
}

func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().keyGenerator = keyGenerator
    }
}

func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().ff = ff
    }
}

func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().reader = reader
    }
}

type BaseSeasonalProvider struct {
    querierV2    interfaces.Querier
    reader       interfaces.Reader
    fluxInterval time.Duration
    cache        cache.Cache
    keyGenerator cache.KeyGenerator
    ff           interfaces.FeatureLookup
}
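Because the option constructors are generic over the concrete provider, callers instantiate them with the provider's pointer type. A hypothetical wiring sketch follows; the actual reader, cache, and feature-lookup values come from the query service bootstrap, which is not part of this diff, and note that the code shown defines no WithFluxInterval option, so fluxInterval stays at its zero value unless set elsewhere:

```go
// Hypothetical helper; DailyProvider and the With* options come from the
// anomaly package introduced above.
func newDailyAnomalyProvider(reader interfaces.Reader, c cache.Cache, ff interfaces.FeatureLookup) *anomaly.DailyProvider {
	return anomaly.NewDailyProvider(
		// The type parameter must be spelled out: the option arguments
		// alone do not determine the provider type.
		anomaly.WithReader[*anomaly.DailyProvider](reader),
		anomaly.WithCache[*anomaly.DailyProvider](c),
		anomaly.WithFeatureLookup[*anomaly.DailyProvider](ff),
	)
}
```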
func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
    if !req.Seasonality.IsValid() {
        req.Seasonality = SeasonalityDaily
    }
    return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}

func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
    zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
    currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
    if err != nil {
        return nil, err
    }

    currentPeriodResults, err = postprocess.PostProcessResult(currentPeriodResults, params.CurrentPeriodQuery)
    if err != nil {
        return nil, err
    }

    zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
    pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
    if err != nil {
        return nil, err
    }

    pastPeriodResults, err = postprocess.PostProcessResult(pastPeriodResults, params.PastPeriodQuery)
    if err != nil {
        return nil, err
    }

    zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
    currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
    if err != nil {
        return nil, err
    }

    currentSeasonResults, err = postprocess.PostProcessResult(currentSeasonResults, params.CurrentSeasonQuery)
    if err != nil {
        return nil, err
    }

    zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
    pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
    if err != nil {
        return nil, err
    }

    pastSeasonResults, err = postprocess.PostProcessResult(pastSeasonResults, params.PastSeasonQuery)
    if err != nil {
        return nil, err
    }

    zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
    past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
    if err != nil {
        return nil, err
    }

    past2SeasonResults, err = postprocess.PostProcessResult(past2SeasonResults, params.Past2SeasonQuery)
    if err != nil {
        return nil, err
    }

    zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
    past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
    if err != nil {
        return nil, err
    }

    past3SeasonResults, err = postprocess.PostProcessResult(past3SeasonResults, params.Past3SeasonQuery)
    if err != nil {
        return nil, err
    }

    return &anomalyQueryResults{
        CurrentPeriodResults: currentPeriodResults,
        PastPeriodResults:    pastPeriodResults,
        CurrentSeasonResults: currentSeasonResults,
        PastSeasonResults:    pastSeasonResults,
        Past2SeasonResults:   past2SeasonResults,
        Past3SeasonResults:   past3SeasonResults,
    }, nil
}

// getMatchingSeries gets the matching series from the query result
// for the given series
func (p *BaseSeasonalProvider) getMatchingSeries(queryResult *v3.Result, series *v3.Series) *v3.Series {
    if queryResult == nil || len(queryResult.Series) == 0 {
        return nil
    }

    for _, curr := range queryResult.Series {
        currLabels := labels.FromMap(curr.Labels)
        seriesLabels := labels.FromMap(series.Labels)
        if currLabels.Hash() == seriesLabels.Hash() {
            return curr
        }
    }
    return nil
}

func (p *BaseSeasonalProvider) getAvg(series *v3.Series) float64 {
    if series == nil || len(series.Points) == 0 {
        return 0
    }
    var sum float64
    for _, smpl := range series.Points {
        sum += smpl.Value
    }
    return sum / float64(len(series.Points))
}

func (p *BaseSeasonalProvider) getStdDev(series *v3.Series) float64 {
    if series == nil || len(series.Points) == 0 {
        return 0
    }
    avg := p.getAvg(series)
    var sum float64
    for _, smpl := range series.Points {
        sum += math.Pow(smpl.Value-avg, 2)
    }
    return math.Sqrt(sum / float64(len(series.Points)))
}

// getMovingAvg gets the moving average for the given series
// for the given window size and start index
func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSize, startIdx int) float64 {
    if series == nil || len(series.Points) == 0 {
        return 0
    }
    if startIdx >= len(series.Points)-movingAvgWindowSize {
        startIdx = int(math.Max(0, float64(len(series.Points)-movingAvgWindowSize)))
    }
    var sum float64
    points := series.Points[startIdx:]
    for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
        sum += points[i].Value
    }
    avg := sum / float64(movingAvgWindowSize)
    return avg
}

func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
    if len(floats) == 0 {
        return 0
    }
    var sum float64
    for _, f := range floats {
        sum += f
    }
    return sum / float64(len(floats))
}
func (p *BaseSeasonalProvider) getPredictedSeries(
    series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
) *v3.Series {
    predictedSeries := &v3.Series{
        Labels:      series.Labels,
        LabelsArray: series.LabelsArray,
        Points:      []v3.Point{},
    }

    // for each point in the series, get the predicted value
    // the predicted value is the moving average (with window size = 7) of the previous period series
    // plus the average of the current season series
    // minus the mean of the past season series, past2 season series and past3 season series
    for idx, curr := range series.Points {
        predictedValue :=
            p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) +
                p.getAvg(currentSeasonSeries) -
                p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))

        if predictedValue < 0 {
            predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
        }

        zap.L().Info("predictedSeries",
            zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)),
            zap.Float64("avg", p.getAvg(currentSeasonSeries)),
            zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))),
            zap.Any("labels", series.Labels),
            zap.Float64("predictedValue", predictedValue),
        )
        predictedSeries.Points = append(predictedSeries.Points, v3.Point{
            Timestamp: curr.Timestamp,
            Value:     predictedValue,
        })
    }

    return predictedSeries
}

// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold
// moving avg of the previous period series + z score threshold * std dev of the series
// moving avg of the previous period series - z score threshold * std dev of the series
func (p *BaseSeasonalProvider) getBounds(
    series, predictedSeries *v3.Series,
    zScoreThreshold float64,
) (*v3.Series, *v3.Series) {
    upperBoundSeries := &v3.Series{
        Labels:      series.Labels,
        LabelsArray: series.LabelsArray,
        Points:      []v3.Point{},
    }

    lowerBoundSeries := &v3.Series{
        Labels:      series.Labels,
        LabelsArray: series.LabelsArray,
        Points:      []v3.Point{},
    }

    for idx, curr := range series.Points {
        upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
        lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
        upperBoundSeries.Points = append(upperBoundSeries.Points, v3.Point{
            Timestamp: curr.Timestamp,
            Value:     upperBound,
        })
        lowerBoundSeries.Points = append(lowerBoundSeries.Points, v3.Point{
            Timestamp: curr.Timestamp,
            Value:     math.Max(lowerBound, 0),
        })
    }

    return upperBoundSeries, lowerBoundSeries
}

// getExpectedValue gets the expected value for the given series
// for the given index
// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series
func (p *BaseSeasonalProvider) getExpectedValue(
    _, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, idx int,
) float64 {
    prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
    currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
    pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
    past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
    past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
    return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
}

// getScore gets the anomaly score for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getScore(
    series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
) float64 {
    expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
    return (value - expectedValue) / p.getStdDev(weekSeries)
}
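A worked example of the prediction and score: suppose at some index the 7-point moving average of the past period series is 100, the current season average is 120, and the three prior season averages are 110, 105 and 100 (mean 105). The prediction is 100 + 120 - 105 = 115. If the observed value is 145 and the standard deviation of the season series is 10, the anomaly score is (145 - 115) / 10 = 3.0, exactly at the default z-score threshold of 3 applied in getAnomalies below. Note that the two functions use different denominators: getScore divides by the standard deviation of the season series, while getBounds widens the band around the predicted series using the standard deviation of the evaluated series itself.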
// getAnomalyScores gets the anomaly scores for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getAnomalyScores(
    series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
) *v3.Series {
    anomalyScoreSeries := &v3.Series{
        Labels:      series.Labels,
        LabelsArray: series.LabelsArray,
        Points:      []v3.Point{},
    }

    for idx, curr := range series.Points {
        anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
        anomalyScoreSeries.Points = append(anomalyScoreSeries.Points, v3.Point{
            Timestamp: curr.Timestamp,
            Value:     anomalyScore,
        })
    }

    return anomalyScoreSeries
}

func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    anomalyParams := p.getQueryParams(req)
    anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
    if err != nil {
        return nil, err
    }

    currentPeriodResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.CurrentPeriodResults {
        currentPeriodResultsMap[result.QueryName] = result
    }

    pastPeriodResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.PastPeriodResults {
        pastPeriodResultsMap[result.QueryName] = result
    }

    currentSeasonResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.CurrentSeasonResults {
        currentSeasonResultsMap[result.QueryName] = result
    }

    pastSeasonResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.PastSeasonResults {
        pastSeasonResultsMap[result.QueryName] = result
    }

    past2SeasonResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.Past2SeasonResults {
        past2SeasonResultsMap[result.QueryName] = result
    }

    past3SeasonResultsMap := make(map[string]*v3.Result)
    for _, result := range anomalyQueryResults.Past3SeasonResults {
        past3SeasonResultsMap[result.QueryName] = result
    }

    for _, result := range currentPeriodResultsMap {
        funcs := req.Params.CompositeQuery.BuilderQueries[result.QueryName].Functions

        var zScoreThreshold float64
        for _, f := range funcs {
            if f.Name == v3.FunctionNameAnomaly {
                value, ok := f.NamedArgs["z_score_threshold"]
                if ok {
                    zScoreThreshold = value.(float64)
                } else {
                    zScoreThreshold = 3
                }
                break
            }
        }

        pastPeriodResult, ok := pastPeriodResultsMap[result.QueryName]
        if !ok {
            continue
        }
        currentSeasonResult, ok := currentSeasonResultsMap[result.QueryName]
        if !ok {
            continue
        }
        pastSeasonResult, ok := pastSeasonResultsMap[result.QueryName]
        if !ok {
            continue
        }
        past2SeasonResult, ok := past2SeasonResultsMap[result.QueryName]
        if !ok {
            continue
        }
        past3SeasonResult, ok := past3SeasonResultsMap[result.QueryName]
        if !ok {
            continue
        }

        for _, series := range result.Series {
            stdDev := p.getStdDev(series)
            zap.L().Info("stdDev", zap.Float64("stdDev", stdDev), zap.Any("labels", series.Labels))

            pastPeriodSeries := p.getMatchingSeries(pastPeriodResult, series)
            currentSeasonSeries := p.getMatchingSeries(currentSeasonResult, series)
            pastSeasonSeries := p.getMatchingSeries(pastSeasonResult, series)
            past2SeasonSeries := p.getMatchingSeries(past2SeasonResult, series)
            past3SeasonSeries := p.getMatchingSeries(past3SeasonResult, series)

            prevSeriesAvg := p.getAvg(pastPeriodSeries)
            currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
            pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
            past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
            past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
            zap.L().Info("getAvg", zap.Float64("prevSeriesAvg", prevSeriesAvg), zap.Float64("currentSeasonSeriesAvg", currentSeasonSeriesAvg), zap.Float64("pastSeasonSeriesAvg", pastSeasonSeriesAvg), zap.Float64("past2SeasonSeriesAvg", past2SeasonSeriesAvg), zap.Float64("past3SeasonSeriesAvg", past3SeasonSeriesAvg), zap.Any("labels", series.Labels))

            predictedSeries := p.getPredictedSeries(
                series,
                pastPeriodSeries,
                currentSeasonSeries,
                pastSeasonSeries,
                past2SeasonSeries,
                past3SeasonSeries,
            )
            result.PredictedSeries = append(result.PredictedSeries, predictedSeries)

            upperBoundSeries, lowerBoundSeries := p.getBounds(
                series,
                predictedSeries,
                zScoreThreshold,
            )
            result.UpperBoundSeries = append(result.UpperBoundSeries, upperBoundSeries)
            result.LowerBoundSeries = append(result.LowerBoundSeries, lowerBoundSeries)

            anomalyScoreSeries := p.getAnomalyScores(
                series,
                pastPeriodSeries,
                currentSeasonSeries,
                pastSeasonSeries,
                past2SeasonSeries,
                past3SeasonSeries,
            )
            result.AnomalyScores = append(result.AnomalyScores, anomalyScoreSeries)
        }
    }

    results := make([]*v3.Result, 0, len(currentPeriodResultsMap))
    for _, result := range currentPeriodResultsMap {
        results = append(results, result)
    }

    return &GetAnomaliesResponse{
        Results: results,
    }, nil
}
ee/query-service/anomaly/weekly.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package anomaly

import (
    "context"

    querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
    "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)

type WeeklyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*WeeklyProvider)(nil)

func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &wp.BaseSeasonalProvider
}

func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
    wp := &WeeklyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(wp)
    }

    wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
        Reader:        wp.reader,
        Cache:         wp.cache,
        KeyGenerator:  queryBuilder.NewKeyGenerator(),
        FluxInterval:  wp.fluxInterval,
        FeatureLookup: wp.ff,
    })

    return wp
}

func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
    req.Seasonality = SeasonalityWeekly
    return p.getAnomalies(ctx, req)
}
@@ -2,14 +2,17 @@ package api

 import (
     "net/http"
+    "net/http/httputil"
     "time"

     "github.com/gorilla/mux"
     "go.signoz.io/signoz/ee/query-service/dao"
+    "go.signoz.io/signoz/ee/query-service/integrations/gateway"
     "go.signoz.io/signoz/ee/query-service/interfaces"
     "go.signoz.io/signoz/ee/query-service/license"
     "go.signoz.io/signoz/ee/query-service/usage"
     baseapp "go.signoz.io/signoz/pkg/query-service/app"
+    "go.signoz.io/signoz/pkg/query-service/app/integrations"
     "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
     "go.signoz.io/signoz/pkg/query-service/cache"
     baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
@@ -21,7 +24,6 @@ import (
 type APIHandlerOptions struct {
     DataConnector                 interfaces.DataConnector
     SkipConfig                    *basemodel.SkipConfig
-    PreferDelta                   bool
     PreferSpanMetrics             bool
     MaxIdleConns                  int
     MaxOpenConns                  int
@@ -31,10 +33,13 @@ type APIHandlerOptions struct {
     UsageManager                  *usage.Manager
     FeatureFlags                  baseint.FeatureLookup
     LicenseManager                *license.Manager
+    IntegrationsController        *integrations.Controller
     LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
     Cache                         cache.Cache
+    Gateway                       *httputil.ReverseProxy
     // Querier Influx Interval
     FluxInterval time.Duration
+    UseLogsNewSchema bool
 }

 type APIHandler struct {
@@ -48,7 +53,6 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
     baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
         Reader:                        opts.DataConnector,
         SkipConfig:                    opts.SkipConfig,
-        PerferDelta:                   opts.PreferDelta,
         PreferSpanMetrics:             opts.PreferSpanMetrics,
         MaxIdleConns:                  opts.MaxIdleConns,
         MaxOpenConns:                  opts.MaxOpenConns,
@@ -56,9 +60,11 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
         AppDao:                        opts.AppDao,
         RuleManager:                   opts.RulesManager,
         FeatureFlags:                  opts.FeatureFlags,
+        IntegrationsController:        opts.IntegrationsController,
         LogsParsingPipelineController: opts.LogsParsingPipelineController,
         Cache:                         opts.Cache,
         FluxInterval:                  opts.FluxInterval,
+        UseLogsNewSchema:              opts.UseLogsNewSchema,
     })

     if err != nil {
@@ -92,6 +98,10 @@ func (ah *APIHandler) AppDao() dao.ModelDao {
     return ah.opts.AppDao
 }

+func (ah *APIHandler) Gateway() *httputil.ReverseProxy {
+    return ah.opts.Gateway
+}
+
 func (ah *APIHandler) CheckFeature(f string) bool {
     err := ah.FF().CheckFeature(f)
     return err == nil
@@ -149,12 +159,12 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
     router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
     router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
     router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
-    router.HandleFunc("/api/v2/metrics/query_range", am.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)

     // PAT APIs
-    router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.createPAT)).Methods(http.MethodPost)
-    router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.getPATs)).Methods(http.MethodGet)
-    router.HandleFunc("/api/v1/pat/{id}", am.OpenAccess(ah.deletePAT)).Methods(http.MethodDelete)
+    router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.createPAT)).Methods(http.MethodPost)
+    router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.getPATs)).Methods(http.MethodGet)
+    router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.updatePAT)).Methods(http.MethodPut)
+    router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.revokePAT)).Methods(http.MethodDelete)

     router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.checkout)).Methods(http.MethodPost)
     router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
@@ -167,6 +177,11 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
         am.ViewAccess(ah.listLicensesV2)).
         Methods(http.MethodGet)

+    router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
+
+    // Gateway
+    router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))
+
     ah.APIHandler.RegisterRoutes(router, am)

 }
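Note the breaking change in the PAT surface: the endpoints move from /api/v1/pat under OpenAccess to /api/v1/pats under AdminAccess, the bare DELETE handler is replaced by an explicit revoke, and a PUT handler for updates is added. Any client scripted against the old paths will start receiving 404s, and non-admin tokens can no longer manage PATs. The removed /api/v2/metrics/query_range route likewise appears to be superseded by the /api/v4/query_range registration added further down.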
@@ -14,7 +14,6 @@ import (

     "go.signoz.io/signoz/ee/query-service/constants"
     "go.signoz.io/signoz/ee/query-service/model"
-    "go.signoz.io/signoz/pkg/query-service/auth"
     baseauth "go.signoz.io/signoz/pkg/query-service/auth"
     basemodel "go.signoz.io/signoz/pkg/query-service/model"
 )
@@ -51,7 +50,7 @@ func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
     }

     // if all looks good, call auth
-    resp, err := auth.Login(ctx, &req)
+    resp, err := baseauth.Login(ctx, &req)
     if ah.HandleError(w, err, http.StatusUnauthorized) {
         return
     }
@@ -74,7 +73,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
     defer r.Body.Close()
     requestBody, err := io.ReadAll(r.Body)
     if err != nil {
-        zap.S().Errorf("received no input in api\n", err)
+        zap.L().Error("received no input in api", zap.Error(err))
         RespondError(w, model.BadRequest(err), nil)
         return
     }
@@ -82,7 +81,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
     err = json.Unmarshal(requestBody, &req)

     if err != nil {
-        zap.S().Errorf("received invalid user registration request", zap.Error(err))
+        zap.L().Error("received invalid user registration request", zap.Error(err))
         RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
         return
     }
@@ -90,13 +89,13 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
     // get invite object
     invite, err := baseauth.ValidateInvite(ctx, req)
     if err != nil {
-        zap.S().Errorf("failed to validate invite token", err)
+        zap.L().Error("failed to validate invite token", zap.Error(err))
         RespondError(w, model.BadRequest(err), nil)
         return
     }

     if invite == nil {
-        zap.S().Errorf("failed to validate invite token: it is either empty or invalid", err)
+        zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
         RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
         return
     }
@@ -104,7 +103,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
     // get auth domain from email domain
     domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
     if apierr != nil {
-        zap.S().Errorf("failed to get domain from email", apierr)
+        zap.L().Error("failed to get domain from email", zap.Error(apierr))
         RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
     }

@@ -130,7 +129,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {

     } else {
         // no-sso, validate password
-        if err := auth.ValidatePassword(req.Password); err != nil {
+        if err := baseauth.ValidatePassword(req.Password); err != nil {
             RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
             return
         }
@@ -205,24 +204,24 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
     ctx := context.Background()

     if !ah.CheckFeature(model.SSO) {
-        zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
+        zap.L().Error("[receiveGoogleAuth] sso requested but feature unavailable in org domain")
         http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
         return
     }

     q := r.URL.Query()
     if errType := q.Get("error"); errType != "" {
-        zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
+        zap.L().Error("[receiveGoogleAuth] failed to login with google auth", zap.String("error", errType), zap.String("error_description", q.Get("error_description")))
         http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
         return
     }

     relayState := q.Get("state")
-    zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
+    zap.L().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))

     parsedState, err := url.Parse(relayState)
     if err != nil || relayState == "" {
-        zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
+        zap.L().Error("[receiveGoogleAuth] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
         handleSsoError(w, r, redirectUri)
         return
     }
@@ -241,17 +240,22 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
     // prepare google callback handler using parsedState -
     // which contains redirect URL (front-end endpoint)
     callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
+    if err != nil {
+        zap.L().Error("[receiveGoogleAuth] failed to prepare google oauth provider", zap.String("domain", domain.String()), zap.Error(err))
+        handleSsoError(w, r, redirectUri)
+        return
+    }
+
     identity, err := callbackHandler.HandleCallback(r)
     if err != nil {
-        zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
+        zap.L().Error("[receiveGoogleAuth] failed to process HandleCallback ", zap.String("domain", domain.String()), zap.Error(err))
         handleSsoError(w, r, redirectUri)
         return
     }

     nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
     if err != nil {
-        zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
+        zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
         handleSsoError(w, r, redirectUri)
         return
     }
@@ -266,14 +270,14 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
     ctx := context.Background()

     if !ah.CheckFeature(model.SSO) {
-        zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
+        zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
         http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
         return
     }

     err := r.ParseForm()
     if err != nil {
-        zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
+        zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
         handleSsoError(w, r, redirectUri)
         return
     }
@@ -281,11 +285,11 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
     // the relay state is sent when a login request is submitted to
     // Idp.
     relayState := r.FormValue("RelayState")
-    zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
+    zap.L().Debug("[receiveML] relay state", zap.String("relayState", relayState))

     parsedState, err := url.Parse(relayState)
     if err != nil || relayState == "" {
-        zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
+        zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
         handleSsoError(w, r, redirectUri)
         return
     }
@@ -302,34 +306,34 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {

     sp, err := domain.PrepareSamlRequest(parsedState)
     if err != nil {
-        zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
+        zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
         handleSsoError(w, r, redirectUri)
         return
     }

     assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
     if err != nil {
-        zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
+        zap.L().Error("[receiveSAML] failed to retrieve assertion info from saml response", zap.String("domain", domain.String()), zap.Error(err))
         handleSsoError(w, r, redirectUri)
         return
     }

     if assertionInfo.WarningInfo.InvalidTime {
-        zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
+        zap.L().Error("[receiveSAML] expired saml response", zap.String("domain", domain.String()), zap.Error(err))
         handleSsoError(w, r, redirectUri)
         return
     }

     email := assertionInfo.NameID
     if email == "" {
-        zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
+        zap.L().Error("[receiveSAML] invalid email in the SSO response", zap.String("domain", domain.String()))
         handleSsoError(w, r, redirectUri)
         return
     }

     nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
     if err != nil {
-        zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
+        zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
         handleSsoError(w, r, redirectUri)
         return
     }
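The bulk of this file is a mechanical migration from zap's sugared logger to the strongly typed one. The old calls passed raw values to Errorf with no format verbs to bind them, so errors and domain names were silently appended or dropped; the new calls attach them as named structured fields. A minimal sketch of the before/after pattern (doSomething is a stand-in, not from this PR):

```go
package main

import "go.uber.org/zap"

func doSomething() error { return nil } // stand-in for any fallible call

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	err := doSomething()

	// Before: sugared logger; the trailing err has no %v verb to bind to,
	// which is exactly the bug class this PR cleans up.
	zap.S().Errorf("failed to validate invite token", err)

	// After: typed logger; the error travels as a structured "error" field.
	zap.L().Error("failed to validate invite token", zap.Error(err))
}
```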
|||||||
@@ -1,12 +1,15 @@
 package api

 import (
+	"errors"
+	"net/http"
+	"strings"
+
 	"github.com/gorilla/mux"
 	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
 	"go.signoz.io/signoz/pkg/query-service/auth"
 	"go.signoz.io/signoz/pkg/query-service/common"
 	"go.signoz.io/signoz/pkg/query-service/model"
-	"net/http"
 )

 func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
@@ -28,6 +31,10 @@ func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request

 	// Get the dashboard UUID from the request
 	uuid := mux.Vars(r)["uuid"]
+	if strings.HasPrefix(uuid, "integration") {
+		RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: errors.New("dashboards created by integrations cannot be unlocked")}, "You are not authorized to lock/unlock this dashboard")
+		return
+	}
 	dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
 	if err != nil {
 		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
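The new guard above rejects lock/unlock for dashboards whose UUID carries the "integration" prefix. A small sketch of the convention the check relies on (the prefix scheme is inferred from the guard itself, not spelled out elsewhere in the diff):

package main

import (
	"fmt"
	"strings"
)

// isIntegrationDashboard mirrors the guard in lockUnlockDashboard: dashboards
// created by integrations are recognized purely by their UUID prefix.
func isIntegrationDashboard(uuid string) bool {
	return strings.HasPrefix(uuid, "integration")
}

func main() {
	fmt.Println(isIntegrationDashboard("integration-nginx-overview")) // true: rejected by the guard
	fmt.Println(isIntegrationDashboard("9f2c4e9a-1b2d-4c3e"))         // false: normal dashboard
}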
@@ -1,17 +1,48 @@
 package api

 import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
 	"net/http"
+	"time"
+
+	"go.signoz.io/signoz/ee/query-service/constants"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
+	"go.uber.org/zap"
 )

 func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 	featureSet, err := ah.FF().GetFeatureFlags()
 	if err != nil {
 		ah.HandleError(w, err, http.StatusInternalServerError)
 		return
 	}

+	if constants.FetchFeatures == "true" {
+		zap.L().Debug("fetching license")
+		license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
+		if err != nil {
+			zap.L().Error("failed to fetch license", zap.Error(err))
+		} else if license == nil {
+			zap.L().Debug("no active license found")
+		} else {
+			licenseKey := license.Key
+
+			zap.L().Debug("fetching zeus features")
+			zeusFeatures, err := fetchZeusFeatures(constants.ZeusFeaturesURL, licenseKey)
+			if err == nil {
+				zap.L().Debug("fetched zeus features", zap.Any("features", zeusFeatures))
+				// merge featureSet and zeusFeatures in featureSet with higher priority to zeusFeatures
+				featureSet = MergeFeatureSets(zeusFeatures, featureSet)
+			} else {
+				zap.L().Error("failed to fetch zeus features", zap.Error(err))
+			}
+		}
+	}
+
 	if ah.opts.PreferSpanMetrics {
 		for idx := range featureSet {
 			feature := &featureSet[idx]
@@ -20,5 +51,96 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
 			}
 		}
 	}

 	ah.Respond(w, featureSet)
 }
+
+// fetchZeusFeatures makes an HTTP GET request to the /zeusFeatures endpoint
+// and returns the FeatureSet.
+func fetchZeusFeatures(url, licenseKey string) (basemodel.FeatureSet, error) {
+	// Check if the URL is empty
+	if url == "" {
+		return nil, fmt.Errorf("url is empty")
+	}
+
+	// Check if the licenseKey is empty
+	if licenseKey == "" {
+		return nil, fmt.Errorf("licenseKey is empty")
+	}
+
+	// Creating an HTTP client with a timeout for better control
+	client := &http.Client{
+		Timeout: 10 * time.Second,
+	}
+	// Creating a new GET request
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create request: %w", err)
+	}
+
+	// Setting the custom header
+	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
+
+	// Making the GET request
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to make GET request: %w", err)
+	}
+	defer func() {
+		if resp != nil {
+			resp.Body.Close()
+		}
+	}()
+
+	// Check for non-OK status code
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("%w: %d %s", errors.New("received non-OK HTTP status code"), resp.StatusCode, http.StatusText(resp.StatusCode))
+	}
+
+	// Reading and decoding the response body
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read response body: %w", err)
+	}
+
+	var zeusResponse ZeusFeaturesResponse
+	if err := json.Unmarshal(body, &zeusResponse); err != nil {
+		return nil, fmt.Errorf("%w: %v", errors.New("failed to decode response body"), err)
+	}
+
+	if zeusResponse.Status != "success" {
+		return nil, fmt.Errorf("%w: %s", errors.New("failed to fetch zeus features"), zeusResponse.Status)
+	}
+
+	return zeusResponse.Data, nil
+}
+
+type ZeusFeaturesResponse struct {
+	Status string               `json:"status"`
+	Data   basemodel.FeatureSet `json:"data"`
+}
+
+// MergeFeatureSets merges two FeatureSet arrays with precedence to zeusFeatures.
+func MergeFeatureSets(zeusFeatures, internalFeatures basemodel.FeatureSet) basemodel.FeatureSet {
+	// Create a map to store the merged features
+	featureMap := make(map[string]basemodel.Feature)
+
+	// Add all features from the internalFeatures set to the map
+	for _, feature := range internalFeatures {
+		featureMap[feature.Name] = feature
+	}
+
+	// Add all features from the zeusFeatures set to the map.
+	// If a feature already exists (i.e., same name), the zeusFeature will overwrite it.
+	for _, feature := range zeusFeatures {
+		featureMap[feature.Name] = feature
+	}
+
+	// Convert the map back to a FeatureSet slice
+	var mergedFeatures basemodel.FeatureSet
+	for _, feature := range featureMap {
+		mergedFeatures = append(mergedFeatures, feature)
+	}
+
+	return mergedFeatures
+}
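One review-worthy detail in fetchZeusFeatures above: it wraps freshly constructed errors.New(...) values with %w, so no caller can ever match those errors with errors.Is. If matching matters, the usual pattern is a package-level sentinel; a sketch of that alternative (ErrNonOKStatus and checkStatus are hypothetical names, not part of the commit):

package zeus

import (
	"errors"
	"fmt"
	"net/http"
)

// ErrNonOKStatus is a stable sentinel; wrapping it with %w lets callers
// branch on it with errors.Is, unlike wrapping an inline errors.New.
var ErrNonOKStatus = errors.New("received non-OK HTTP status code")

func checkStatus(resp *http.Response) error {
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%w: %d %s", ErrNonOKStatus, resp.StatusCode, http.StatusText(resp.StatusCode))
	}
	return nil
}

Callers can then write if errors.Is(err, zeus.ErrNonOKStatus) { ... } instead of string-matching the message.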
88  ee/query-service/app/api/featureFlags_test.go  Normal file
@@ -0,0 +1,88 @@
+package api
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	basemodel "go.signoz.io/signoz/pkg/query-service/model"
+)
+
+func TestMergeFeatureSets(t *testing.T) {
+	tests := []struct {
+		name             string
+		zeusFeatures     basemodel.FeatureSet
+		internalFeatures basemodel.FeatureSet
+		expected         basemodel.FeatureSet
+	}{
+		{
+			name:             "empty zeusFeatures and internalFeatures",
+			zeusFeatures:     basemodel.FeatureSet{},
+			internalFeatures: basemodel.FeatureSet{},
+			expected:         basemodel.FeatureSet{},
+		},
+		{
+			name: "non-empty zeusFeatures and empty internalFeatures",
+			zeusFeatures: basemodel.FeatureSet{
+				{Name: "Feature1", Active: true},
+				{Name: "Feature2", Active: false},
+			},
+			internalFeatures: basemodel.FeatureSet{},
+			expected: basemodel.FeatureSet{
+				{Name: "Feature1", Active: true},
+				{Name: "Feature2", Active: false},
+			},
+		},
+		{
+			name:         "empty zeusFeatures and non-empty internalFeatures",
+			zeusFeatures: basemodel.FeatureSet{},
+			internalFeatures: basemodel.FeatureSet{
+				{Name: "Feature1", Active: true},
+				{Name: "Feature2", Active: false},
+			},
+			expected: basemodel.FeatureSet{
+				{Name: "Feature1", Active: true},
+				{Name: "Feature2", Active: false},
+			},
+		},
+		{
+			name: "non-empty zeusFeatures and non-empty internalFeatures with no conflicts",
+			zeusFeatures: basemodel.FeatureSet{
+				{Name: "Feature1", Active: true},
+				{Name: "Feature3", Active: false},
+			},
+			internalFeatures: basemodel.FeatureSet{
+				{Name: "Feature2", Active: true},
+				{Name: "Feature4", Active: false},
+			},
+			expected: basemodel.FeatureSet{
+				{Name: "Feature1", Active: true},
+				{Name: "Feature2", Active: true},
+				{Name: "Feature3", Active: false},
+				{Name: "Feature4", Active: false},
+			},
+		},
+		{
+			name: "non-empty zeusFeatures and non-empty internalFeatures with conflicts",
+			zeusFeatures: basemodel.FeatureSet{
+				{Name: "Feature1", Active: true},
+				{Name: "Feature2", Active: false},
+			},
+			internalFeatures: basemodel.FeatureSet{
+				{Name: "Feature1", Active: false},
+				{Name: "Feature3", Active: true},
+			},
+			expected: basemodel.FeatureSet{
+				{Name: "Feature1", Active: true},
+				{Name: "Feature2", Active: false},
+				{Name: "Feature3", Active: true},
+			},
+		},
+	}

+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			actual := MergeFeatureSets(test.zeusFeatures, test.internalFeatures)
+			assert.ElementsMatch(t, test.expected, actual)
+		})
+	}
+}
34  ee/query-service/app/api/gateway.go  Normal file
@@ -0,0 +1,34 @@
+package api
+
+import (
+	"net/http"
+	"strings"
+
+	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
+)
+
+func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
+	ctx := req.Context()
+	if !strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+gateway.AllowedPrefix) {
+		rw.WriteHeader(http.StatusNotFound)
+		return
+	}
+
+	license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
+	if err != nil {
+		RespondError(rw, err, nil)
+		return
+	}
+
+	// Create headers
+	var licenseKey string
+	if license != nil {
+		licenseKey = license.Key
+	}
+
+	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
+	req.Header.Set("X-Consumer-Username", "lid:00000000-0000-0000-0000-000000000000")
+	req.Header.Set("X-Consumer-Groups", "ns:default")
+
+	ah.Gateway().ServeHTTP(rw, req)
+}
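ServeGatewayHTTP validates the path prefix, resolves the active license, injects identity headers, and hands the request to ah.Gateway(). The diff does not show what Gateway() returns; assuming it is a reverse proxy toward the cloud gateway, the overall shape looks like this standalone sketch (routePrefix, allowedPrefix, and the upstream URL are made-up stand-ins):

package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"
	"strings"
)

const (
	routePrefix   = "/api/gateway" // stand-in for gateway.RoutePrefix
	allowedPrefix = "/v1"          // stand-in for gateway.AllowedPrefix
)

func main() {
	upstream, _ := url.Parse("https://gateway.example.com")
	proxy := httputil.NewSingleHostReverseProxy(upstream)

	http.HandleFunc(routePrefix+"/", func(rw http.ResponseWriter, req *http.Request) {
		// Reject anything outside the allowed sub-path, as the handler above does.
		if !strings.HasPrefix(req.URL.Path, routePrefix+allowedPrefix) {
			rw.WriteHeader(http.StatusNotFound)
			return
		}
		// Inject auth/identity headers before proxying upstream.
		req.Header.Set("X-Signoz-Cloud-Api-Key", "license-key-goes-here")
		req.Header.Set("X-Consumer-Groups", "ns:default")
		proxy.ServeHTTP(rw, req)
	})
	_ = http.ListenAndServe(":8080", nil)
}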
@@ -12,6 +12,20 @@ import (
 	"go.uber.org/zap"
 )

+type DayWiseBreakdown struct {
+	Type      string        `json:"type"`
+	Breakdown []DayWiseData `json:"breakdown"`
+}
+
+type DayWiseData struct {
+	Timestamp int64   `json:"timestamp"`
+	Count     float64 `json:"count"`
+	Size      float64 `json:"size"`
+	UnitPrice float64 `json:"unitPrice"`
+	Quantity  float64 `json:"quantity"`
+	Total     float64 `json:"total"`
+}
+
 type tierBreakdown struct {
 	UnitPrice float64 `json:"unitPrice"`
 	Quantity  float64 `json:"quantity"`
@@ -21,9 +35,10 @@ type tierBreakdown struct {
 }

 type usageResponse struct {
 	Type  string          `json:"type"`
 	Unit  string          `json:"unit"`
 	Tiers []tierBreakdown `json:"tiers"`
+	DayWiseBreakdown DayWiseBreakdown `json:"dayWiseBreakdown"`
 }

 type details struct {
@@ -40,6 +55,7 @@ type billingDetails struct {
 		BillingPeriodEnd int64   `json:"billingPeriodEnd"`
 		Details          details `json:"details"`
 		Discount         float64 `json:"discount"`
+		SubscriptionStatus string `json:"subscriptionStatus"`
 	} `json:"data"`
 }

@@ -175,7 +191,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
 	url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
 	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
-		zap.S().Error("Error while creating request for trial details", err)
+		zap.L().Error("Error while creating request for trial details", zap.Error(err))
 		// If there is an error in fetching trial details, we will still return the license details
 		// to avoid blocking the UI
 		ah.Respond(w, resp)
@@ -184,7 +200,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
 	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
 	trialResp, err := hClient.Do(req)
 	if err != nil {
-		zap.S().Error("Error while fetching trial details", err)
+		zap.L().Error("Error while fetching trial details", zap.Error(err))
 		// If there is an error in fetching trial details, we will still return the license details
 		// to avoid incorrectly blocking the UI
 		ah.Respond(w, resp)
@@ -195,7 +211,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
 	trialRespBody, err := io.ReadAll(trialResp.Body)

 	if err != nil || trialResp.StatusCode != http.StatusOK {
-		zap.S().Error("Error while fetching trial details", err)
+		zap.L().Error("Error while fetching trial details", zap.Error(err))
 		// If there is an error in fetching trial details, we will still return the license details
 		// to avoid incorrectly blocking the UI
 		ah.Respond(w, resp)
@@ -206,7 +222,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
 	var trialRespData model.SubscriptionServerResp

 	if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
-		zap.S().Error("Error while decoding trial details", err)
+		zap.L().Error("Error while decoding trial details", zap.Error(err))
 		// If there is an error in fetching trial details, we will still return the license details
 		// to avoid incorrectly blocking the UI
 		ah.Respond(w, resp)
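The new DayWiseBreakdown types extend the usage response with a per-day series. For reference, this is the JSON shape they serialize to (struct definitions copied from the hunk above; the values are invented for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

type DayWiseData struct {
	Timestamp int64   `json:"timestamp"`
	Count     float64 `json:"count"`
	Size      float64 `json:"size"`
	UnitPrice float64 `json:"unitPrice"`
	Quantity  float64 `json:"quantity"`
	Total     float64 `json:"total"`
}

type DayWiseBreakdown struct {
	Type      string        `json:"type"`
	Breakdown []DayWiseData `json:"breakdown"`
}

func main() {
	b := DayWiseBreakdown{
		Type: "metrics",
		Breakdown: []DayWiseData{
			{Timestamp: 1712102400000, Count: 1000000, Size: 2.5, UnitPrice: 0.1, Quantity: 2.5, Total: 0.25},
		},
	}
	out, _ := json.MarshalIndent(b, "", "  ")
	fmt.Println(string(out))
	// Prints the wire format: {"type":"metrics","breakdown":[{"timestamp":1712102400000,...}]}
}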
@@ -1,236 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"fmt"
-	"net/http"
-	"sync"
-	"text/template"
-	"time"
-
-	"go.signoz.io/signoz/pkg/query-service/app/metrics"
-	"go.signoz.io/signoz/pkg/query-service/app/parser"
-	"go.signoz.io/signoz/pkg/query-service/constants"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	querytemplate "go.signoz.io/signoz/pkg/query-service/utils/queryTemplate"
-	"go.uber.org/zap"
-)
-
-func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) {
-	if !ah.CheckFeature(basemodel.CustomMetricsFunction) {
-		zap.S().Info("CustomMetricsFunction feature is not enabled in this plan")
-		ah.APIHandler.QueryRangeMetricsV2(w, r)
-		return
-	}
-	metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
-
-	if apiErrorObj != nil {
-		zap.S().Errorf(apiErrorObj.Err.Error())
-		RespondError(w, apiErrorObj, nil)
-		return
-	}
-
-	// prometheus instant query needs same timestamp
-	if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
-		metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.PROM {
-		metricsQueryRangeParams.Start = metricsQueryRangeParams.End
-	}
-
-	// round up the end to nearest multiple
-	if metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER {
-		end := (metricsQueryRangeParams.End) / 1000
-		step := metricsQueryRangeParams.Step
-		metricsQueryRangeParams.End = (end / step * step) * 1000
-	}
-
-	type channelResult struct {
-		Series    []*basemodel.Series
-		TableName string
-		Err       error
-		Name      string
-		Query     string
-	}
-
-	execClickHouseQueries := func(queries map[string]string) ([]*basemodel.Series, []string, error, map[string]string) {
-		var seriesList []*basemodel.Series
-		var tableName []string
-		ch := make(chan channelResult, len(queries))
-		var wg sync.WaitGroup
-
-		for name, query := range queries {
-			wg.Add(1)
-			go func(name, query string) {
-				defer wg.Done()
-				seriesList, tableName, err := ah.opts.DataConnector.GetMetricResultEE(r.Context(), query)
-				for _, series := range seriesList {
-					series.QueryName = name
-				}
-
-				if err != nil {
-					ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
-					return
-				}
-				ch <- channelResult{Series: seriesList, TableName: tableName}
-			}(name, query)
-		}
-
-		wg.Wait()
-		close(ch)
-
-		var errs []error
-		errQuriesByName := make(map[string]string)
-		// read values from the channel
-		for r := range ch {
-			if r.Err != nil {
-				errs = append(errs, r.Err)
-				errQuriesByName[r.Name] = r.Query
-				continue
-			}
-			seriesList = append(seriesList, r.Series...)
-			tableName = append(tableName, r.TableName)
-		}
-		if len(errs) != 0 {
-			return nil, nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
-		}
-		return seriesList, tableName, nil, nil
-	}
-
-	execPromQueries := func(metricsQueryRangeParams *basemodel.QueryRangeParamsV2) ([]*basemodel.Series, error, map[string]string) {
-		var seriesList []*basemodel.Series
-		ch := make(chan channelResult, len(metricsQueryRangeParams.CompositeMetricQuery.PromQueries))
-		var wg sync.WaitGroup
-
-		for name, query := range metricsQueryRangeParams.CompositeMetricQuery.PromQueries {
-			if query.Disabled {
-				continue
-			}
-			wg.Add(1)
-			go func(name string, query *basemodel.PromQuery) {
-				var seriesList []*basemodel.Series
-				defer wg.Done()
-				tmpl := template.New("promql-query")
-				tmpl, tmplErr := tmpl.Parse(query.Query)
-				if tmplErr != nil {
-					ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
-					return
-				}
-				var queryBuf bytes.Buffer
-				tmplErr = tmpl.Execute(&queryBuf, metricsQueryRangeParams.Variables)
-				if tmplErr != nil {
-					ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
-					return
-				}
-				query.Query = queryBuf.String()
-				queryModel := basemodel.QueryRangeParams{
-					Start: time.UnixMilli(metricsQueryRangeParams.Start),
-					End:   time.UnixMilli(metricsQueryRangeParams.End),
-					Step:  time.Duration(metricsQueryRangeParams.Step * int64(time.Second)),
-					Query: query.Query,
-				}
-				promResult, _, err := ah.opts.DataConnector.GetQueryRangeResult(r.Context(), &queryModel)
-				if err != nil {
-					ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query.Query}
-					return
-				}
-				matrix, _ := promResult.Matrix()
-				for _, v := range matrix {
-					var s basemodel.Series
-					s.QueryName = name
-					s.Labels = v.Metric.Copy().Map()
-					for _, p := range v.Floats {
-						s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.F})
-					}
-					seriesList = append(seriesList, &s)
-				}
-				ch <- channelResult{Series: seriesList}
-			}(name, query)
-		}
-
-		wg.Wait()
-		close(ch)
-
-		var errs []error
-		errQuriesByName := make(map[string]string)
-		// read values from the channel
-		for r := range ch {
-			if r.Err != nil {
-				errs = append(errs, r.Err)
-				errQuriesByName[r.Name] = r.Query
-				continue
-			}
-			seriesList = append(seriesList, r.Series...)
-		}
-		if len(errs) != 0 {
-			return nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
-		}
-		return seriesList, nil, nil
-	}
-
-	var seriesList []*basemodel.Series
-	var tableName []string
-	var err error
-	var errQuriesByName map[string]string
-	switch metricsQueryRangeParams.CompositeMetricQuery.QueryType {
-	case basemodel.QUERY_BUILDER:
-		runQueries := metrics.PrepareBuilderMetricQueries(metricsQueryRangeParams, constants.SIGNOZ_TIMESERIES_TABLENAME)
-		if runQueries.Err != nil {
-			RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: runQueries.Err}, nil)
-			return
-		}
-		seriesList, tableName, err, errQuriesByName = execClickHouseQueries(runQueries.Queries)
-
-	case basemodel.CLICKHOUSE:
-		queries := make(map[string]string)
-
-		for name, chQuery := range metricsQueryRangeParams.CompositeMetricQuery.ClickHouseQueries {
-			if chQuery.Disabled {
-				continue
-			}
-			tmpl := template.New("clickhouse-query")
-			tmpl, err := tmpl.Parse(chQuery.Query)
-			if err != nil {
-				RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
-				return
-			}
-			var query bytes.Buffer
-
-			// replace go template variables
-			querytemplate.AssignReservedVars(metricsQueryRangeParams)
-
-			err = tmpl.Execute(&query, metricsQueryRangeParams.Variables)
-			if err != nil {
-				RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
-				return
-			}
-			queries[name] = query.String()
-		}
-		seriesList, tableName, err, errQuriesByName = execClickHouseQueries(queries)
-	case basemodel.PROM:
-		seriesList, err, errQuriesByName = execPromQueries(metricsQueryRangeParams)
-	default:
-		err = fmt.Errorf("invalid query type")
-		RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, errQuriesByName)
-		return
-	}
-
-	if err != nil {
-		apiErrObj := &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}
-		RespondError(w, apiErrObj, errQuriesByName)
-		return
-	}
-	if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
-		len(seriesList) > 1 &&
-		(metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER ||
-			metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.CLICKHOUSE) {
-		RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: fmt.Errorf("invalid: query resulted in more than one series for value type")}, nil)
-		return
-	}
-
-	type ResponseFormat struct {
-		ResultType string              `json:"resultType"`
-		Result     []*basemodel.Series `json:"result"`
-		TableName  []string            `json:"tableName"`
-	}
-	resp := ResponseFormat{ResultType: "matrix", Result: seriesList, TableName: tableName}
-	ah.Respond(w, resp)
-}
@@ -12,6 +12,7 @@ import (
 	"github.com/gorilla/mux"
 	"go.signoz.io/signoz/ee/query-service/model"
 	"go.signoz.io/signoz/pkg/query-service/auth"
+	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 	"go.uber.org/zap"
 )
@@ -28,7 +29,7 @@ func generatePATToken() string {
 func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()

-	req := model.PAT{}
+	req := model.CreatePATRequestBody{}
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
 		RespondError(w, model.BadRequest(err), nil)
 		return
@@ -41,30 +42,87 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
 		}, nil)
 		return
 	}
-	// All the PATs are associated with the user creating the PAT. Hence, the permissions
-	// associated with the PAT is also equivalent to that of the user.
-	req.UserID = user.Id
-	req.CreatedAt = time.Now().Unix()
-	req.Token = generatePATToken()
-
-	// default expiry is 30 days
-	if req.ExpiresAt == 0 {
-		req.ExpiresAt = time.Now().AddDate(0, 0, 30).Unix()
+	pat := model.PAT{
+		Name:      req.Name,
+		Role:      req.Role,
+		ExpiresAt: req.ExpiresInDays,
 	}
-	// max expiry is 1 year
-	if req.ExpiresAt > time.Now().AddDate(1, 0, 0).Unix() {
-		req.ExpiresAt = time.Now().AddDate(1, 0, 0).Unix()
+	err = validatePATRequest(pat)
+	if err != nil {
+		RespondError(w, model.BadRequest(err), nil)
+		return
 	}

-	zap.S().Debugf("Got PAT request: %+v", req)
+	// All the PATs are associated with the user creating the PAT.
+	pat.UserID = user.Id
+	pat.CreatedAt = time.Now().Unix()
+	pat.UpdatedAt = time.Now().Unix()
+	pat.LastUsed = 0
+	pat.Token = generatePATToken()
+
+	if pat.ExpiresAt != 0 {
+		// convert expiresAt to unix timestamp from days
+		pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60)
+	}
+
+	zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
 	var apierr basemodel.BaseApiError
-	if req, apierr = ah.AppDao().CreatePAT(ctx, req); apierr != nil {
+	if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
 		RespondError(w, apierr, nil)
 		return
 	}

-	ah.Respond(w, &req)
+	ah.Respond(w, &pat)
+}
+
+func validatePATRequest(req model.PAT) error {
+	if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
+		return fmt.Errorf("valid role is required")
+	}
+	if req.ExpiresAt < 0 {
+		return fmt.Errorf("valid expiresAt is required")
+	}
+	if req.Name == "" {
+		return fmt.Errorf("valid name is required")
+	}
+	return nil
+}
+
+func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
+	ctx := context.Background()
+
+	req := model.PAT{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		RespondError(w, model.BadRequest(err), nil)
+		return
+	}
+
+	user, err := auth.GetUserFromRequest(r)
+	if err != nil {
+		RespondError(w, &model.ApiError{
+			Typ: model.ErrorUnauthorized,
+			Err: err,
+		}, nil)
+		return
+	}
+
+	err = validatePATRequest(req)
+	if err != nil {
+		RespondError(w, model.BadRequest(err), nil)
+		return
+	}
+
+	req.UpdatedByUserID = user.Id
+	id := mux.Vars(r)["id"]
+	req.UpdatedAt = time.Now().Unix()
+	zap.L().Info("Got Update PAT request", zap.Any("pat", req))
+	var apierr basemodel.BaseApiError
+	if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
+		RespondError(w, apierr, nil)
+		return
+	}
+
+	ah.Respond(w, map[string]string{"data": "pat updated successfully"})
 }

 func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
@@ -77,8 +135,8 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
 		}, nil)
 		return
 	}
-	zap.S().Infof("Get PATs for user: %+v", user.Id)
-	pats, apierr := ah.AppDao().ListPATs(ctx, user.Id)
+	zap.L().Info("Get PATs for user", zap.String("user_id", user.Id))
+	pats, apierr := ah.AppDao().ListPATs(ctx)
 	if apierr != nil {
 		RespondError(w, apierr, nil)
 		return
@@ -86,7 +144,7 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
 	ah.Respond(w, pats)
 }

-func (ah *APIHandler) deletePAT(w http.ResponseWriter, r *http.Request) {
+func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()
 	id := mux.Vars(r)["id"]
 	user, err := auth.GetUserFromRequest(r)
@@ -97,22 +155,11 @@ func (ah *APIHandler) deletePAT(w http.ResponseWriter, r *http.Request) {
 		}, nil)
 		return
 	}
-	pat, apierr := ah.AppDao().GetPATByID(ctx, id)
-	if apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-	if pat.UserID != user.Id {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
-			Err: fmt.Errorf("unauthorized PAT delete request"),
-		}, nil)
-		return
-	}
-	zap.S().Debugf("Delete PAT with id: %+v", id)
-	if apierr := ah.AppDao().DeletePAT(ctx, id); apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-	ah.Respond(w, map[string]string{"data": "pat deleted successfully"})
+
+	zap.L().Info("Revoke PAT with id", zap.String("id", id))
+	if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil {
+		RespondError(w, apierr, nil)
+		return
+	}
+	ah.Respond(w, map[string]string{"data": "pat revoked successfully"})
 }
118  ee/query-service/app/api/queryrange.go  Normal file
@@ -0,0 +1,118 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+
+	"go.signoz.io/signoz/ee/query-service/anomaly"
+	baseapp "go.signoz.io/signoz/pkg/query-service/app"
+	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
+	"go.signoz.io/signoz/pkg/query-service/model"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+	"go.uber.org/zap"
+)
+
+func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
+
+	bodyBytes, _ := io.ReadAll(r.Body)
+	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+
+	queryRangeParams, apiErrorObj := baseapp.ParseQueryRangeParams(r)
+
+	if apiErrorObj != nil {
+		zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
+		RespondError(w, apiErrorObj, nil)
+		return
+	}
+	queryRangeParams.Version = "v4"
+
+	// add temporality for each metric
+	temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
+	if temporalityErr != nil {
+		zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
+		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
+		return
+	}
+
+	anomalyQueryExists := false
+	anomalyQuery := &v3.BuilderQuery{}
+	if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
+		for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
+			for _, fn := range query.Functions {
+				if fn.Name == v3.FunctionNameAnomaly {
+					anomalyQueryExists = true
+					anomalyQuery = query
+					break
+				}
+			}
+		}
+	}
+
+	if anomalyQueryExists {
+		// ensure all queries have metric data source, and there should be only one anomaly query
+		for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
+			if query.DataSource != v3.DataSourceMetrics {
+				RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("all queries must have metric data source")}, nil)
+				return
+			}
+		}
+
+		// get the threshold, and seasonality from the anomaly query
+		var seasonality anomaly.Seasonality
+		for _, fn := range anomalyQuery.Functions {
+			if fn.Name == v3.FunctionNameAnomaly {
+				seasonalityStr, ok := fn.NamedArgs["seasonality"].(string)
+				if !ok {
+					seasonalityStr = "daily"
+				}
+				if seasonalityStr == "weekly" {
+					seasonality = anomaly.SeasonalityWeekly
+				} else if seasonalityStr == "daily" {
+					seasonality = anomaly.SeasonalityDaily
+				} else {
+					seasonality = anomaly.SeasonalityHourly
+				}
+				break
+			}
+		}
+		var provider anomaly.Provider
+		switch seasonality {
+		case anomaly.SeasonalityWeekly:
+			provider = anomaly.NewWeeklyProvider(
+				anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
+				anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
+				anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
+				anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
+			)
+		case anomaly.SeasonalityDaily:
+			provider = anomaly.NewDailyProvider(
+				anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
+				anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
+				anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
+				anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
+			)
+		case anomaly.SeasonalityHourly:
+			provider = anomaly.NewHourlyProvider(
+				anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
+				anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
+				anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
+				anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
+			)
+		}
+		anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
+		if err != nil {
+			RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
+			return
+		}
+		resp := v3.QueryRangeResponse{
+			Result:     anomalies.Results,
+			ResultType: "anomaly",
+		}
+		aH.Respond(w, resp)
+	} else {
+		r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+		aH.QueryRangeV4(w, r)
+	}
+}
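queryRangeV4 routes anomaly queries to a seasonality-specific provider, reading the seasonality from the anomaly function's named arguments with "daily" as the default and anything unrecognized falling through to hourly. That selection logic, isolated as a standalone sketch (the type and constant names are local stand-ins for the anomaly package's):

package main

import "fmt"

type Seasonality string

const (
	SeasonalityHourly Seasonality = "hourly"
	SeasonalityDaily  Seasonality = "daily"
	SeasonalityWeekly Seasonality = "weekly"
)

// seasonalityFromArgs mirrors the lookup in queryRangeV4: a missing or
// non-string argument defaults to daily; unknown strings map to hourly.
func seasonalityFromArgs(namedArgs map[string]interface{}) Seasonality {
	s, ok := namedArgs["seasonality"].(string)
	if !ok {
		s = "daily"
	}
	switch s {
	case "weekly":
		return SeasonalityWeekly
	case "daily":
		return SeasonalityDaily
	default:
		return SeasonalityHourly
	}
}

func main() {
	fmt.Println(seasonalityFromArgs(map[string]interface{}{"seasonality": "weekly"})) // weekly
	fmt.Println(seasonalityFromArgs(nil))                                             // daily
}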
@@ -2,10 +2,8 @@ package api

 import (
 	"net/http"
-	"strconv"

 	"go.signoz.io/signoz/ee/query-service/app/db"
-	"go.signoz.io/signoz/ee/query-service/constants"
 	"go.signoz.io/signoz/ee/query-service/model"
 	baseapp "go.signoz.io/signoz/pkg/query-service/app"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
@@ -15,21 +13,17 @@ import (
 func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {

 	if !ah.CheckFeature(basemodel.SmartTraceDetail) {
-		zap.S().Info("SmartTraceDetail feature is not enabled in this plan")
+		zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
 		ah.APIHandler.SearchTraces(w, r)
 		return
 	}
-	traceId, spanId, levelUpInt, levelDownInt, err := baseapp.ParseSearchTracesParams(r)
+	searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
 	if err != nil {
 		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
 		return
 	}
-	spanLimit, err := strconv.Atoi(constants.SpanLimitStr)
-	if err != nil {
-		zap.S().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable: ", err)
-		return
-	}
-	result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm)
+	result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
 	if ah.HandleError(w, err, http.StatusBadRequest) {
 		return
 	}
@@ -1,401 +0,0 @@
|
|||||||
package db
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
|
||||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
|
||||||
"go.signoz.io/signoz/pkg/query-service/utils"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetMetricResultEE runs the query and returns list of time series
|
|
||||||
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
|
|
||||||
|
|
||||||
defer utils.Elapsed("GetMetricResult")()
|
|
||||||
zap.S().Infof("Executing metric result query: %s", query)
|
|
||||||
|
|
||||||
var hash string
|
|
||||||
// If getSubTreeSpans function is used in the clickhouse query
|
|
||||||
if strings.Index(query, "getSubTreeSpans(") != -1 {
|
|
||||||
var err error
|
|
||||||
query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
|
|
||||||
if err == fmt.Errorf("No spans found for the given query") {
|
|
||||||
return nil, "", nil
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rows, err := r.conn.Query(ctx, query)
|
|
||||||
zap.S().Debug(query)
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Debug("Error in processing query: ", err)
|
|
||||||
return nil, "", fmt.Errorf("error in processing query")
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
columnTypes = rows.ColumnTypes()
|
|
||||||
columnNames = rows.Columns()
|
|
||||||
vars = make([]interface{}, len(columnTypes))
|
|
||||||
)
|
|
||||||
for i := range columnTypes {
|
|
||||||
vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
|
|
||||||
}
|
|
||||||
// when group by is applied, each combination of cartesian product
|
|
||||||
// of attributes is separate series. each item in metricPointsMap
|
|
||||||
// represent a unique series.
|
|
||||||
metricPointsMap := make(map[string][]basemodel.MetricPoint)
|
|
||||||
// attribute key-value pairs for each group selection
|
|
||||||
attributesMap := make(map[string]map[string]string)
|
|
||||||
|
|
||||||
defer rows.Close()
|
|
||||||
for rows.Next() {
|
|
||||||
if err := rows.Scan(vars...); err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
var groupBy []string
|
|
||||||
var metricPoint basemodel.MetricPoint
|
|
||||||
groupAttributes := make(map[string]string)
|
|
||||||
// Assuming that the end result row contains a timestamp, value and option labels
|
|
||||||
// Label key and value are both strings.
|
|
||||||
for idx, v := range vars {
|
|
||||||
colName := columnNames[idx]
|
|
||||||
switch v := v.(type) {
|
|
||||||
case *string:
|
|
||||||
// special case for returning all labels
|
|
||||||
if colName == "fullLabels" {
|
|
||||||
var metric map[string]string
|
|
||||||
err := json.Unmarshal([]byte(*v), &metric)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
for key, val := range metric {
|
|
||||||
groupBy = append(groupBy, val)
|
|
||||||
groupAttributes[key] = val
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
groupBy = append(groupBy, *v)
|
|
||||||
groupAttributes[colName] = *v
|
|
||||||
}
|
|
||||||
case *time.Time:
|
|
||||||
metricPoint.Timestamp = v.UnixMilli()
|
|
||||||
case *float64:
|
|
||||||
metricPoint.Value = *v
|
|
||||||
case **float64:
|
|
||||||
// ch seems to return this type when column is derived from
|
|
||||||
// SELECT count(*)/ SELECT count(*)
|
|
||||||
floatVal := *v
|
|
||||||
if floatVal != nil {
|
|
||||||
metricPoint.Value = *floatVal
|
|
||||||
}
|
|
||||||
case *float32:
|
|
||||||
float32Val := float32(*v)
|
|
||||||
metricPoint.Value = float64(float32Val)
|
|
||||||
case *uint8, *uint64, *uint16, *uint32:
|
|
||||||
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
|
|
||||||
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
|
|
||||||
} else {
|
|
||||||
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
|
|
||||||
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
|
|
||||||
}
|
|
||||||
case *int8, *int16, *int32, *int64:
|
|
||||||
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
|
|
||||||
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
|
|
||||||
} else {
|
|
||||||
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
|
|
||||||
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
zap.S().Errorf("invalid var found in metric builder query result", v, colName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Strings(groupBy)
|
|
||||||
key := strings.Join(groupBy, "")
|
|
||||||
attributesMap[key] = groupAttributes
|
|
||||||
metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
|
|
||||||
}
|
|
||||||
|
|
||||||
var seriesList []*basemodel.Series
|
|
||||||
for key := range metricPointsMap {
|
|
||||||
points := metricPointsMap[key]
|
|
||||||
// first point in each series could be invalid since the
|
|
||||||
// aggregations are applied with point from prev series
|
|
||||||
if len(points) != 0 && len(points) > 1 {
|
|
||||||
points = points[1:]
|
|
||||||
}
|
|
||||||
attributes := attributesMap[key]
|
|
||||||
series := basemodel.Series{Labels: attributes, Points: points}
|
|
||||||
seriesList = append(seriesList, &series)
|
|
||||||
}
|
|
||||||
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
|
|
||||||
// if err != nil {
|
|
||||||
// zap.S().Error("Error in dropping temporary table: ", err)
|
|
||||||
// return nil, err
|
|
||||||
// }
|
|
||||||
if hash == "" {
|
|
||||||
return seriesList, hash, nil
|
|
||||||
} else {
|
|
||||||
return seriesList, "getSubTreeSpans" + hash, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
|
|
||||||
|
|
||||||
zap.S().Debugf("Executing getSubTreeSpans function")
|
|
||||||
|
|
||||||
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
|
|
||||||
|
|
||||||
// process the query to fetch subTree query
|
|
||||||
var subtreeInput string
|
|
||||||
query, subtreeInput, hash = processQuery(query, hash)
|
|
||||||
|
|
||||||
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Error("Error in dropping temporary table: ", err)
|
|
||||||
return query, hash, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create temporary table to store the getSubTreeSpans() results
|
|
||||||
zap.S().Debugf("Creating temporary table getSubTreeSpans%s", hash)
|
|
||||||
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Error("Error in creating temporary table: ", err)
|
|
||||||
return query, hash, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
|
|
||||||
getSpansSubQuery := subtreeInput
|
|
||||||
// Execute the subTree query
|
|
||||||
zap.S().Debugf("Executing subTree query: %s", getSpansSubQuery)
|
|
||||||
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
|
|
||||||
|
|
||||||
// zap.S().Info(getSpansSubQuery)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Debug("Error in processing sql query: ", err)
|
|
||||||
return query, hash, fmt.Errorf("Error in processing sql query")
|
|
||||||
}
|
|
||||||
|
|
||||||
var searchScanResponses []basemodel.SearchSpanDBResponseItem
|
|
||||||
|
|
||||||
// TODO : @ankit: I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this as an improvement later.
|
|
||||||
// Fetch all the spans from of same TraceID so that we can build subtree
|
|
||||||
modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
|
|
||||||
|
|
||||||
if len(getSpansSubQueryDBResponses) == 0 {
|
|
||||||
return query, hash, fmt.Errorf("No spans found for the given query")
|
|
||||||
}
|
|
||||||
zap.S().Debugf("Executing query to fetch all the spans from the same TraceID: %s", modelQuery)
|
|
||||||
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Debug("Error in processing sql query: ", err)
|
|
||||||
return query, hash, fmt.Errorf("Error in processing sql query")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process model to fetch the spans
|
|
||||||
zap.S().Debugf("Processing model to fetch the spans")
|
|
||||||
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
|
|
||||||
for _, item := range searchScanResponses {
|
|
||||||
var jsonItem basemodel.SearchSpanResponseItem
|
|
||||||
json.Unmarshal([]byte(item.Model), &jsonItem)
|
|
||||||
jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano())
|
|
||||||
if jsonItem.Events == nil {
|
|
||||||
jsonItem.Events = []string{}
|
|
||||||
}
|
|
||||||
searchSpanResponses = append(searchSpanResponses, jsonItem)
|
|
||||||
}
|
|
||||||
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
|
|
||||||
// Use map to store pointer to the spans to avoid duplicates and save memory
|
|
||||||
zap.S().Debugf("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans%s", hash)
|
|
||||||
|
|
||||||
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Error("Error in getSubTreeAlgorithm function: ", err)
|
|
||||||
return query, hash, err
|
|
||||||
}
|
|
||||||
zap.S().Debugf("Preparing batch to store subtree spans in temporary table getSubTreeSpans%s", hash)
|
|
||||||
statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Error("Error in preparing batch statement: ", err)
|
|
||||||
return query, hash, err
|
|
||||||
}
|
|
||||||
for _, span := range treeSearchResponse {
|
|
||||||
var parentID string
|
|
||||||
if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" {
|
|
||||||
parentID = span.References[0].SpanId
|
|
||||||
}
|
|
||||||
err = statement.Append(
|
|
||||||
time.Unix(0, int64(span.TimeUnixNano)),
|
|
||||||
span.TraceID,
|
|
||||||
span.SpanID,
|
|
||||||
parentID,
|
|
||||||
span.RootSpanID,
|
|
||||||
span.ServiceName,
|
|
||||||
span.Name,
|
|
||||||
span.RootName,
|
|
||||||
uint64(span.DurationNano),
|
|
||||||
int8(span.Kind),
|
|
||||||
span.TagMap,
|
|
||||||
span.Events,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Debug("Error in processing sql query: ", err)
|
|
||||||
return query, hash, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
zap.S().Debugf("Inserting the subtree spans in temporary table getSubTreeSpans%s", hash)
|
|
||||||
err = statement.Send()
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Error("Error in sending statement: ", err)
|
|
||||||
return query, hash, err
|
|
||||||
}
|
|
||||||
return query, hash, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func processQuery(query string, hash string) (string, string, string) {
|
|
||||||
re3 := regexp.MustCompile(`getSubTreeSpans`)
|
|
||||||
|
|
||||||
submatchall3 := re3.FindAllStringIndex(query, -1)
|
|
||||||
getSubtreeSpansMatchIndex := submatchall3[0][1]
|
|
||||||
|
|
||||||
query2countParenthesis := query[getSubtreeSpansMatchIndex:]
|
|
||||||
|
|
||||||
sqlCompleteIndex := 0
|
|
||||||
countParenthesisImbalance := 0
|
|
||||||
for i, char := range query2countParenthesis {
|
|
||||||
|
|
||||||
if string(char) == "(" {
|
|
||||||
countParenthesisImbalance += 1
|
|
||||||
}
|
|
||||||
if string(char) == ")" {
|
|
||||||
countParenthesisImbalance -= 1
|
|
||||||
}
|
|
||||||
if countParenthesisImbalance == 0 {
|
|
||||||
sqlCompleteIndex = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
subtreeInput := query2countParenthesis[1:sqlCompleteIndex]
|
|
||||||
|
|
||||||
// hash the subtreeInput
|
|
||||||
hmd5 := md5.Sum([]byte(subtreeInput))
|
|
||||||
hash = fmt.Sprintf("%x", hmd5)
|
|
||||||
|
|
||||||
// Reformat the query to use the getSubTreeSpans function
|
|
||||||
query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:]
|
|
||||||
return query, subtreeInput, hash
|
|
||||||
}
|
|
||||||
|
|
||||||
// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans
func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) {

	var spans []*model.SpanForTraceDetails
	for _, spanItem := range payload {
		var parentID string
		if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
			parentID = spanItem.References[0].SpanId
		}
		span := &model.SpanForTraceDetails{
			TimeUnixNano: spanItem.TimeUnixNano,
			SpanID:       spanItem.SpanID,
			TraceID:      spanItem.TraceID,
			ServiceName:  spanItem.ServiceName,
			Name:         spanItem.Name,
			Kind:         spanItem.Kind,
			DurationNano: spanItem.DurationNano,
			TagMap:       spanItem.TagMap,
			ParentID:     parentID,
			Events:       spanItem.Events,
			HasError:     spanItem.HasError,
		}
		spans = append(spans, span)
	}

	zap.S().Debug("Building Tree")
	roots, err := buildSpanTrees(&spans)
	if err != nil {
		return nil, err
	}
	searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem)
	// Every span which was fetched from getSubTree Input SQL query is considered root
	// For each root, get the subtree spans
	for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
		targetSpan := &model.SpanForTraceDetails{}
		// zap.S().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
		// Search target span object in the tree
		for _, root := range roots {
			targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
			if targetSpan != nil {
				break
			}
			if err != nil {
				zap.S().Error("Error during BreadthFirstSearch(): ", err)
				return nil, err
			}
		}
		if targetSpan == nil {
			return nil, nil
		}
		// Build subtree for the target span
		// Mark the target span as root by setting parent ID as empty string
		targetSpan.ParentID = ""
		preParents := []*model.SpanForTraceDetails{targetSpan}
		children := []*model.SpanForTraceDetails{}

		// Get the subtree child spans
		for i := 0; len(preParents) != 0; i++ {
			parents := []*model.SpanForTraceDetails{}
			for _, parent := range preParents {
				children = append(children, parent.Children...)
				parents = append(parents, parent.Children...)
			}
			preParents = parents
		}

		resultSpans := children
		// Add the target span to the result spans
		resultSpans = append(resultSpans, targetSpan)

		for _, item := range resultSpans {
			references := []basemodel.OtelSpanRef{
				{
					TraceId: item.TraceID,
					SpanId:  item.ParentID,
					RefType: "CHILD_OF",
				},
			}

			if item.Events == nil {
				item.Events = []string{}
			}
			searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{
				TimeUnixNano: item.TimeUnixNano,
				SpanID:       item.SpanID,
				TraceID:      item.TraceID,
				ServiceName:  item.ServiceName,
				Name:         item.Name,
				Kind:         item.Kind,
				References:   references,
				DurationNano: item.DurationNano,
				TagMap:       item.TagMap,
				Events:       item.Events,
				HasError:     item.HasError,
				RootSpanID:   getSpansSubQueryDBResponse.SpanID,
				RootName:     targetSpan.Name,
			}
		}
	}
	return searchSpansResult, nil
}
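The preParents/children loop above is an iterative level-order walk: each pass swaps the current frontier for its children until the frontier is empty, so the whole subtree is collected without recursion. A reduced sketch of the same pattern (Node is a stand-in type, not from this codebase):

// Node stands in for model.SpanForTraceDetails; only Children matters
// for the traversal.
type Node struct {
	ID       string
	Children []*Node
}

// descendants gathers every node below root, one level at a time,
// mirroring the preParents/children loop in getSubTreeAlgorithm.
func descendants(root *Node) []*Node {
	var out []*Node
	level := []*Node{root}
	for len(level) != 0 {
		var next []*Node
		for _, n := range level {
			out = append(out, n.Children...)
			next = append(next, n.Children...)
		}
		level = next
	}
	return out
}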
@@ -25,8 +25,9 @@ func NewDataConnector(
 	maxOpenConns int,
 	dialTimeout time.Duration,
 	cluster string,
+	useLogsNewSchema bool,
 ) *ClickhouseReader {
-	ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
+	ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
 	return &ClickhouseReader{
 		conn:  ch.GetConn(),
 		appdb: localDB,
@@ -13,6 +13,11 @@ import (
 func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
 	var spans []*model.SpanForTraceDetails
 
+	// if targetSpanId is null or not present then randomly select a span as targetSpanId
+	if (targetSpanId == "" || targetSpanId == "null") && len(payload) > 0 {
+		targetSpanId = payload[0].SpanID
+	}
+
 	// Build a slice of spans from the payload
 	for _, spanItem := range payload {
 		var parentID string
@@ -49,14 +54,14 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 			break
 		}
 		if err != nil {
-			zap.S().Error("Error during BreadthFirstSearch(): ", err)
+			zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
 			return nil, err
 		}
 	}
 
 	// If the target span is not found, return span not found error
 	if targetSpan == nil {
-		return nil, errors.New("Span not found")
+		return nil, errors.New("span not found")
 	}
 
 	// Build the final result
@@ -113,8 +118,9 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	}
 
 	searchSpansResult := []basemodel.SearchSpansResult{{
 		Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
 		Events:  make([][]interface{}, len(resultSpansSet)),
+		IsSubTree: true,
 	},
 	}
 
@@ -186,7 +192,7 @@ func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTra
 
 	// If the parent span is not found, add current span to list of roots
 	if parent == nil {
-		// zap.S().Debug("Parent Span not found parent_id: ", span.ParentID)
+		// zap.L().Debug("Parent Span not found parent_id: ", span.ParentID)
 		roots = append(roots, span)
 		span.ParentID = ""
 		continue
@@ -213,7 +219,7 @@ func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*
 	}
 
 	for _, child := range current.Children {
-		if ok, _ := visited[child.SpanID]; !ok {
+		if ok := visited[child.SpanID]; !ok {
 			queue = append(queue, child)
 		}
 	}
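The last hunk is a small but real cleanup. Assuming visited is a map[string]bool (which the surrounding code suggests), the two-value form `ok, _ :=` binds the stored value to ok and throws away the presence flag, so it only behaved correctly because a missing key yields the zero value false. The single-value form reads the value directly and drops the misleading blank identifier. A quick self-contained illustration:

package main

import "fmt"

func main() {
	visited := map[string]bool{"a": true}

	// Two-value form: value first, presence flag second. The old
	// `ok, _ :=` discarded the presence flag, not the value.
	v, present := visited["b"]
	fmt.Println(v, present) // false false

	// Single-value form used after the fix: a missing key simply
	// reads as false, which is exactly what the BFS needs.
	if !visited["b"] {
		fmt.Println("b not yet visited")
	}
}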
@@ -1,15 +1,18 @@
 package app
 
 import (
+	"bufio"
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net"
 	"net/http"
 	_ "net/http/pprof" // http profiler
 	"os"
+	"regexp"
 	"time"
 
 	"github.com/gorilla/handlers"
@@ -20,11 +23,15 @@ import (
 	"github.com/soheilhy/cmux"
 	"go.signoz.io/signoz/ee/query-service/app/api"
 	"go.signoz.io/signoz/ee/query-service/app/db"
+	"go.signoz.io/signoz/ee/query-service/auth"
 	"go.signoz.io/signoz/ee/query-service/constants"
 	"go.signoz.io/signoz/ee/query-service/dao"
+	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
 	"go.signoz.io/signoz/ee/query-service/interfaces"
-	"go.signoz.io/signoz/pkg/query-service/auth"
-	baseInterface "go.signoz.io/signoz/pkg/query-service/interfaces"
+	"go.signoz.io/signoz/ee/query-service/rules"
+	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+	"go.signoz.io/signoz/pkg/query-service/migrate"
+	"go.signoz.io/signoz/pkg/query-service/model"
 	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
 
 	licensepkg "go.signoz.io/signoz/ee/query-service/license"
@@ -34,10 +41,11 @@ import (
 	baseapp "go.signoz.io/signoz/pkg/query-service/app"
 	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
 	baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
+	"go.signoz.io/signoz/pkg/query-service/app/integrations"
 	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
 	"go.signoz.io/signoz/pkg/query-service/app/opamp"
 	opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
-	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+	"go.signoz.io/signoz/pkg/query-service/app/preferences"
 	"go.signoz.io/signoz/pkg/query-service/cache"
 	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
 	"go.signoz.io/signoz/pkg/query-service/healthcheck"
@@ -45,7 +53,7 @@ import (
 	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 	pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
-	rules "go.signoz.io/signoz/pkg/query-service/rules"
+	baserules "go.signoz.io/signoz/pkg/query-service/rules"
 	"go.signoz.io/signoz/pkg/query-service/telemetry"
 	"go.signoz.io/signoz/pkg/query-service/utils"
 	"go.uber.org/zap"
@@ -61,7 +69,6 @@ type ServerOptions struct {
 	// alert specific params
 	DisableRules bool
 	RuleRepoURL  string
-	PreferDelta  bool
 	PreferSpanMetrics bool
 	MaxIdleConns int
 	MaxOpenConns int
@@ -69,14 +76,14 @@ type ServerOptions struct {
 	CacheConfigPath string
 	FluxInterval    string
 	Cluster         string
+	GatewayUrl      string
+	UseLogsNewSchema bool
 }
 
 // Server runs HTTP api service
 type Server struct {
 	serverOptions *ServerOptions
-	conn          net.Listener
-	ruleManager   *rules.Manager
-	separatePorts bool
+	ruleManager   *baserules.Manager
 
 	// public http router
 	httpConn net.Listener
@@ -86,9 +93,6 @@ type Server struct {
 	privateConn net.Listener
 	privateHTTP *http.Server
 
-	// feature flags
-	featureLookup baseint.FeatureLookup
-
 	// Usage manager
 	usageManager *usage.Manager
 
@@ -112,6 +116,10 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 
 	baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)
 
+	if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
+		return nil, err
+	}
+
 	localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
 
 	if err != nil {
@@ -120,6 +128,11 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 
 	localDB.SetMaxOpenConns(10)
 
+	gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
+	if err != nil {
+		return nil, err
+	}
+
 	// initiate license manager
 	lm, err := licensepkg.StartManager("sqlite", localDB)
 	if err != nil {
@@ -133,7 +146,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	var reader interfaces.DataConnector
 	storage := os.Getenv("STORAGE")
 	if storage == "clickhouse" {
-		zap.S().Info("Using ClickHouse as datastore ...")
+		zap.L().Info("Using ClickHouse as datastore ...")
 		qb := db.NewDataConnector(
 			localDB,
 			serverOptions.PromConfigPath,
@@ -142,6 +155,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 			serverOptions.MaxOpenConns,
 			serverOptions.DialTimeout,
 			serverOptions.Cluster,
+			serverOptions.UseLogsNewSchema,
 		)
 		go qb.Start(readerReady)
 		reader = qb
@@ -156,6 +170,14 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 			return nil, err
 		}
 	}
+	var c cache.Cache
+	if serverOptions.CacheConfigPath != "" {
+		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
+		if err != nil {
+			return nil, err
+		}
+		c = cache.NewCache(cacheOpts)
+	}
 
 	<-readerReady
 	rm, err := makeRulesManager(serverOptions.PromConfigPath,
@@ -163,21 +185,40 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		serverOptions.RuleRepoURL,
 		localDB,
 		reader,
+		c,
 		serverOptions.DisableRules,
-		lm)
+		lm,
+		serverOptions.UseLogsNewSchema,
+	)
 
 	if err != nil {
 		return nil, err
 	}
 
+	go func() {
+		err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
+		if err != nil {
+			zap.L().Error("error while running clickhouse migrations", zap.Error(err))
+		}
+	}()
+
 	// initiate opamp
-	_, err = opAmpModel.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
+	_, err = opAmpModel.InitDB(localDB)
 	if err != nil {
 		return nil, err
 	}
 
+	integrationsController, err := integrations.NewController(localDB)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"couldn't create integrations controller: %w", err,
+		)
+	}
+
 	// ingestion pipelines manager
-	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(localDB, "sqlite")
+	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
+		localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
+	)
 	if err != nil {
 		return nil, err
 	}
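A small detail in the hunk above: the new integrations-controller error is wrapped with %w, so the underlying cause stays reachable to callers via errors.Is / errors.As. A self-contained illustration of why that matters (the sentinel error is hypothetical):

package main

import (
	"errors"
	"fmt"
)

var errNoTable = errors.New("table missing")

func setup() error {
	// Wrapping with %w, as in the integrations-controller error above,
	// keeps the original error in the chain instead of flattening it
	// into a string.
	return fmt.Errorf("couldn't create integrations controller: %w", errNoTable)
}

func main() {
	err := setup()
	fmt.Println(err)                        // full, wrapped message
	fmt.Println(errors.Is(err, errNoTable)) // true: the cause survives wrapping
}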
@@ -205,15 +246,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	telemetry.GetInstance().SetReader(reader)
 	telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
 
-	var c cache.Cache
-	if serverOptions.CacheConfigPath != "" {
-		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
-		if err != nil {
-			return nil, err
-		}
-		c = cache.NewCache(cacheOpts)
-	}
-
 	fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
 
 	if err != nil {
@@ -223,7 +255,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	apiOpts := api.APIHandlerOptions{
 		DataConnector: reader,
 		SkipConfig:    skipConfig,
-		PreferDelta:   serverOptions.PreferDelta,
 		PreferSpanMetrics: serverOptions.PreferSpanMetrics,
 		MaxIdleConns:  serverOptions.MaxIdleConns,
 		MaxOpenConns:  serverOptions.MaxOpenConns,
@@ -233,9 +264,12 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		UsageManager:   usageManager,
 		FeatureFlags:   lm,
 		LicenseManager: lm,
+		IntegrationsController: integrationsController,
 		LogsParsingPipelineController: logParsingPipelineController,
 		Cache:        c,
 		FluxInterval: fluxInterval,
+		Gateway:      gatewayProxy,
+		UseLogsNewSchema: serverOptions.UseLogsNewSchema,
 	}
 
 	apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -276,8 +310,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 
 func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
 
-	r := mux.NewRouter()
+	r := baseapp.NewRouter()
 
+	r.Use(baseapp.LogCommentEnricher)
 	r.Use(setTimeoutMiddleware)
 	r.Use(s.analyticsMiddleware)
 	r.Use(loggingMiddlewarePrivate)
@@ -289,7 +324,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
 		// ip here for alert manager
 		AllowedOrigins: []string{"*"},
 		AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
-		AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY"},
+		AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"},
 	})
 
 	handler := c.Handler(r)
@@ -302,41 +337,41 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
 
 func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
 
-	r := mux.NewRouter()
+	r := baseapp.NewRouter()
 
+	// add auth middleware
 	getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
-		patToken := r.Header.Get("SIGNOZ-API-KEY")
-		if len(patToken) > 0 {
-			zap.S().Debugf("Received a non-zero length PAT token")
-			ctx := context.Background()
-			dao := apiHandler.AppDao()
-
-			user, err := dao.GetUserByPAT(ctx, patToken)
-			if err == nil && user != nil {
-				zap.S().Debugf("Found valid PAT user: %+v", user)
-				return user, nil
-			}
-			if err != nil {
-				zap.S().Debugf("Error while getting user for PAT: %+v", err)
-			}
+		user, err := auth.GetUserFromRequest(r, apiHandler)
+
+		if err != nil {
+			return nil, err
 		}
-		return baseauth.GetUserFromRequest(r)
+
+		if user.User.OrgId == "" {
+			return nil, model.UnauthorizedError(errors.New("orgId is missing in the claims"))
+		}
+
+		return user, nil
 	}
 	am := baseapp.NewAuthMiddleware(getUserFromRequest)
 
+	r.Use(baseapp.LogCommentEnricher)
 	r.Use(setTimeoutMiddleware)
 	r.Use(s.analyticsMiddleware)
 	r.Use(loggingMiddleware)
 
 	apiHandler.RegisterRoutes(r, am)
-	apiHandler.RegisterMetricsRoutes(r, am)
 	apiHandler.RegisterLogsRoutes(r, am)
+	apiHandler.RegisterIntegrationRoutes(r, am)
 	apiHandler.RegisterQueryRangeV3Routes(r, am)
 	apiHandler.RegisterQueryRangeV4Routes(r, am)
+	apiHandler.RegisterWebSocketPaths(r, am)
+	apiHandler.RegisterMessagingQueuesRoutes(r, am)
 
 	c := cors.New(cors.Options{
 		AllowedOrigins: []string{"*"},
 		AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
-		AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control"},
+		AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"},
 	})
 
 	handler := c.Handler(r)
@@ -348,6 +383,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
 	}, nil
 }
 
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
 // loggingMiddleware is used for logging public api calls
 func loggingMiddleware(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -355,10 +391,11 @@ func loggingMiddleware(next http.Handler) http.Handler {
 		path, _ := route.GetPathTemplate()
 		startTime := time.Now()
 		next.ServeHTTP(w, r)
-		zap.L().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
+		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path))
 	})
 }
 
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
 // loggingMiddlewarePrivate is used for logging private api calls
 // from internal services like alert manager
 func loggingMiddlewarePrivate(next http.Handler) http.Handler {
@@ -367,38 +404,53 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
 		path, _ := route.GetPathTemplate()
 		startTime := time.Now()
 		next.ServeHTTP(w, r)
-		zap.L().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
+		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
 	})
 }
 
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
 type loggingResponseWriter struct {
 	http.ResponseWriter
 	statusCode int
 }
 
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
 func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
 	// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
 	// we default to that status code.
 	return &loggingResponseWriter{w, http.StatusOK}
}
 
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
 func (lrw *loggingResponseWriter) WriteHeader(code int) {
 	lrw.statusCode = code
 	lrw.ResponseWriter.WriteHeader(code)
 }
 
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
 // Flush implements the http.Flush interface.
 func (lrw *loggingResponseWriter) Flush() {
 	lrw.ResponseWriter.(http.Flusher).Flush()
 }
 
-func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface{}, bool) {
-	pathToExtractBodyFrom := "/api/v3/query_range"
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+// Support websockets
+func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	h, ok := lrw.ResponseWriter.(http.Hijacker)
+	if !ok {
+		return nil, nil, errors.New("hijack not supported")
+	}
+	return h.Hijack()
+}
+
+func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
+	pathToExtractBodyFromV3 := "/api/v3/query_range"
+	pathToExtractBodyFromV4 := "/api/v4/query_range"
 
 	data := map[string]interface{}{}
 	var postData *v3.QueryRangeParamsV3
 
-	if path == pathToExtractBodyFrom && (r.Method == "POST") {
+	if (r.Method == "POST") && ((path == pathToExtractBodyFromV3) || (path == pathToExtractBodyFromV4)) {
 		if r.Body != nil {
 			bodyBytes, err := io.ReadAll(r.Body)
 			if err != nil {
@@ -416,32 +468,68 @@ func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface
 		return nil, false
 	}
 
+	referrer := r.Header.Get("Referer")
+
+	dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the referrer", zap.Error(err))
+	}
+	alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the alert: ", zap.Error(err))
+	}
+	logsExplorerMatched, err := regexp.MatchString(`/logs/logs-explorer(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the logs explorer: ", zap.Error(err))
+	}
+	traceExplorerMatched, err := regexp.MatchString(`/traces-explorer(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
+	}
+
 	signozMetricsUsed := false
 	signozLogsUsed := false
-	dataSources := []string{}
+	signozTracesUsed := false
 	if postData != nil {
 
 		if postData.CompositeQuery != nil {
 			data["queryType"] = postData.CompositeQuery.QueryType
 			data["panelType"] = postData.CompositeQuery.PanelType
 
-			signozLogsUsed, signozMetricsUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
+			signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
 		}
 	}
 
-	if signozMetricsUsed || signozLogsUsed {
+	if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
 		if signozMetricsUsed {
-			dataSources = append(dataSources, "metrics")
 			telemetry.GetInstance().AddActiveMetricsUser()
 		}
 		if signozLogsUsed {
-			dataSources = append(dataSources, "logs")
 			telemetry.GetInstance().AddActiveLogsUser()
 		}
-		data["dataSources"] = dataSources
-		userEmail, err := auth.GetEmailFromJwt(r.Context())
+		if signozTracesUsed {
+			telemetry.GetInstance().AddActiveTracesUser()
+		}
+		data["metricsUsed"] = signozMetricsUsed
+		data["logsUsed"] = signozLogsUsed
+		data["tracesUsed"] = signozTracesUsed
+		userEmail, err := baseauth.GetEmailFromJwt(r.Context())
 		if err == nil {
-			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_V3, data, userEmail, true)
+			// switch case to set data["screen"] based on the referrer
+			switch {
+			case dashboardMatched:
+				data["screen"] = "panel"
+			case alertMatched:
+				data["screen"] = "alert"
+			case logsExplorerMatched:
+				data["screen"] = "logs-explorer"
+			case traceExplorerMatched:
+				data["screen"] = "traces-explorer"
+			default:
+				data["screen"] = "unknown"
+				return data, true
+			}
+			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail, true, false)
 		}
 	}
 	return data, true
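The four referrer patterns added above classify which screen issued the query-range request. A quick way to sanity-check them is a table-driven wrapper; this sketch is illustrative and not part of the PR, but the regular expressions are the ones from the hunk:

package main

import (
	"fmt"
	"regexp"
)

// screens pairs each screen name with the referrer pattern added in
// the hunk above; first match wins, mirroring the switch statement.
var screens = []struct {
	name string
	re   *regexp.Regexp
}{
	{"panel", regexp.MustCompile(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`)},
	{"alert", regexp.MustCompile(`/alerts/(new|edit)(?:\?.*)?$`)},
	{"logs-explorer", regexp.MustCompile(`/logs/logs-explorer(?:\?.*)?$`)},
	{"traces-explorer", regexp.MustCompile(`/traces-explorer(?:\?.*)?$`)},
}

func screenFor(referrer string) string {
	for _, s := range screens {
		if s.re.MatchString(referrer) {
			return s.name
		}
	}
	return "unknown"
}

func main() {
	fmt.Println(screenFor("https://signoz.example/alerts/edit?ruleId=7")) // alert
	fmt.Println(screenFor("https://signoz.example/services"))             // unknown
}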
@@ -463,12 +551,12 @@ func getActiveLogs(path string, r *http.Request) {
 
 func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		ctx := auth.AttachJwtToContext(r.Context(), r)
+		ctx := baseauth.AttachJwtToContext(r.Context(), r)
 		r = r.WithContext(ctx)
 		route := mux.CurrentRoute(r)
 		path, _ := route.GetPathTemplate()
 
-		queryRangeV3data, metadataExists := extractQueryRangeV3Data(path, r)
+		queryRangeData, metadataExists := extractQueryRangeData(path, r)
 		getActiveLogs(path, r)
 
 		lrw := NewLoggingResponseWriter(w)
@@ -476,21 +564,22 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
 
 		data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
 		if metadataExists {
-			for key, value := range queryRangeV3data {
+			for key, value := range queryRangeData {
 				data[key] = value
 			}
 		}
 
 		if _, ok := telemetry.EnabledPaths()[path]; ok {
-			userEmail, err := auth.GetEmailFromJwt(r.Context())
+			userEmail, err := baseauth.GetEmailFromJwt(r.Context())
 			if err == nil {
-				telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail)
+				telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail, true, false)
 			}
 		}
 
 	})
 }
 
+// TODO(remove): Implemented at pkg/http/middleware/timeout.go
 func setTimeoutMiddleware(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		ctx := r.Context()
@@ -521,7 +610,7 @@ func (s *Server) initListeners() error {
 		return err
 	}
 
-	zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
+	zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
 
 	// listen on private port to support internal services
 	privateHostPort := s.serverOptions.PrivateHostPort
@@ -534,7 +623,7 @@ func (s *Server) initListeners() error {
 	if err != nil {
 		return err
 	}
-	zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
+	zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
 
 	return nil
 }
@@ -546,7 +635,7 @@ func (s *Server) Start() error {
 	if !s.serverOptions.DisableRules {
 		s.ruleManager.Start()
 	} else {
-		zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
+		zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
 	}
 
 	err := s.initListeners()
@@ -560,23 +649,23 @@ func (s *Server) Start() error {
 	}
 
 	go func() {
-		zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
+		zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
 
 		switch err := s.httpServer.Serve(s.httpConn); err {
 		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
 			// normal exit, nothing to do
 		default:
-			zap.S().Error("Could not start HTTP server", zap.Error(err))
+			zap.L().Error("Could not start HTTP server", zap.Error(err))
 		}
 		s.unavailableChannel <- healthcheck.Unavailable
 	}()
 
 	go func() {
-		zap.S().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
+		zap.L().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
 
 		err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
 		if err != nil {
-			zap.S().Error("Could not start pprof server", zap.Error(err))
+			zap.L().Error("Could not start pprof server", zap.Error(err))
 		}
 	}()
 
@@ -586,14 +675,14 @@ func (s *Server) Start() error {
 	}
 
 	go func() {
-		zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
+		zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
 
 		switch err := s.privateHTTP.Serve(s.privateConn); err {
 		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
 			// normal exit, nothing to do
-			zap.S().Info("private http server closed")
+			zap.L().Info("private http server closed")
 		default:
-			zap.S().Error("Could not start private HTTP server", zap.Error(err))
+			zap.L().Error("Could not start private HTTP server", zap.Error(err))
 		}
 
 		s.unavailableChannel <- healthcheck.Unavailable
@@ -601,10 +690,10 @@ func (s *Server) Start() error {
 	}()
 
 	go func() {
-		zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
+		zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
 		err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
 		if err != nil {
-			zap.S().Info("opamp ws server failed to start", err)
+			zap.L().Error("opamp ws server failed to start", zap.Error(err))
 			s.unavailableChannel <- healthcheck.Unavailable
 		}
 	}()
@@ -643,8 +732,10 @@ func makeRulesManager(
 	ruleRepoURL string,
 	db *sqlx.DB,
 	ch baseint.Reader,
+	cache cache.Cache,
 	disableRules bool,
-	fm baseInterface.FeatureLookup) (*rules.Manager, error) {
+	fm baseint.FeatureLookup,
+	useLogsNewSchema bool) (*baserules.Manager, error) {
 
 	// create engine
 	pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -660,27 +751,30 @@ func makeRulesManager(
 	}
 
 	// create manager opts
-	managerOpts := &rules.ManagerOptions{
+	managerOpts := &baserules.ManagerOptions{
 		NotifierOpts: notifierOpts,
-		Queriers: &rules.Queriers{
-			PqlEngine: pqle,
-			Ch:        ch.GetConn(),
-		},
+		PqlEngine:    pqle,
 		RepoURL:      ruleRepoURL,
 		DBConn:       db,
 		Context:      context.Background(),
-		Logger:       nil,
+		Logger:       zap.L(),
 		DisableRules: disableRules,
 		FeatureFlags: fm,
+		Reader:       ch,
+		Cache:        cache,
+		EvalDelay:    baseconst.GetEvalDelay(),
+
+		PrepareTaskFunc:  rules.PrepareTaskFunc,
+		UseLogsNewSchema: useLogsNewSchema,
 	}
 
 	// create Manager
-	manager, err := rules.NewManager(managerOpts)
+	manager, err := baserules.NewManager(managerOpts)
 	if err != nil {
 		return nil, fmt.Errorf("rule manager error: %v", err)
 	}
 
-	zap.S().Info("rules manager is ready")
+	zap.L().Info("rules manager is ready")
 
 	return manager, nil
 }
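The recurring zap.S() → zap.L() change across these hunks swaps zap's sugared logger (printf-style, interface{} arguments) for the structured one (typed fields). The two APIs side by side, as a generic illustration of the zap library rather than code from this PR:

package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger) // makes zap.L() and zap.S() return this logger

	err := doSomething()

	// Sugared: convenient, but arguments are untyped and mistakes such
	// as a stray trailing error argument compile silently.
	zap.S().Errorf("could not start HTTP server: %v", err)

	// Structured: typed fields, cheaper to encode and machine-parseable,
	// which is the form this PR migrates to.
	zap.L().Error("could not start HTTP server", zap.Error(err))
}

func doSomething() error { return nil }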
ee/query-service/auth/auth.go (new file, 56 lines)
@@ -0,0 +1,56 @@
+package auth
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"go.signoz.io/signoz/ee/query-service/app/api"
+	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+	basemodel "go.signoz.io/signoz/pkg/query-service/model"
+	"go.signoz.io/signoz/pkg/query-service/telemetry"
+
+	"go.uber.org/zap"
+)
+
+func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel.UserPayload, error) {
+	patToken := r.Header.Get("SIGNOZ-API-KEY")
+	if len(patToken) > 0 {
+		zap.L().Debug("Received a non-zero length PAT token")
+		ctx := context.Background()
+		dao := apiHandler.AppDao()
+
+		pat, err := dao.GetPAT(ctx, patToken)
+		if err == nil && pat != nil {
+			zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
+			if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
+				zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
+				return nil, fmt.Errorf("PAT has expired")
+			}
+			group, apiErr := dao.GetGroupByName(ctx, pat.Role)
+			if apiErr != nil {
+				zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
+				return nil, apiErr
+			}
+			user, err := dao.GetUser(ctx, pat.UserID)
+			if err != nil {
+				zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
+				return nil, err
+			}
+			telemetry.GetInstance().SetPatTokenUser()
+			dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
+			user.User.GroupId = group.Id
+			user.User.Id = pat.Id
+			return &basemodel.UserPayload{
+				User: user.User,
+				Role: pat.Role,
+			}, nil
+		}
+		if err != nil {
+			zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
+			return nil, err
+		}
+	}
+	return baseauth.GetUserFromRequest(r)
+}
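The expiry check in the new file reads backwards at first glance: ExpiresAt == 0 is the "never expires" sentinel, so it must be excluded before comparing against the clock. Extracted into a hypothetical helper for clarity (not part of the PR):

// patExpired reports whether a personal access token is past its
// expiry. An ExpiresAt of 0 means the token never expires, matching
// the condition in GetUserFromRequest above.
func patExpired(expiresAt, now int64) bool {
	return expiresAt != 0 && expiresAt < now
}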
@@ -11,7 +11,8 @@ const (
 var LicenseSignozIo = "https://license.signoz.io/api/v1"
 var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
 var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
-var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
+var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
+var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")
 
 func GetOrDefaultEnv(key string, fallback string) string {
 	v := os.Getenv(key)
@@ -34,9 +34,11 @@ type ModelDao interface {
 	GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
 
 	CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError)
+	UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError
 	GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
+	UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError
 	GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
 	GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
-	ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError)
-	DeletePAT(ctx context.Context, id string) basemodel.BaseApiError
+	ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError)
+	RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError
 }
@@ -20,21 +20,24 @@ import (
 func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
 	// get auth domain from email domain
 	domain, apierr := m.GetDomainByEmail(ctx, email)
 
 	if apierr != nil {
-		zap.S().Errorf("failed to get domain from email", apierr)
+		zap.L().Error("failed to get domain from email", zap.Error(apierr))
 		return nil, model.InternalErrorStr("failed to get domain from email")
 	}
+	if domain == nil {
+		zap.L().Error("email domain does not match any authenticated domain", zap.String("email", email))
+		return nil, model.InternalErrorStr("email domain does not match any authenticated domain")
+	}
 
 	hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
 	if err != nil {
-		zap.S().Errorf("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
+		zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
 		return nil, model.InternalErrorStr("failed to generate password hash")
 	}
 
 	group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
 	if apiErr != nil {
-		zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
+		zap.L().Error("GetGroupByName failed", zap.Error(apiErr))
 		return nil, apiErr
 	}
 
@@ -51,7 +54,7 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
 
 	user, apiErr = m.CreateUser(ctx, user, false)
 	if apiErr != nil {
-		zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err)
+		zap.L().Error("CreateUser failed", zap.Error(apiErr))
 		return nil, apiErr
 	}
 
@@ -65,7 +68,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
 
 	userPayload, apierr := m.GetUserByEmail(ctx, email)
 	if !apierr.IsNil() {
-		zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error())
+		zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
 		return "", model.BadRequestStr("invalid user email received from the auth provider")
 	}
 
@@ -75,7 +78,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
 		newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
 		user = newUser
 		if apiErr != nil {
-			zap.S().Errorf("failed to create user with email received from auth provider: %v", apierr.Error())
+			zap.L().Error("failed to create user with email received from auth provider", zap.Error(apiErr))
 			return "", apiErr
 		}
 	} else {
@@ -84,7 +87,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
 
 	tokenStore, err := baseauth.GenerateJWTForUser(user)
 	if err != nil {
-		zap.S().Errorf("failed to generate token for SSO login user", err)
+		zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
 		return "", model.InternalErrorStr("failed to generate token for the user")
 	}
 
@@ -143,8 +146,8 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 			// do nothing, just skip sso
 			ssoAvailable = false
 		default:
-			zap.S().Errorf("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
-			return resp, model.BadRequest(err)
+			zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
+			return resp, model.BadRequestStr(err.Error())
 		}
 	}
 
@@ -160,7 +163,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 		if len(emailComponents) > 0 {
 			emailDomain = emailComponents[1]
 		}
-		zap.S().Errorf("failed to get org domain from email", zap.String("emailDomain", emailDomain), apierr.ToError())
+		zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
 		return resp, apierr
 	}
 
@@ -176,7 +179,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 	escapedUrl, _ := url.QueryUnescape(sourceUrl)
 	siteUrl, err := url.Parse(escapedUrl)
 	if err != nil {
-		zap.S().Errorf("failed to parse referer", err)
+		zap.L().Error("failed to parse referer", zap.Error(err))
 		return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
 	}
 
@@ -185,7 +188,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 	resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
 
 	if err != nil {
-		zap.S().Errorf("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), err)
+		zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
 		return resp, model.InternalError(err)
 	}
 
@@ -48,13 +48,13 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
 	if domainIdStr != "" {
 		domainId, err := uuid.Parse(domainIdStr)
 		if err != nil {
-			zap.S().Errorf("failed to parse domainId from relay state", err)
+			zap.L().Error("failed to parse domainId from relay state", zap.Error(err))
 			return nil, fmt.Errorf("failed to parse domainId from IdP response")
 		}
 
 		domain, err = m.GetDomain(ctx, domainId)
 		if (err != nil) || domain == nil {
-			zap.S().Errorf("failed to find domain from domainId received in IdP response", err.Error())
+			zap.L().Error("failed to find domain from domainId received in IdP response", zap.Error(err))
 			return nil, fmt.Errorf("invalid credentials")
 		}
 	}
@@ -64,7 +64,7 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
 		domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
 		domain = domainFromDB
 		if (err != nil) || domain == nil {
-			zap.S().Errorf("failed to find domain from domainName received in IdP response", err.Error())
+			zap.L().Error("failed to find domain from domainName received in IdP response", zap.Error(err))
 			return nil, fmt.Errorf("invalid credentials")
 		}
 	}
@@ -132,7 +132,7 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
 	for _, s := range stored {
 		domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
 		if err := domain.LoadConfig(s.Data); err != nil {
-			zap.S().Errorf("ListDomains() failed", zap.Error(err))
+			zap.L().Error("ListDomains() failed", zap.Error(err))
 		}
 		domains = append(domains, domain)
 	}
@@ -153,7 +153,7 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
 
 	configJson, err := json.Marshal(domain)
 	if err != nil {
-		zap.S().Errorf("failed to unmarshal domain config", zap.Error(err))
+		zap.L().Error("failed to unmarshal domain config", zap.Error(err))
 		return model.InternalError(fmt.Errorf("domain creation failed"))
 	}
 
@@ -167,7 +167,7 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
 		time.Now().Unix())
 
 	if err != nil {
-		zap.S().Errorf("failed to insert domain in db", zap.Error(err))
+		zap.L().Error("failed to insert domain in db", zap.Error(err))
 		return model.InternalError(fmt.Errorf("domain creation failed"))
 	}
 
@@ -178,13 +178,13 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
 func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
 
 	if domain.Id == uuid.Nil {
-		zap.S().Errorf("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
+		zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
 		return model.InternalError(fmt.Errorf("domain update failed"))
 	}
 
 	configJson, err := json.Marshal(domain)
 	if err != nil {
-		zap.S().Errorf("domain update failed", zap.Error(err))
+		zap.L().Error("domain update failed", zap.Error(err))
 		return model.InternalError(fmt.Errorf("domain update failed"))
 	}
 
@@ -195,7 +195,7 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
 		domain.Id)
 
 	if err != nil {
-		zap.S().Errorf("domain update failed", zap.Error(err))
+		zap.L().Error("domain update failed", zap.Error(err))
 		return model.InternalError(fmt.Errorf("domain update failed"))
 	}
 
@@ -206,7 +206,7 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
 func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
 
 	if id == uuid.Nil {
-		zap.S().Errorf("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
+		zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
 		return model.InternalError(fmt.Errorf("domain delete failed"))
 	}
 
@@ -215,7 +215,7 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
 		id)
 
 	if err != nil {
-		zap.S().Errorf("domain delete failed", zap.Error(err))
+		zap.L().Error("domain delete failed", zap.Error(err))
 		return model.InternalError(fmt.Errorf("domain delete failed"))
 	}
 
@@ -7,6 +7,7 @@ import (
    basedao "go.signoz.io/signoz/pkg/query-service/dao"
    basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
    baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
+   "go.uber.org/zap"
)

type modelDao struct {
@@ -28,6 +29,41 @@ func (m *modelDao) checkFeature(key string) error {
    return m.flags.CheckFeature(key)
}

+func columnExists(db *sqlx.DB, tableName, columnName string) bool {
+   query := fmt.Sprintf("PRAGMA table_info(%s);", tableName)
+   rows, err := db.Query(query)
+   if err != nil {
+       zap.L().Error("Failed to query table info", zap.Error(err))
+       return false
+   }
+   defer rows.Close()
+
+   var (
+       cid        int
+       name       string
+       ctype      string
+       notnull    int
+       dflt_value *string
+       pk         int
+   )
+   for rows.Next() {
+       err := rows.Scan(&cid, &name, &ctype, &notnull, &dflt_value, &pk)
+       if err != nil {
+           zap.L().Error("Failed to scan table info", zap.Error(err))
+           return false
+       }
+       if name == columnName {
+           return true
+       }
+   }
+   err = rows.Err()
+   if err != nil {
+       zap.L().Error("Failed to scan table info", zap.Error(err))
+       return false
+   }
+   return false
+}
+
// InitDB creates and extends base model DB repository
func InitDB(dataSourceName string) (*modelDao, error) {
    dao, err := basedsql.InitDB(dataSourceName)
@@ -51,11 +87,16 @@ func InitDB(dataSourceName string) (*modelDao, error) {
    );
    CREATE TABLE IF NOT EXISTS personal_access_tokens (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
+       role TEXT NOT NULL,
        user_id TEXT NOT NULL,
        token TEXT NOT NULL UNIQUE,
        name TEXT NOT NULL,
        created_at INTEGER NOT NULL,
        expires_at INTEGER NOT NULL,
+       updated_at INTEGER NOT NULL,
+       last_used INTEGER NOT NULL,
+       revoked BOOLEAN NOT NULL,
+       updated_by_user_id TEXT NOT NULL,
        FOREIGN KEY(user_id) REFERENCES users(id)
    );
    `
@@ -65,6 +106,36 @@ func InitDB(dataSourceName string) (*modelDao, error) {
        return nil, fmt.Errorf("error in creating tables: %v", err.Error())
    }

+   if !columnExists(m.DB(), "personal_access_tokens", "role") {
+       _, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN role TEXT NOT NULL DEFAULT 'ADMIN';")
+       if err != nil {
+           return nil, fmt.Errorf("error in adding column: %v", err.Error())
+       }
+   }
+   if !columnExists(m.DB(), "personal_access_tokens", "updated_at") {
+       _, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_at INTEGER NOT NULL DEFAULT 0;")
+       if err != nil {
+           return nil, fmt.Errorf("error in adding column: %v", err.Error())
+       }
+   }
+   if !columnExists(m.DB(), "personal_access_tokens", "last_used") {
+       _, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;")
+       if err != nil {
+           return nil, fmt.Errorf("error in adding column: %v", err.Error())
+       }
+   }
+   if !columnExists(m.DB(), "personal_access_tokens", "revoked") {
+       _, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN revoked BOOLEAN NOT NULL DEFAULT FALSE;")
+       if err != nil {
+           return nil, fmt.Errorf("error in adding column: %v", err.Error())
+       }
+   }
+   if !columnExists(m.DB(), "personal_access_tokens", "updated_by_user_id") {
+       _, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_by_user_id TEXT NOT NULL DEFAULT '';")
+       if err != nil {
+           return nil, fmt.Errorf("error in adding column: %v", err.Error())
+       }
+   }
    return m, nil
}
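The five guarded ALTER TABLE blocks above share one shape: check columnExists, then add the column with a default. A minimal sketch of a helper that would collapse the repetition (the helper name is hypothetical; columnExists, fmt, and sqlx come from the file this hunk patches):

// addColumnIfAbsent is a hypothetical consolidation of the repeated
// migration blocks above, not code from the diff itself.
func addColumnIfAbsent(db *sqlx.DB, table, column, definition string) error {
    if columnExists(db, table, column) {
        return nil // column already migrated; nothing to do
    }
    stmt := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s;", table, column, definition)
    if _, err := db.Exec(stmt); err != nil {
        return fmt.Errorf("error in adding column: %v", err)
    }
    return nil
}

InitDB could then call it once per column, e.g. addColumnIfAbsent(m.DB(), "personal_access_tokens", "role", "TEXT NOT NULL DEFAULT 'ADMIN'").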
@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"
    "strconv"
+   "time"

    "go.signoz.io/signoz/ee/query-service/model"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
@@ -12,40 +13,124 @@ import (

func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) {
    result, err := m.DB().ExecContext(ctx,
-       "INSERT INTO personal_access_tokens (user_id, token, name, created_at, expires_at) VALUES ($1, $2, $3, $4, $5)",
+       "INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
        p.UserID,
        p.Token,
+       p.Role,
        p.Name,
        p.CreatedAt,
-       p.ExpiresAt)
+       p.ExpiresAt,
+       p.UpdatedAt,
+       p.UpdatedByUserID,
+       p.LastUsed,
+       p.Revoked,
+   )
    if err != nil {
-       zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
+       zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
        return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
    }
    id, err := result.LastInsertId()
    if err != nil {
-       zap.S().Errorf("Failed to get last inserted id, err: %v", zap.Error(err))
+       zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
        return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
    }
    p.Id = strconv.Itoa(int(id))
+   createdByUser, _ := m.GetUser(ctx, p.UserID)
+   if createdByUser == nil {
+       p.CreatedByUser = model.User{
+           NotFound: true,
+       }
+   } else {
+       p.CreatedByUser = model.User{
+           Id:                createdByUser.Id,
+           Name:              createdByUser.Name,
+           Email:             createdByUser.Email,
+           CreatedAt:         createdByUser.CreatedAt,
+           ProfilePictureURL: createdByUser.ProfilePictureURL,
+           NotFound:          false,
+       }
+   }
    return p, nil
}

-func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
+func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError {
+   _, err := m.DB().ExecContext(ctx,
+       "UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;",
+       p.Role,
+       p.Name,
+       p.UpdatedAt,
+       p.UpdatedByUserID,
+       id)
+   if err != nil {
+       zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
+       return model.InternalError(fmt.Errorf("PAT update failed"))
+   }
+   return nil
+}
+
+func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) basemodel.BaseApiError {
+   _, err := m.DB().ExecContext(ctx,
+       "UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;",
+       lastUsed,
+       token)
+   if err != nil {
+       zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
+       return model.InternalError(fmt.Errorf("PAT last used update failed"))
+   }
+   return nil
+}
+
+func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) {
    pats := []model.PAT{}

-   if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE user_id=?;`, userID); err != nil {
-       zap.S().Errorf("Failed to fetch PATs for user: %s, err: %v", userID, zap.Error(err))
+   if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
+       zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
        return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
    }
+   for i := range pats {
+       createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
+       if createdByUser == nil {
+           pats[i].CreatedByUser = model.User{
+               NotFound: true,
+           }
+       } else {
+           pats[i].CreatedByUser = model.User{
+               Id:                createdByUser.Id,
+               Name:              createdByUser.Name,
+               Email:             createdByUser.Email,
+               CreatedAt:         createdByUser.CreatedAt,
+               ProfilePictureURL: createdByUser.ProfilePictureURL,
+               NotFound:          false,
+           }
+       }
+
+       updatedByUser, _ := m.GetUser(ctx, pats[i].UpdatedByUserID)
+       if updatedByUser == nil {
+           pats[i].UpdatedByUser = model.User{
+               NotFound: true,
+           }
+       } else {
+           pats[i].UpdatedByUser = model.User{
+               Id:                updatedByUser.Id,
+               Name:              updatedByUser.Name,
+               Email:             updatedByUser.Email,
+               CreatedAt:         updatedByUser.CreatedAt,
+               ProfilePictureURL: updatedByUser.ProfilePictureURL,
+               NotFound:          false,
+           }
+       }
+   }
    return pats, nil
}

-func (m *modelDao) DeletePAT(ctx context.Context, id string) basemodel.BaseApiError {
-   _, err := m.DB().ExecContext(ctx, `DELETE from personal_access_tokens where id=?;`, id)
+func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError {
+   updatedAt := time.Now().Unix()
+   _, err := m.DB().ExecContext(ctx,
+       "UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
+       userID, updatedAt, id)
    if err != nil {
-       zap.S().Errorf("Failed to delete PAT, err: %v", zap.Error(err))
-       return model.InternalError(fmt.Errorf("failed to delete PAT"))
+       zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
+       return model.InternalError(fmt.Errorf("PAT revoke failed"))
    }
    return nil
}
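Taken together, RevokePAT and the revoked=false filters turn deletion into a soft delete: revoked tokens stay in the table for audit but never match a lookup. A caller validating a presented token would still combine that with the expiry field; a minimal sketch (the helper is hypothetical, and treating a zero ExpiresAt as non-expiring is an assumption):

// isPATUsable is a hypothetical validity check over the fields this diff
// adds; GetPAT already filters revoked rows, so the Revoked test here is a
// defensive duplicate.
func isPATUsable(p model.PAT, nowUnix int64) bool {
    if p.Revoked {
        return false // soft-deleted: kept in the table, never accepted
    }
    if p.ExpiresAt == 0 {
        return true // assumed convention: zero means no expiry
    }
    return p.ExpiresAt > nowUnix
}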
@@ -53,7 +138,7 @@ func (m *modelDao) DeletePAT(ctx context.Context, id string) basemodel.BaseApiEr
func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
    pats := []model.PAT{}

-   if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=?;`, token); err != nil {
+   if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil {
        return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
    }

@@ -70,7 +155,7 @@ func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemo
func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
    pats := []model.PAT{}

-   if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=?;`, id); err != nil {
+   if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil {
        return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
    }

@@ -84,6 +169,7 @@ func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basem
    return &pats[0], nil
}

+// deprecated
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) {
    users := []basemodel.UserPayload{}
ee/query-service/integrations/gateway/noop.go (new file, 9 lines)
@@ -0,0 +1,9 @@
+package gateway
+
+import (
+   "net/http/httputil"
+)
+
+func NewNoopProxy() (*httputil.ReverseProxy, error) {
+   return &httputil.ReverseProxy{}, nil
+}
ee/query-service/integrations/gateway/proxy.go (new file, 66 lines)
@@ -0,0 +1,66 @@
+package gateway
+
+import (
+   "net/http"
+   "net/http/httputil"
+   "net/url"
+   "path"
+   "strings"
+)
+
+const (
+   RoutePrefix   string = "/api/gateway"
+   AllowedPrefix string = "/v1/workspaces/me"
+)
+
+type proxy struct {
+   url       *url.URL
+   stripPath string
+}
+
+func NewProxy(u string, stripPath string) (*httputil.ReverseProxy, error) {
+   url, err := url.Parse(u)
+   if err != nil {
+       return nil, err
+   }
+
+   proxy := &proxy{url: url, stripPath: stripPath}
+
+   return &httputil.ReverseProxy{
+       Rewrite:        proxy.rewrite,
+       ModifyResponse: proxy.modifyResponse,
+       ErrorHandler:   proxy.errorHandler,
+   }, nil
+}
+
+func (p *proxy) rewrite(pr *httputil.ProxyRequest) {
+   pr.SetURL(p.url)
+   pr.SetXForwarded()
+   pr.Out.URL.Path = cleanPath(strings.ReplaceAll(pr.Out.URL.Path, p.stripPath, ""))
+}
+
+func (p *proxy) modifyResponse(res *http.Response) error {
+   return nil
+}
+
+func (p *proxy) errorHandler(rw http.ResponseWriter, req *http.Request, err error) {
+   rw.WriteHeader(http.StatusBadGateway)
+}
+
+func cleanPath(p string) string {
+   if p == "" {
+       return "/"
+   }
+   if p[0] != '/' {
+       p = "/" + p
+   }
+   np := path.Clean(p)
+   if p[len(p)-1] == '/' && np != "/" {
+       if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
+           np = p
+       } else {
+           np += "/"
+       }
+   }
+   return np
+}
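A minimal sketch of how this reverse proxy might be mounted on an HTTP mux (only NewProxy and RoutePrefix come from the file above; the upstream address and listen port are placeholders):

package main

import (
    "log"
    "net/http"

    "go.signoz.io/signoz/ee/query-service/integrations/gateway"
)

func main() {
    // Strip the public /api/gateway prefix before forwarding upstream,
    // matching the stripPath behavior exercised in proxy_test.go below.
    proxy, err := gateway.NewProxy("http://gateway.internal:8080", gateway.RoutePrefix)
    if err != nil {
        log.Fatal(err)
    }

    mux := http.NewServeMux()
    mux.Handle(gateway.RoutePrefix+"/", proxy)
    log.Fatal(http.ListenAndServe(":8085", mux))
}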
ee/query-service/integrations/gateway/proxy_test.go (new file, 61 lines)
@@ -0,0 +1,61 @@
+package gateway
+
+import (
+   "context"
+   "net/http"
+   "net/http/httputil"
+   "net/url"
+   "testing"
+
+   "github.com/stretchr/testify/assert"
+   "github.com/stretchr/testify/require"
+)
+
+func TestProxyRewrite(t *testing.T) {
+   testCases := []struct {
+       name      string
+       url       *url.URL
+       stripPath string
+       in        *url.URL
+       expected  *url.URL
+   }{
+       {
+           name:      "SamePathAdded",
+           url:       &url.URL{Scheme: "http", Host: "backend", Path: "/path1"},
+           stripPath: "/strip",
+           in:        &url.URL{Scheme: "http", Host: "localhost", Path: "/strip/path1"},
+           expected:  &url.URL{Scheme: "http", Host: "backend", Path: "/path1/path1"},
+       },
+       {
+           name:      "NoStripPathInput",
+           url:       &url.URL{Scheme: "http", Host: "backend"},
+           stripPath: "",
+           in:        &url.URL{Scheme: "http", Host: "localhost", Path: "/strip/path1"},
+           expected:  &url.URL{Scheme: "http", Host: "backend", Path: "/strip/path1"},
+       },
+       {
+           name:      "NoStripPathPresentInReq",
+           url:       &url.URL{Scheme: "http", Host: "backend"},
+           stripPath: "/not-found",
+           in:        &url.URL{Scheme: "http", Host: "localhost", Path: "/strip/path1"},
+           expected:  &url.URL{Scheme: "http", Host: "backend", Path: "/strip/path1"},
+       },
+   }
+
+   for _, tc := range testCases {
+       proxy, err := NewProxy(tc.url.String(), tc.stripPath)
+       require.NoError(t, err)
+       inReq, err := http.NewRequest(http.MethodGet, tc.in.String(), nil)
+       require.NoError(t, err)
+       proxyReq := &httputil.ProxyRequest{
+           In:  inReq,
+           Out: inReq.Clone(context.Background()),
+       }
+       proxy.Rewrite(proxyReq)
+
+       assert.Equal(t, tc.expected.Host, proxyReq.Out.URL.Host)
+       assert.Equal(t, tc.expected.Scheme, proxyReq.Out.URL.Scheme)
+       assert.Equal(t, tc.expected.Path, proxyReq.Out.URL.Path)
+       assert.Equal(t, tc.expected.Query(), proxyReq.Out.URL.Query())
+   }
+}
@@ -2,11 +2,6 @@ package signozio

type status string

-const (
-   statusSuccess status = "success"
-   statusError   status = "error"
-)
-
type ActivationResult struct {
    Status status              `json:"status"`
    Data   *ActivationResponse `json:"data,omitempty"`

@@ -47,13 +47,13 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
    httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))

    if err != nil {
-       zap.S().Errorf("failed to connect to license.signoz.io", err)
+       zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
        return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
    }

    httpBody, err := io.ReadAll(httpResponse.Body)
    if err != nil {
-       zap.S().Errorf("failed to read activation response from license.signoz.io", err)
+       zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
        return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
    }

@@ -63,7 +63,7 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
    result := ActivationResult{}
    err = json.Unmarshal(httpBody, &result)
    if err != nil {
-       zap.S().Errorf("failed to marshal activation response from license.signoz.io", err)
+       zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
        return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
    }

@@ -48,8 +48,9 @@ func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
    return licenses, nil
}

-// GetActiveLicense fetches the latest active license from DB
-func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, error) {
+// GetActiveLicense fetches the latest active license from DB.
+// If the license is not present, expect a nil license and a nil error in the output.
+func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
    var err error
    licenses := []model.License{}

@@ -57,7 +58,7 @@ func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, error) {

    err = r.db.Select(&licenses, query)
    if err != nil {
-       return nil, fmt.Errorf("failed to get active licenses from db: %v", err)
+       return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
    }

    var active *model.License
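The new signature makes "no license" a non-error outcome, so callers have to distinguish the nil-license case from a real failure. A sketch of the expected call-site shape (the surrounding function is hypothetical):

// Hypothetical caller honoring the (nil license, nil error) contract.
func resolveLicense(ctx context.Context, repo *Repo) (*model.License, *basemodel.ApiError) {
    active, apiErr := repo.GetActiveLicense(ctx)
    if apiErr != nil {
        return nil, apiErr // genuine DB failure
    }
    if active == nil {
        // Not an error: no license installed yet; callers fall back to
        // the basic plan, as LoadActiveLicense does further down.
        return nil, nil
    }
    return active, nil
}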
@@ -97,7 +98,7 @@ func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
        l.ValidationMessage)

    if err != nil {
-       zap.S().Errorf("error in inserting license data: ", zap.Error(err))
+       zap.L().Error("error in inserting license data: ", zap.Error(err))
        return fmt.Errorf("failed to insert license in db: %v", err)
    }

@@ -110,7 +111,7 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,
    planDetails string) error {

    if key == "" {
-       return fmt.Errorf("Update Plan Details failed: license key is required")
+       return fmt.Errorf("update plan details failed: license key is required")
    }

    query := `UPDATE licenses

@@ -121,7 +122,7 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,
    _, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)

    if err != nil {
-       zap.S().Errorf("error in updating license: ", zap.Error(err))
+       zap.L().Error("error in updating license: ", zap.Error(err))
        return fmt.Errorf("failed to update license in db: %v", err)
    }

@@ -49,8 +49,7 @@ type Manager struct {
    activeFeatures basemodel.FeatureSet
}

-func StartManager(dbType string, db *sqlx.DB) (*Manager, error) {
-
+func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
    if LM != nil {
        return LM, nil
    }

@@ -66,7 +65,7 @@ func StartManager(dbType string, db *sqlx.DB) (*Manager, error) {
        repo: &repo,
    }

-   if err := m.start(); err != nil {
+   if err := m.start(features...); err != nil {
        return m, err
    }
    LM = m

@@ -74,8 +73,8 @@ func StartManager(dbType string, db *sqlx.DB) (*Manager, error) {
}

// start loads active license in memory and initiates validator
-func (lm *Manager) start() error {
-   err := lm.LoadActiveLicense()
+func (lm *Manager) start(features ...basemodel.Feature) error {
+   err := lm.LoadActiveLicense(features...)

    return err
}

@@ -85,7 +84,7 @@ func (lm *Manager) Stop() {
    <-lm.terminated
}

-func (lm *Manager) SetActive(l *model.License) {
+func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
    lm.mutex.Lock()
    defer lm.mutex.Unlock()

@@ -94,13 +93,13 @@ func (lm *Manager) SetActive(l *model.License) {
    }

    lm.activeLicense = l
-   lm.activeFeatures = l.FeatureSet
+   lm.activeFeatures = append(l.FeatureSet, features...)
    // set default features
    setDefaultFeatures(lm)

    err := lm.InitFeatures(lm.activeFeatures)
    if err != nil {
-       zap.S().Panicf("Couldn't activate features: %v", err)
+       zap.L().Panic("Couldn't activate features", zap.Error(err))
    }
    if !lm.validatorRunning {
        // we want to make sure only one validator runs,

@@ -116,22 +115,21 @@ func setDefaultFeatures(lm *Manager) {
}

// LoadActiveLicense loads the most recent active license
-func (lm *Manager) LoadActiveLicense() error {
-   var err error
+func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
    active, err := lm.repo.GetActiveLicense(context.Background())
    if err != nil {
        return err
    }
    if active != nil {
-       lm.SetActive(active)
+       lm.SetActive(active, features...)
    } else {
-       zap.S().Info("No active license found, defaulting to basic plan")
+       zap.L().Info("No active license found, defaulting to basic plan")
        // if no active license is found, we default to basic(free) plan with all default features
        lm.activeFeatures = model.BasicPlan
        setDefaultFeatures(lm)
        err := lm.InitFeatures(lm.activeFeatures)
        if err != nil {
-           zap.S().Error("Couldn't initialize features: ", err)
+           zap.L().Error("Couldn't initialize features", zap.Error(err))
            return err
        }
    }
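The variadic features parameter threads caller-supplied flags from StartManager through start and LoadActiveLicense into SetActive, where they are appended to the license's own feature set. A sketch of what a caller gains from this (the feature literal and db handle are illustrative):

// Illustrative only: pass an extra feature at startup and it is merged
// into the active license's features by SetActive above.
extra := basemodel.Feature{Name: "GATEWAY", Active: true, UsageLimit: -1}
lm, err := StartManager("sqlite", db, extra)
if err != nil {
    return err
}
_ = lm // lm.activeFeatures now includes "GATEWAY" alongside the plan's features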
@@ -149,7 +147,7 @@ func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, a
    for _, l := range licenses {
        l.ParsePlan()

-       if l.Key == lm.activeLicense.Key {
+       if lm.activeLicense != nil && l.Key == lm.activeLicense.Key {
            l.IsCurrent = true
        }

@@ -191,7 +189,7 @@ func (lm *Manager) Validator(ctx context.Context) {

// Validate validates the current active license
func (lm *Manager) Validate(ctx context.Context) (reterr error) {
-   zap.S().Info("License validation started")
+   zap.L().Info("License validation started")
    if lm.activeLicense == nil {
        return nil
    }

@@ -201,12 +199,12 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {

    lm.lastValidated = time.Now().Unix()
    if reterr != nil {
-       zap.S().Errorf("License validation completed with error", reterr)
+       zap.L().Error("License validation completed with error", zap.Error(reterr))
        atomic.AddUint64(&lm.failedAttempts, 1)
        telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
-           map[string]interface{}{"err": reterr.Error()}, "")
+           map[string]interface{}{"err": reterr.Error()}, "", true, false)
    } else {
-       zap.S().Info("License validation completed with no errors")
+       zap.L().Info("License validation completed with no errors")
    }

    lm.mutex.Unlock()

@@ -214,7 +212,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {

    response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
    if apiError != nil {
-       zap.S().Errorf("failed to validate license", apiError)
+       zap.L().Error("failed to validate license", zap.Error(apiError.Err))
        return apiError.Err
    }

@@ -235,7 +233,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
    }

    if err := l.ParsePlan(); err != nil {
-       zap.S().Errorf("failed to parse updated license", zap.Error(err))
+       zap.L().Error("failed to parse updated license", zap.Error(err))
        return err
    }

@@ -245,7 +243,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
    if err != nil {
        // unexpected db write issue but we can let the user continue
        // and wait for update to work in next cycle.
-       zap.S().Errorf("failed to validate license", zap.Error(err))
+       zap.L().Error("failed to validate license", zap.Error(err))
    }
}

@@ -263,14 +261,14 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
            userEmail, err := auth.GetEmailFromJwt(ctx)
            if err == nil {
                telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
-                   map[string]interface{}{"err": errResponse.Err.Error()}, userEmail)
+                   map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false)
            }
        }
    }()

    response, apiError := validate.ActivateLicense(key, "")
    if apiError != nil {
-       zap.S().Errorf("failed to activate license", zap.Error(apiError.Err))
+       zap.L().Error("failed to activate license", zap.Error(apiError.Err))
        return nil, apiError
    }

@@ -284,14 +282,14 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
    err := l.ParsePlan()

    if err != nil {
-       zap.S().Errorf("failed to activate license", zap.Error(err))
+       zap.L().Error("failed to activate license", zap.Error(err))
        return nil, model.InternalError(err)
    }

    // store the license before activating it
    err = lm.repo.InsertLicense(ctx, l)
    if err != nil {
-       zap.S().Errorf("failed to activate license", zap.Error(err))
+       zap.L().Error("failed to activate license", zap.Error(err))
        return nil, model.InternalError(err)
    }

@@ -32,7 +32,7 @@ func InitDB(db *sqlx.DB) error {

    _, err = db.Exec(table_schema)
    if err != nil {
-       return fmt.Errorf("Error in creating licenses table: %s", err.Error())
+       return fmt.Errorf("error in creating licenses table: %s", err.Error())
    }

    table_schema = `CREATE TABLE IF NOT EXISTS feature_status (

@@ -45,7 +45,7 @@ func InitDB(db *sqlx.DB) error {

    _, err = db.Exec(table_schema)
    if err != nil {
-       return fmt.Errorf("Error in creating feature_status table: %s", err.Error())
+       return fmt.Errorf("error in creating feature_status table: %s", err.Error())
    }

    return nil

@@ -14,10 +14,13 @@ import (
    semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
    "go.signoz.io/signoz/ee/query-service/app"
    "go.signoz.io/signoz/pkg/query-service/auth"
-   "go.signoz.io/signoz/pkg/query-service/constants"
    baseconst "go.signoz.io/signoz/pkg/query-service/constants"
+   "go.signoz.io/signoz/pkg/query-service/migrate"
    "go.signoz.io/signoz/pkg/query-service/version"
    "google.golang.org/grpc"
+   "google.golang.org/grpc/credentials/insecure"
+
+   prommodel "github.com/prometheus/common/model"

    zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
    zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"

@@ -27,18 +30,19 @@ import (
)

func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
-   config := zap.NewDevelopmentConfig()
+   config := zap.NewProductionConfig()
    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
    defer stop()

-   config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
-   otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
-   consoleEncoder := zapcore.NewConsoleEncoder(config.EncoderConfig)
-   defaultLogLevel := zapcore.DebugLevel
-   config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
+   config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
+   config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
    config.EncoderConfig.TimeKey = "timestamp"
    config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

+   otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
+   consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig)
+   defaultLogLevel := zapcore.InfoLevel
+
    res := resource.NewWithAttributes(
        semconv.SchemaURL,
        semconv.ServiceNameKey.String("query-service"),

@@ -48,14 +52,16 @@ func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
        zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
    )

-   if enableQueryServiceLogOTLPExport == true {
-       conn, err := grpc.DialContext(ctx, constants.OTLPTarget, grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(time.Second*30))
+   if enableQueryServiceLogOTLPExport {
+       ctx, cancel := context.WithTimeout(ctx, time.Second*30)
+       defer cancel()
+       conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
-           log.Println("failed to connect to otlp collector to export query service logs with error:", err)
+           log.Fatalf("failed to establish connection: %v", err)
        } else {
            logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
            if err != nil {
-               logExportBatchSizeInt = 1000
+               logExportBatchSizeInt = 512
            }
            ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
                BatchSize: logExportBatchSizeInt,

@@ -73,6 +79,10 @@ func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
    return logger
}

+func init() {
+   prommodel.NameValidationScheme = prommodel.UTF8Validation
+}
+
func main() {
    var promConfigPath, skipTopLvlOpsPath string

@@ -83,28 +93,30 @@ func main() {
    var ruleRepoURL string
    var cluster string

+   var useLogsNewSchema bool
    var cacheConfigPath, fluxInterval string
    var enableQueryServiceLogOTLPExport bool
-   var preferDelta bool
    var preferSpanMetrics bool

    var maxIdleConns int
    var maxOpenConns int
    var dialTimeout time.Duration
+   var gatewayUrl string

+   flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
    flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
    flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
    flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
-   flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
    flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
    flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
    flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
    flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
    flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
    flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
-   flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)")
+   flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
    flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
    flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
+   flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")

    flag.Parse()

@@ -113,14 +125,12 @@ func main() {
    zap.ReplaceGlobals(loggerMgr)
    defer loggerMgr.Sync() // flushes buffer, if any

-   logger := loggerMgr.Sugar()
    version.PrintVersion()

    serverOptions := &app.ServerOptions{
        HTTPHostPort:      baseconst.HTTPHostPort,
        PromConfigPath:    promConfigPath,
        SkipTopLvlOpsPath: skipTopLvlOpsPath,
-       PreferDelta:       preferDelta,
        PreferSpanMetrics: preferSpanMetrics,
        PrivateHostPort:   baseconst.PrivateHostPort,
        DisableRules:      disableRules,

@@ -131,28 +141,36 @@ func main() {
        CacheConfigPath:  cacheConfigPath,
        FluxInterval:     fluxInterval,
        Cluster:          cluster,
+       GatewayUrl:       gatewayUrl,
+       UseLogsNewSchema: useLogsNewSchema,
    }

    // Read the jwt secret key
    auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")

    if len(auth.JwtSecret) == 0 {
-       zap.S().Warn("No JWT secret key is specified.")
+       zap.L().Warn("No JWT secret key is specified.")
    } else {
-       zap.S().Info("No JWT secret key set successfully.")
+       zap.L().Info("JWT secret key set successfully.")
+   }
+
+   if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
+       zap.L().Error("Failed to migrate", zap.Error(err))
+   } else {
+       zap.L().Info("Migration successful")
    }

    server, err := app.NewServer(serverOptions)
    if err != nil {
-       logger.Fatal("Failed to create server", zap.Error(err))
+       zap.L().Fatal("Failed to create server", zap.Error(err))
    }

    if err := server.Start(); err != nil {
-       logger.Fatal("Could not start servers", zap.Error(err))
+       zap.L().Fatal("Could not start server", zap.Error(err))
    }

    if err := auth.InitAuthCache(context.Background()); err != nil {
-       logger.Fatal("Failed to initialize auth cache", zap.Error(err))
+       zap.L().Fatal("Failed to initialize auth cache", zap.Error(err))
    }

    signalsChannel := make(chan os.Signal, 1)

@@ -161,9 +179,9 @@ func main() {
    for {
        select {
        case status := <-server.HealthCheckStatus():
-           logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
+           zap.L().Info("Received HealthCheck status: ", zap.Int("status", int(status)))
        case <-signalsChannel:
-           logger.Fatal("Received OS Interrupt Signal ... ")
+           zap.L().Fatal("Received OS Interrupt Signal ... ")
            server.Stop()
        }
    }

@@ -9,8 +9,8 @@ import (
    "github.com/google/uuid"
    "github.com/pkg/errors"
    saml2 "github.com/russellhaering/gosaml2"
-   "go.signoz.io/signoz/ee/query-service/sso/saml"
    "go.signoz.io/signoz/ee/query-service/sso"
+   "go.signoz.io/signoz/ee/query-service/sso/saml"
    basemodel "go.signoz.io/signoz/pkg/query-service/model"
    "go.uber.org/zap"
)

@@ -24,16 +24,16 @@ const (

// OrgDomain identify org owned web domains for auth and other purposes
type OrgDomain struct {
    Id         uuid.UUID `json:"id"`
    Name       string    `json:"name"`
    OrgId      string    `json:"orgId"`
    SsoEnabled bool      `json:"ssoEnabled"`
    SsoType    SSOType   `json:"ssoType"`

    SamlConfig       *SamlConfig        `json:"samlConfig"`
    GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`

    Org *basemodel.Organization
}

func (od *OrgDomain) String() string {

@@ -100,11 +100,11 @@ func (od *OrgDomain) GetSAMLCert() string {
    return ""
}

// PrepareGoogleOAuthProvider creates GoogleProvider that is used in
// requesting OAuth and also used in processing response from google
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
    if od.GoogleAuthConfig == nil {
-       return nil, fmt.Errorf("Google auth is not setup correctly for this domain")
+       return nil, fmt.Errorf("GOOGLE OAUTH is not setup correctly for this domain")
    }

    return od.GoogleAuthConfig.GetProvider(od.Name, siteUrl)

@@ -137,38 +137,36 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro
}

func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {

    fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)

    // build redirect url from window.location sent by frontend
    redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)

    // prepare state that gets relayed back when the auth provider
    // calls back our url. here we pass the app url (where signoz runs)
    // and the domain Id. The domain Id helps in identifying sso config
    // when the call back occurs and the app url is useful in redirecting user
    // back to the right path.
    // why do we need to pass app url? the callback typically is handled by backend
    // and sometimes backend might right at a different port or is unaware of frontend
    // endpoint (unless SITE_URL param is set). hence, we receive this build sso request
    // along with frontend window.location and use it to relay the information through
    // auth provider to the backend (HandleCallback or HandleSSO method).
    relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)

-   switch (od.SsoType) {
+   switch od.SsoType {
    case SAML:

        sp, err := od.PrepareSamlRequest(siteUrl)
        if err != nil {
            return "", err
        }

        return sp.BuildAuthURL(relayState)

    case GoogleAuth:

        googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl)
        if err != nil {
            return "", err

@@ -176,9 +174,8 @@ func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
        return googleProvider.BuildAuthURL(relayState)

    default:
-       zap.S().Errorf("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
+       zap.L().Error("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
        return "", fmt.Errorf("unsupported SSO config for the domain")
    }

}

@@ -1,10 +1,32 @@
|
|||||||
package model
|
package model
|
||||||
|
|
||||||
type PAT struct {
|
type User struct {
|
||||||
Id string `json:"id" db:"id"`
|
Id string `json:"id" db:"id"`
|
||||||
UserID string `json:"userId" db:"user_id"`
|
Name string `json:"name" db:"name"`
|
||||||
Token string `json:"token" db:"token"`
|
Email string `json:"email" db:"email"`
|
||||||
Name string `json:"name" db:"name"`
|
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
||||||
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
ProfilePictureURL string `json:"profilePictureURL" db:"profile_picture_url"`
|
||||||
ExpiresAt int64 `json:"expiresAt" db:"expires_at"`
|
NotFound bool `json:"notFound"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type CreatePATRequestBody struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Role string `json:"role"`
|
||||||
|
ExpiresInDays int64 `json:"expiresInDays"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PAT struct {
|
||||||
|
Id string `json:"id" db:"id"`
|
||||||
|
UserID string `json:"userId" db:"user_id"`
|
||||||
|
CreatedByUser User `json:"createdByUser"`
|
||||||
|
UpdatedByUser User `json:"updatedByUser"`
|
||||||
|
Token string `json:"token" db:"token"`
|
||||||
|
Role string `json:"role" db:"role"`
|
||||||
|
Name string `json:"name" db:"name"`
|
||||||
|
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
||||||
|
ExpiresAt int64 `json:"expiresAt" db:"expires_at"`
|
||||||
|
UpdatedAt int64 `json:"updatedAt" db:"updated_at"`
|
||||||
|
LastUsed int64 `json:"lastUsed" db:"last_used"`
|
||||||
|
Revoked bool `json:"revoked" db:"revoked"`
|
||||||
|
UpdatedByUserID string `json:"updatedByUserId" db:"updated_by_user_id"`
|
||||||
}
|
}
|
||||||
|
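For reference, the struct tags above imply a create-PAT request body shaped like the following. This is a sketch only: the field values are made up, and the exact role strings accepted by the API are not shown in this diff.

reqBody, _ := json.Marshal(model.CreatePATRequestBody{
	Name:          "ci-token",
	Role:          "VIEWER", // assumed role value, for illustration only
	ExpiresInDays: 30,
})
fmt.Println(string(reqBody))
// {"name":"ci-token","role":"VIEWER","expiresInDays":30}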
@@ -11,6 +11,8 @@ const Enterprise = "ENTERPRISE_PLAN"
 const DisableUpsell = "DISABLE_UPSELL"
 const Onboarding = "ONBOARDING"
 const ChatSupport = "CHAT_SUPPORT"
+const Gateway = "GATEWAY"
+const PremiumSupport = "PREMIUM_SUPPORT"

 var BasicPlan = basemodel.FeatureSet{
 	basemodel.Feature{
@@ -52,14 +54,14 @@ var BasicPlan = basemodel.FeatureSet{
 		Name:       basemodel.QueryBuilderPanels,
 		Active:     true,
 		Usage:      0,
-		UsageLimit: 20,
+		UsageLimit: -1,
 		Route:      "",
 	},
 	basemodel.Feature{
 		Name:       basemodel.QueryBuilderAlerts,
 		Active:     true,
 		Usage:      0,
-		UsageLimit: 10,
+		UsageLimit: -1,
 		Route:      "",
 	},
 	basemodel.Feature{
@@ -90,6 +92,13 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.AlertChannelEmail,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 	basemodel.Feature{
 		Name:       basemodel.AlertChannelMsTeams,
 		Active:     false,
@@ -104,6 +113,27 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       Gateway,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       PremiumSupport,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       basemodel.AnomalyDetection,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }

 var ProPlan = basemodel.FeatureSet{
@@ -177,6 +207,13 @@ var ProPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.AlertChannelEmail,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 	basemodel.Feature{
 		Name:       basemodel.AlertChannelMsTeams,
 		Active:     true,
@@ -191,6 +228,27 @@ var ProPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       Gateway,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       PremiumSupport,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       basemodel.AnomalyDetection,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }

 var EnterprisePlan = basemodel.FeatureSet{
@@ -264,6 +322,13 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.AlertChannelEmail,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 	basemodel.Feature{
 		Name:       basemodel.AlertChannelMsTeams,
 		Active:     true,
@@ -279,17 +344,38 @@ var EnterprisePlan = basemodel.FeatureSet{
 		Route:      "",
 	},
 	basemodel.Feature{
 		Name:       Onboarding,
 		Active:     true,
 		Usage:      0,
 		UsageLimit: -1,
 		Route:      "",
 	},
 	basemodel.Feature{
 		Name:       ChatSupport,
 		Active:     true,
 		Usage:      0,
 		UsageLimit: -1,
 		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       Gateway,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       PremiumSupport,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
+	basemodel.Feature{
+		Name:       basemodel.AnomalyDetection,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
 	},
 }
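Throughout these plans a UsageLimit of -1 reads as "unlimited", whereas BasicPlan previously capped QueryBuilderPanels at 20 and QueryBuilderAlerts at 10. A minimal lookup sketch under that assumption (the helper name and exact field types are illustrative, not from this diff):

// featureLimit scans a FeatureSet for a feature by name; a -1 limit is
// treated as unlimited, matching how the plan definitions above use it.
func featureLimit(fs basemodel.FeatureSet, name string) (active bool, unlimited bool, limit int64) {
	for _, f := range fs {
		if f.Name == name {
			return f.Active, f.UsageLimit == -1, f.UsageLimit
		}
	}
	return false, false, 0
}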
393 ee/query-service/rules/anomaly.go Normal file
@@ -0,0 +1,393 @@
package rules

import (
	"context"
	"encoding/json"
	"fmt"
	"math"
	"strings"
	"sync"
	"time"

	"go.uber.org/zap"

	"go.signoz.io/signoz/ee/query-service/anomaly"
	"go.signoz.io/signoz/pkg/query-service/cache"
	"go.signoz.io/signoz/pkg/query-service/common"
	"go.signoz.io/signoz/pkg/query-service/model"

	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/query-service/utils/labels"
	"go.signoz.io/signoz/pkg/query-service/utils/times"
	"go.signoz.io/signoz/pkg/query-service/utils/timestamp"

	"go.signoz.io/signoz/pkg/query-service/formatter"

	baserules "go.signoz.io/signoz/pkg/query-service/rules"

	yaml "gopkg.in/yaml.v2"
)

const (
	RuleTypeAnomaly = "anomaly_rule"
)

type AnomalyRule struct {
	*baserules.BaseRule

	mtx sync.Mutex

	reader interfaces.Reader

	// querierV2 is used for alerts created after the introduction of new metrics query builder
	querierV2 interfaces.Querier

	provider anomaly.Provider

	seasonality anomaly.Seasonality
}

func NewAnomalyRule(
	id string,
	p *baserules.PostableRule,
	featureFlags interfaces.FeatureLookup,
	reader interfaces.Reader,
	cache cache.Cache,
	opts ...baserules.RuleOption,
) (*AnomalyRule, error) {

	zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))

	baseRule, err := baserules.NewBaseRule(id, p, reader, opts...)
	if err != nil {
		return nil, err
	}

	t := AnomalyRule{
		BaseRule: baseRule,
	}

	switch strings.ToLower(p.RuleCondition.Seasonality) {
	case "hourly":
		t.seasonality = anomaly.SeasonalityHourly
	case "daily":
		t.seasonality = anomaly.SeasonalityDaily
	case "weekly":
		t.seasonality = anomaly.SeasonalityWeekly
	default:
		t.seasonality = anomaly.SeasonalityDaily
	}

	zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String()))

	querierOptsV2 := querierV2.QuerierOptions{
		Reader:        reader,
		Cache:         cache,
		KeyGenerator:  queryBuilder.NewKeyGenerator(),
		FeatureLookup: featureFlags,
	}

	t.querierV2 = querierV2.NewQuerier(querierOptsV2)
	t.reader = reader
	if t.seasonality == anomaly.SeasonalityHourly {
		t.provider = anomaly.NewHourlyProvider(
			anomaly.WithCache[*anomaly.HourlyProvider](cache),
			anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.HourlyProvider](reader),
			anomaly.WithFeatureLookup[*anomaly.HourlyProvider](featureFlags),
		)
	} else if t.seasonality == anomaly.SeasonalityDaily {
		t.provider = anomaly.NewDailyProvider(
			anomaly.WithCache[*anomaly.DailyProvider](cache),
			anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.DailyProvider](reader),
			anomaly.WithFeatureLookup[*anomaly.DailyProvider](featureFlags),
		)
	} else if t.seasonality == anomaly.SeasonalityWeekly {
		t.provider = anomaly.NewWeeklyProvider(
			anomaly.WithCache[*anomaly.WeeklyProvider](cache),
			anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.WeeklyProvider](reader),
			anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](featureFlags),
		)
	}
	return &t, nil
}

func (r *AnomalyRule) Type() baserules.RuleType {
	return RuleTypeAnomaly
}

func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) {

	zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.EvalWindow().Milliseconds()), zap.Int64("evalDelay", r.EvalDelay().Milliseconds()))

	start := ts.Add(-time.Duration(r.EvalWindow())).UnixMilli()
	end := ts.UnixMilli()

	if r.EvalDelay() > 0 {
		start = start - int64(r.EvalDelay().Milliseconds())
		end = end - int64(r.EvalDelay().Milliseconds())
	}
	// round to minute otherwise we could potentially miss data
	start = start - (start % (60 * 1000))
	end = end - (end % (60 * 1000))

	compositeQuery := r.Condition().CompositeQuery

	if compositeQuery.PanelType != v3.PanelTypeGraph {
		compositeQuery.PanelType = v3.PanelTypeGraph
	}

	// default mode
	return &v3.QueryRangeParamsV3{
		Start:          start,
		End:            end,
		Step:           int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60)),
		CompositeQuery: compositeQuery,
		Variables:      make(map[string]interface{}, 0),
		NoCache:        false,
	}, nil
}

func (r *AnomalyRule) GetSelectedQuery() string {
	return r.Condition().GetSelectedQueryName()
}

func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, ts time.Time) (baserules.Vector, error) {

	params, err := r.prepareQueryRange(ts)
	if err != nil {
		return nil, err
	}
	err = r.PopulateTemporality(ctx, params)
	if err != nil {
		return nil, fmt.Errorf("internal error while setting temporality")
	}

	anomalies, err := r.provider.GetAnomalies(ctx, &anomaly.GetAnomaliesRequest{
		Params:      params,
		Seasonality: r.seasonality,
	})
	if err != nil {
		return nil, err
	}

	var queryResult *v3.Result
	for _, result := range anomalies.Results {
		if result.QueryName == r.GetSelectedQuery() {
			queryResult = result
			break
		}
	}

	var resultVector baserules.Vector

	scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
	zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON)))

	for _, series := range queryResult.AnomalyScores {
		smpl, shouldAlert := r.ShouldAlert(*series)
		if shouldAlert {
			resultVector = append(resultVector, smpl)
		}
	}
	return resultVector, nil
}

func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {

	prevState := r.State()

	valueFormatter := formatter.FromUnit(r.Unit())
	res, err := r.buildAndRunQuery(ctx, ts)

	if err != nil {
		return nil, err
	}

	r.mtx.Lock()
	defer r.mtx.Unlock()

	resultFPs := map[uint64]struct{}{}
	var alerts = make(map[uint64]*baserules.Alert, len(res))

	for _, smpl := range res {
		l := make(map[string]string, len(smpl.Metric))
		for _, lbl := range smpl.Metric {
			l[lbl.Name] = lbl.Value
		}

		value := valueFormatter.Format(smpl.V, r.Unit())
		threshold := valueFormatter.Format(r.TargetVal(), r.Unit())
		zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))

		tmplData := baserules.AlertTemplateData(l, value, threshold)
		// Inject some convenience variables that are easier to remember for users
		// who are not used to Go's templating system.
		defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}"

		// utility function to apply go template on labels and annotations
		expand := func(text string) string {

			tmpl := baserules.NewTemplateExpander(
				ctx,
				defs+text,
				"__alert_"+r.Name(),
				tmplData,
				times.Time(timestamp.FromTime(ts)),
				nil,
			)
			result, err := tmpl.Expand()
			if err != nil {
				result = fmt.Sprintf("<error expanding template: %s>", err)
				zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
			}
			return result
		}

		lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel)
		resultLabels := labels.NewBuilder(smpl.Metric).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel).Labels()

		for name, value := range r.Labels().Map() {
			lb.Set(name, expand(value))
		}

		lb.Set(labels.AlertNameLabel, r.Name())
		lb.Set(labels.AlertRuleIdLabel, r.ID())
		lb.Set(labels.RuleSourceLabel, r.GeneratorURL())

		annotations := make(labels.Labels, 0, len(r.Annotations().Map()))
		for name, value := range r.Annotations().Map() {
			annotations = append(annotations, labels.Label{Name: name, Value: expand(value)})
		}
		if smpl.IsMissing {
			lb.Set(labels.AlertNameLabel, "[No data] "+r.Name())
		}

		lbs := lb.Labels()
		h := lbs.Hash()
		resultFPs[h] = struct{}{}

		if _, ok := alerts[h]; ok {
			zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
			err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
			return nil, err
		}

		alerts[h] = &baserules.Alert{
			Labels:            lbs,
			QueryResultLables: resultLabels,
			Annotations:       annotations,
			ActiveAt:          ts,
			State:             model.StatePending,
			Value:             smpl.V,
			GeneratorURL:      r.GeneratorURL(),
			Receivers:         r.PreferredChannels(),
			Missing:           smpl.IsMissing,
		}
	}

	zap.L().Info("number of alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))

	// alerts[h] is ready, add or update active list now
	for h, a := range alerts {
		// Check whether we already have alerting state for the identifying label set.
		// Update the last value and annotations if so, create a new alert entry otherwise.
		if alert, ok := r.Active[h]; ok && alert.State != model.StateInactive {

			alert.Value = a.Value
			alert.Annotations = a.Annotations
			alert.Receivers = r.PreferredChannels()
			continue
		}

		r.Active[h] = a
	}

	itemsToAdd := []model.RuleStateHistory{}

	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, a := range r.Active {
		labelsJSON, err := json.Marshal(a.QueryResultLables)
		if err != nil {
			zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
		}
		if _, ok := resultFPs[fp]; !ok {
			// If the alert was previously firing, keep it around for a given
			// retention time so it is reported as resolved to the AlertManager.
			if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > baserules.ResolvedRetention) {
				delete(r.Active, fp)
			}
			if a.State != model.StateInactive {
				a.State = model.StateInactive
				a.ResolvedAt = ts
				itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
					RuleID:       r.ID(),
					RuleName:     r.Name(),
					State:        model.StateInactive,
					StateChanged: true,
					UnixMilli:    ts.UnixMilli(),
					Labels:       model.LabelsString(labelsJSON),
					Fingerprint:  a.QueryResultLables.Hash(),
					Value:        a.Value,
				})
			}
			continue
		}

		if a.State == model.StatePending && ts.Sub(a.ActiveAt) >= r.HoldDuration() {
			a.State = model.StateFiring
			a.FiredAt = ts
			state := model.StateFiring
			if a.Missing {
				state = model.StateNoData
			}
			itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
				RuleID:       r.ID(),
				RuleName:     r.Name(),
				State:        state,
				StateChanged: true,
				UnixMilli:    ts.UnixMilli(),
				Labels:       model.LabelsString(labelsJSON),
				Fingerprint:  a.QueryResultLables.Hash(),
				Value:        a.Value,
			})
		}
	}

	currentState := r.State()

	overallStateChanged := currentState != prevState
	for idx, item := range itemsToAdd {
		item.OverallStateChanged = overallStateChanged
		item.OverallState = currentState
		itemsToAdd[idx] = item
	}

	r.RecordRuleStateHistory(ctx, prevState, currentState, itemsToAdd)

	return len(r.Active), nil
}

func (r *AnomalyRule) String() string {

	ar := baserules.PostableRule{
		AlertName:         r.Name(),
		RuleCondition:     r.Condition(),
		EvalWindow:        baserules.Duration(r.EvalWindow()),
		Labels:            r.Labels().Map(),
		Annotations:       r.Annotations().Map(),
		PreferredChannels: r.PreferredChannels(),
	}

	byt, err := yaml.Marshal(ar)
	if err != nil {
		return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
	}

	return string(byt)
}
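One detail of Eval worth pulling out: the defs prefix is what makes $labels, $value and $threshold available inside user-supplied label and annotation templates. A standalone sketch of that mechanism with the standard library only (the rule itself goes through baserules.NewTemplateExpander, which layers more on top):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Same convenience-variable prefix AnomalyRule.Eval injects before user text.
	defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}"
	text := "value {{$value}} crossed {{$threshold}} for {{$labels.service}}"

	tmpl := template.Must(template.New("alert").Parse(defs + text))
	_ = tmpl.Execute(os.Stdout, map[string]interface{}{
		"Labels":    map[string]string{"service": "checkout"},
		"Value":     "921 ms",
		"Threshold": "500 ms",
	})
	// prints: value 921 ms crossed 500 ms for checkout
}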
89 ee/query-service/rules/manager.go Normal file
@@ -0,0 +1,89 @@
package rules

import (
	"fmt"
	"time"

	baserules "go.signoz.io/signoz/pkg/query-service/rules"
)

func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {

	rules := make([]baserules.Rule, 0)
	var task baserules.Task

	ruleId := baserules.RuleIdFromTaskName(opts.TaskName)
	if opts.Rule.RuleType == baserules.RuleTypeThreshold {
		// create a threshold rule
		tr, err := baserules.NewThresholdRule(
			ruleId,
			opts.Rule,
			opts.FF,
			opts.Reader,
			opts.UseLogsNewSchema,
			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
		)

		if err != nil {
			return task, err
		}

		rules = append(rules, tr)

		// create ch rule task for evaluation
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)

	} else if opts.Rule.RuleType == baserules.RuleTypeProm {

		// create promql rule
		pr, err := baserules.NewPromRule(
			ruleId,
			opts.Rule,
			opts.Logger,
			opts.Reader,
			opts.ManagerOpts.PqlEngine,
		)

		if err != nil {
			return task, err
		}

		rules = append(rules, pr)

		// create promql rule task for evaluation
		task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)

	} else if opts.Rule.RuleType == baserules.RuleTypeAnomaly {
		// create anomaly rule
		ar, err := NewAnomalyRule(
			ruleId,
			opts.Rule,
			opts.FF,
			opts.Reader,
			opts.Cache,
			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
		)
		if err != nil {
			return task, err
		}

		rules = append(rules, ar)

		// create anomaly rule task for evaluation
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)

	} else {
		return nil, fmt.Errorf("unsupported rule type %s. Supported types: %s, %s", opts.Rule.RuleType, baserules.RuleTypeProm, baserules.RuleTypeThreshold)
	}

	return task, nil
}

// newTask returns an appropriate group for the rule type
func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, ruleDB baserules.RuleDB) baserules.Task {
	if taskType == baserules.TaskTypeCh {
		return baserules.NewRuleTask(name, "", frequency, rules, opts, notify, ruleDB)
	}
	return baserules.NewPromRuleTask(name, "", frequency, rules, opts, notify, ruleDB)
}
@@ -102,6 +102,6 @@ func PrepareRequest(issuer, acsUrl, audience, entity, idp, certString string) (*
 		IDPCertificateStore: certStore,
 		SPKeyStore:          randomKeyStore,
 	}
-	zap.S().Debugf("SAML request:", sp)
+	zap.L().Debug("SAML request", zap.Any("sp", sp))
 	return sp, nil
 }
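This hunk is part of a sweep from zap's sugared logger to the structured one. The old call was doubly wrong: Debugf takes a printf-style format, and sp was passed with no matching verb, so it never rendered. The two styles, side by side (both are real zap APIs):

// sugared, printf-style: arguments need format verbs
zap.S().Debugf("SAML request: %v", sp)

// structured: constant message, values attached as typed fields
zap.L().Debug("SAML request", zap.Any("sp", sp))

Note that several of the converted Error calls below still carry a stray "%v" in the message; zap.L() does not interpolate, so those verbs will appear literally in the log line.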
@@ -53,7 +53,7 @@ func New(dbType string, modelDao dao.ModelDao, licenseRepo *license.Repo, clickh
 	tenantID := ""
 	if len(hostNameRegexMatches) == 2 {
 		tenantID = hostNameRegexMatches[1]
-		tenantID = strings.TrimRight(tenantID, "-clickhouse")
+		tenantID = strings.TrimSuffix(tenantID, "-clickhouse")
 	}

 	m := &Manager{
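The TrimRight to TrimSuffix change is a genuine bug fix, not a style tweak: TrimRight's second argument is a set of characters to strip, not a suffix, so any tenant id ending in letters drawn from "-clickhouse" got mangled. A quick, runnable demonstration (the tenant name is made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Every character of "echo" is in the cutset "-clickhouse",
	// so TrimRight strips the entire string.
	fmt.Println(strings.TrimRight("echo-clickhouse", "-clickhouse"))  // ""

	// TrimSuffix removes the exact suffix, once, which is what was intended.
	fmt.Println(strings.TrimSuffix("echo-clickhouse", "-clickhouse")) // "echo"
}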
@@ -91,12 +91,12 @@ func (lm *Manager) UploadUsage() {
 	// check if license is present or not
 	license, err := lm.licenseRepo.GetActiveLicense(ctx)
 	if err != nil {
-		zap.S().Errorf("failed to get active license: %v", zap.Error(err))
+		zap.L().Error("failed to get active license", zap.Error(err))
 		return
 	}
 	if license == nil {
 		// we will not start the usage reporting if license is not present.
-		zap.S().Info("no license present, skipping usage reporting")
+		zap.L().Info("no license present, skipping usage reporting")
 		return
 	}

@@ -123,7 +123,7 @@ func (lm *Manager) UploadUsage() {
 		dbusages := []model.UsageDB{}
 		err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
 		if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
-			zap.S().Errorf("failed to get usage from clickhouse: %v", zap.Error(err))
+			zap.L().Error("failed to get usage from clickhouse: %v", zap.Error(err))
 			return
 		}
 		for _, u := range dbusages {
@@ -133,37 +133,33 @@ func (lm *Manager) UploadUsage() {
 	}

 	if len(usages) <= 0 {
-		zap.S().Info("no snapshots to upload, skipping.")
+		zap.L().Info("no snapshots to upload, skipping.")
 		return
 	}

-	zap.S().Info("uploading usage data")
+	zap.L().Info("uploading usage data")

-	// Try to get the org name
 	orgName := ""
-	orgNames, err := lm.modelDao.GetOrgs(ctx)
-	if err != nil {
-		zap.S().Errorf("failed to get org data: %v", zap.Error(err))
-	} else {
-		if len(orgNames) != 1 {
-			zap.S().Errorf("expected one org but got %d orgs", len(orgNames))
-		} else {
-			orgName = orgNames[0].Name
-		}
+	orgNames, orgError := lm.modelDao.GetOrgs(ctx)
+	if orgError != nil {
+		zap.L().Error("failed to get org data: %v", zap.Error(orgError))
+	}
+	if len(orgNames) == 1 {
+		orgName = orgNames[0].Name
 	}

 	usagesPayload := []model.Usage{}
 	for _, usage := range usages {
 		usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
 		if err != nil {
-			zap.S().Errorf("error while decrypting usage data: %v", zap.Error(err))
+			zap.L().Error("error while decrypting usage data: %v", zap.Error(err))
 			return
 		}

 		usageData := model.Usage{}
 		err = json.Unmarshal(usageDataBytes, &usageData)
 		if err != nil {
-			zap.S().Errorf("error while unmarshalling usage data: %v", zap.Error(err))
+			zap.L().Error("error while unmarshalling usage data: %v", zap.Error(err))
 			return
 		}

@@ -188,13 +184,13 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
 	for i := 1; i <= MaxRetries; i++ {
 		apiErr := licenseserver.SendUsage(ctx, payload)
 		if apiErr != nil && i == MaxRetries {
-			zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
+			zap.L().Error("retries stopped : %v", zap.Error(apiErr))
 			// not returning error here since it is captured in the failed count
 			return
 		} else if apiErr != nil {
 			// sleeping for exponential backoff
 			sleepDuration := RetryInterval * time.Duration(i)
-			zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err))
+			zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Error(apiErr.Err))
 			time.Sleep(sleepDuration)
 		} else {
 			break
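Despite the function name and the inline comment, the sleep above grows linearly: RetryInterval multiplied by the attempt number. For contrast, a doubling schedule would look like the sketch below (illustration only, reusing RetryInterval and the loop counter i from the code above):

// linear, as implemented: 1x, 2x, 3x, ...
sleepDuration := RetryInterval * time.Duration(i)

// exponential, doubling per attempt: 1x, 2x, 4x, 8x, ...
sleepDuration = RetryInterval * time.Duration(1<<(i-1))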
@@ -205,7 +201,7 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
 func (lm *Manager) Stop() {
 	lm.scheduler.Stop()

-	zap.S().Debug("sending usage data before shutting down")
+	zap.L().Info("sending usage data before shutting down")
 	// send usage before shutting down
 	lm.UploadUsage()

3 frontend/.gitignore vendored Normal file
@@ -0,0 +1,3 @@

# Sentry Config File
.env.sentry-build-plugin
@@ -1,4 +1,4 @@
-FROM nginx:1.25.2-alpine
+FROM nginx:1.26-alpine

 # Add Maintainer Info
 LABEL maintainer="signoz"
@@ -4,10 +4,12 @@ const config: Config.InitialOptions = {
 	clearMocks: true,
 	coverageDirectory: 'coverage',
 	coverageReporters: ['text', 'cobertura', 'html', 'json-summary'],
+	collectCoverageFrom: ['src/**/*.{ts,tsx}'],
 	moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
 	modulePathIgnorePatterns: ['dist'],
 	moduleNameMapper: {
 		'\\.(css|less|scss)$': '<rootDir>/__mocks__/cssMock.ts',
+		'\\.md$': '<rootDir>/__mocks__/cssMock.ts',
 	},
 	globals: {
 		extensionsToTreatAsEsm: ['.ts'],
@@ -22,7 +24,7 @@ const config: Config.InitialOptions = {
 		'^.+\\.(js|jsx)$': 'babel-jest',
 	},
 	transformIgnorePatterns: [
-		'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios)/)',
+		'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color)/)',
 	],
 	setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
 	testPathIgnorePatterns: ['/node_modules/', '/public/'],
@@ -33,6 +35,14 @@ const config: Config.InitialOptions = {
 			browsers: ['chromium', 'firefox', 'webkit'],
 		},
 	},
+	coverageThreshold: {
+		global: {
+			statements: 80,
+			branches: 65,
+			functions: 80,
+			lines: 80,
+		},
+	},
 };

 export default config;
@@ -7,6 +7,7 @@
  */
 import '@testing-library/jest-dom';
 import 'jest-styled-components';
+import './src/styles.scss';

 import { server } from './src/mocks-server/server';
 // Establish API mocking before all tests.
@@ -19,7 +19,9 @@
 		"playwright:codegen:local": "playwright codegen http://localhost:3301",
 		"playwright:codegen:local:auth": "yarn playwright:codegen:local --load-storage=tests/auth.json",
 		"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
-		"commitlint": "commitlint --edit $1"
+		"commitlint": "commitlint --edit $1",
+		"test": "jest --coverage",
+		"test:changedsince": "jest --changedSince=develop --coverage --silent"
 	},
 	"engines": {
 		"node": ">=16.15.0"
@@ -36,13 +38,20 @@
 		"@mdx-js/loader": "2.3.0",
 		"@mdx-js/react": "2.3.0",
 		"@monaco-editor/react": "^4.3.1",
-		"@signozhq/design-tokens": "0.0.6",
+		"@radix-ui/react-tabs": "1.0.4",
+		"@radix-ui/react-tooltip": "1.0.7",
+		"@sentry/react": "7.102.1",
+		"@sentry/webpack-plugin": "2.16.0",
+		"@signozhq/design-tokens": "0.0.8",
 		"@uiw/react-md-editor": "3.23.5",
+		"@visx/group": "3.3.0",
+		"@visx/shape": "3.5.0",
+		"@visx/tooltip": "3.3.0",
 		"@xstate/react": "^3.0.0",
 		"ansi-to-html": "0.7.2",
 		"antd": "5.11.0",
 		"antd-table-saveas-excel": "2.2.1",
-		"axios": "1.6.2",
+		"axios": "1.7.4",
 		"babel-eslint": "^10.1.0",
 		"babel-jest": "^29.6.4",
 		"babel-loader": "9.1.3",
@@ -67,6 +76,7 @@
 		"fontfaceobserver": "2.3.0",
 		"history": "4.10.1",
 		"html-webpack-plugin": "5.5.0",
+		"http-proxy-middleware": "2.0.6",
 		"i18next": "^21.6.12",
 		"i18next-browser-languagedetector": "^6.1.3",
 		"i18next-http-backend": "^1.3.2",
@@ -75,11 +85,14 @@
 		"less": "^4.1.2",
 		"less-loader": "^10.2.0",
 		"lodash-es": "^4.17.21",
-		"lucide-react": "0.288.0",
+		"lucide-react": "0.379.0",
 		"mini-css-extract-plugin": "2.4.5",
 		"papaparse": "5.4.1",
+		"posthog-js": "1.160.3",
+		"rc-tween-one": "3.0.6",
 		"react": "18.2.0",
 		"react-addons-update": "15.6.3",
+		"react-beautiful-dnd": "13.1.1",
 		"react-dnd": "16.0.1",
 		"react-dnd-html5-backend": "16.0.1",
 		"react-dom": "18.2.0",
@@ -97,8 +110,11 @@
 		"react-syntax-highlighter": "15.5.0",
 		"react-use": "^17.3.2",
 		"react-virtuoso": "4.0.3",
+		"overlayscrollbars-react": "^0.5.6",
+		"overlayscrollbars": "^2.8.1",
 		"redux": "^4.0.5",
 		"redux-thunk": "^2.3.0",
+		"rehype-raw": "7.0.0",
 		"stream": "^0.0.2",
 		"style-loader": "1.3.0",
 		"styled-components": "^5.3.11",
@@ -112,6 +128,7 @@
 		"web-vitals": "^0.2.4",
 		"webpack": "5.88.2",
 		"webpack-dev-server": "^4.15.1",
+		"webpack-retry-chunk-load-plugin": "3.1.1",
 		"xstate": "^4.31.0"
 	},
 	"browserslist": {
@@ -153,6 +170,7 @@
 		"@types/papaparse": "5.3.7",
 		"@types/react": "18.0.26",
 		"@types/react-addons-update": "0.14.21",
+		"@types/react-beautiful-dnd": "13.1.8",
 		"@types/react-dom": "18.0.10",
 		"@types/react-grid-layout": "^1.1.2",
 		"@types/react-helmet-async": "1.0.3",
@@ -167,7 +185,7 @@
 		"@types/webpack-dev-server": "^4.7.2",
 		"@typescript-eslint/eslint-plugin": "^4.33.0",
 		"@typescript-eslint/parser": "^4.33.0",
-		"autoprefixer": "^9.0.0",
+		"autoprefixer": "10.4.19",
 		"babel-plugin-styled-components": "^1.12.0",
 		"compression-webpack-plugin": "9.0.0",
 		"copy-webpack-plugin": "^8.1.0",
@@ -189,11 +207,12 @@
 		"eslint-plugin-sonarjs": "^0.12.0",
 		"husky": "^7.0.4",
 		"is-ci": "^3.0.1",
-		"jest-playwright-preset": "^1.7.2",
 		"jest-styled-components": "^7.0.8",
 		"lint-staged": "^12.5.0",
 		"msw": "1.3.2",
+		"npm-run-all": "latest",
 		"portfinder-sync": "^0.0.2",
+		"postcss": "8.4.38",
 		"prettier": "2.2.1",
 		"raw-loader": "4.0.2",
 		"react-hooks-testing-library": "0.6.0",
@@ -210,7 +229,8 @@
 	},
 	"lint-staged": {
 		"*.(js|jsx|ts|tsx)": [
-			"eslint --fix"
+			"eslint --fix",
+			"sh scripts/typecheck-staged.sh"
 		]
 	},
 	"resolutions": {
@@ -218,6 +238,7 @@
 		"@types/react-dom": "18.0.10",
 		"debug": "4.3.4",
 		"semver": "7.5.4",
-		"xml2js": "0.5.0"
+		"xml2js": "0.5.0",
+		"phin": "^3.7.1"
 	}
 }
|||||||
10
frontend/public/Icons/alert_emoji.svg
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
<svg width="33" height="32" viewBox="0 0 33 32" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<path d="M4.99715 27.2944C4.70156 27.2944 4.74156 27.6477 4.74156 28.3143C4.74156 28.981 4.70156 29.3543 5.05493 29.3543C5.40831 29.3543 27.7778 29.3143 28.0134 29.2965C28.2489 29.2765 28.1889 28.4143 28.1889 28.081C28.1889 27.6699 28.2467 27.3166 27.9156 27.2966C27.5822 27.2766 5.11494 27.2944 4.99715 27.2944Z" fill="#ED6D30"/>
|
||||||
|
<path d="M5.07275 21.8602L5.09498 27.3132L27.7956 27.291L27.8467 21.7135L27.3466 21.1536L5.255 21.1158L5.07275 21.8602Z" fill="#F78A51"/>
|
||||||
|
<path d="M5.53728 21.4707L5.07278 21.8596L5.07056 22.724C5.07056 22.724 5.22169 22.8306 5.37282 22.7551C5.52395 22.6795 5.73508 22.5329 5.92177 22.5173C6.21959 22.4951 6.19514 22.7795 6.48184 22.7795C6.76855 22.7795 7.02858 22.4929 7.27083 22.4929C7.51308 22.4929 7.62421 22.7995 7.88202 22.784C8.13983 22.7684 8.28429 22.5084 8.60655 22.5173C8.86436 22.524 8.90881 22.784 9.22663 22.784C9.54445 22.784 9.70669 22.4818 9.97784 22.4818C10.249 22.4818 10.3379 22.8018 10.6401 22.8018C10.9424 22.8018 11.0246 22.4818 11.3713 22.4818C11.7181 22.4818 11.6892 22.784 11.9759 22.7529C12.2626 22.7218 12.2915 22.4729 12.6382 22.4573C12.9849 22.4418 13.0204 22.784 13.3227 22.784C13.625 22.784 13.6161 22.5373 13.8739 22.5373C14.1317 22.5373 18.9145 22.5262 19.0968 22.5262C19.279 22.5262 19.559 22.8462 19.8613 22.8462C20.1636 22.8462 20.0791 22.504 20.4103 22.4951C20.6081 22.4907 20.9925 22.824 21.2192 22.824C21.4459 22.824 21.5282 22.4818 21.7838 22.4662C22.0393 22.4507 22.4194 22.844 22.7217 22.8129C23.0239 22.7818 22.8728 22.4796 23.0995 22.4507C23.3262 22.4196 23.7796 22.784 24.0818 22.7973C24.3841 22.8129 24.1885 22.404 24.5041 22.404C24.8197 22.404 25.0642 22.7507 25.3953 22.7662C25.7265 22.7818 25.502 22.4196 25.8332 22.3884C26.1643 22.3573 26.4066 22.8418 26.7244 22.8106C27.0422 22.7795 26.9066 22.4329 27.1778 22.4173C27.4489 22.4018 27.8267 22.644 27.8267 22.644L27.8401 21.7063L14.7807 17.582L5.53728 21.4707Z" fill="#ED6D30"/>
|
||||||
|
<path d="M13.8049 29.3267C13.8049 29.3267 13.8605 22.7804 13.8516 22.6204C13.8405 22.4271 14.0116 22.3804 14.1494 22.3804C14.2871 22.3804 18.8558 22.3804 18.9935 22.3804C19.1313 22.3804 19.2113 22.4827 19.2224 22.6093C19.2335 22.736 19.2002 29.3156 19.2002 29.3156L13.8049 29.3267Z" fill="#51362F"/>
|
||||||
|
<path d="M4.15465 18.7244C4.15465 18.7244 3.23898 20.7487 3.24787 20.902C3.25676 21.0553 3.51234 21.9864 3.92128 22.0109C4.48135 22.0442 4.58359 21.5531 4.67693 21.5531C4.77028 21.5531 4.89474 22.0331 5.21478 22.0797C5.58816 22.1331 5.85708 21.5331 6.00154 21.5331C6.14601 21.5331 6.21713 22.0553 6.55495 22.0553C6.89277 22.0553 7.25281 21.4909 7.38616 21.502C7.51951 21.5131 7.64842 22.102 7.92401 22.102C8.20182 22.102 8.47296 21.5998 8.71299 21.5753C8.83745 21.5642 8.95525 22.1375 9.18194 22.1464C9.40864 22.1575 9.79535 21.5531 9.99093 21.5531C10.1865 21.5531 10.3399 22.1775 10.6377 22.1486C10.9355 22.1197 11.3378 21.5642 11.48 21.5642C11.6222 21.5642 11.7778 22.1264 12.0112 22.1375C12.2223 22.1464 12.5713 21.6087 12.7135 21.5998C12.8557 21.5909 13.0269 22.1486 13.2625 22.1486C13.498 22.1486 13.7536 21.5442 13.9492 21.5331C14.1448 21.522 14.227 22.102 14.4626 22.102C14.6982 22.102 15.0471 21.5175 15.2627 21.5087C15.4783 21.4975 15.5961 22.0686 15.8117 22.0686C16.0272 22.0686 16.2673 21.4887 16.4206 21.482C16.6584 21.4731 16.8096 22.0464 17.1385 22.0575C17.4674 22.0686 17.6008 21.5042 17.8564 21.5042C18.1119 21.5042 18.1853 22.0375 18.472 22.0486C18.7587 22.0597 18.9943 21.4953 19.2099 21.5042C19.4254 21.5153 19.5677 22.0264 19.8055 22.0264C20.0433 22.0264 20.2767 21.5042 20.4522 21.5131C20.6256 21.5242 20.8634 22.0464 21.099 22.0464C21.3346 22.0464 21.5302 21.5064 21.6435 21.502C21.8613 21.4953 22.0836 22.0664 22.3102 22.0464C22.5369 22.0264 22.7992 21.4642 22.9948 21.4731C23.1904 21.4842 23.4904 22.1108 23.726 22.0909C23.9616 22.0709 24.1616 21.4753 24.3772 21.4842C24.5928 21.4931 24.7661 22.0331 25.0395 22.0331C25.2906 22.0331 25.4306 21.5175 25.6573 21.5064C25.884 21.4953 26.0952 21.9997 26.3308 21.9753C26.5663 21.9509 26.6619 21.482 26.8686 21.4731C27.0731 21.462 27.3753 22.0042 27.6731 21.9931C27.971 21.982 28.1243 21.562 28.2888 21.5531C28.4532 21.5442 28.5955 22.0109 28.9955 22.0042C29.3556 21.9997 29.8267 21.3264 29.7334 20.8554C29.6401 20.3843 28.3599 18.5066 28.3599 18.5066L4.15465 18.7244Z" fill="#6C4D43"/>
|
||||||
|
<path d="M6.09496 13.357C6.09496 13.357 4.90148 15.0328 4.1925 16.5641C3.48352 18.0954 3.21016 19.0022 3.16571 19.8956C3.12126 20.7691 3.24794 20.9024 3.24794 20.9024L4.54366 19.4867C4.54366 19.4867 4.55699 20.8247 4.65256 20.838C4.74813 20.8513 5.74603 19.4578 5.8127 19.4445C5.8816 19.4311 5.8816 20.8513 5.97717 20.8513C6.07274 20.8513 7.09731 19.4178 7.16621 19.4178C7.2351 19.4178 7.26177 20.838 7.34401 20.838C7.42624 20.838 8.35524 19.3911 8.42414 19.4045C8.49304 19.4178 8.73751 20.9202 8.81975 20.9202C8.90198 20.9202 9.76209 19.3911 9.85765 19.3911C9.95322 19.3911 10.0621 20.9758 10.171 20.9758C10.2799 20.9758 11.1267 19.4467 11.1956 19.4467C11.2645 19.4467 11.5379 20.9625 11.6468 20.9491C11.7557 20.9358 12.5069 19.4467 12.5758 19.4734C12.6447 19.5 12.8225 20.9358 12.9447 20.9358C13.0669 20.9358 13.7226 19.4334 13.8315 19.4334C13.9404 19.4334 14.216 20.8913 14.2982 20.8913C14.3804 20.8913 15.0627 19.4289 15.145 19.4156C15.2272 19.4023 15.665 21.0269 15.8006 21.0269C15.9362 21.0269 16.3474 19.5245 16.4429 19.5378C16.5385 19.5512 17.1808 20.9713 17.2341 20.9713C17.2875 20.9713 17.7675 19.4823 17.8209 19.4823C17.8742 19.4823 18.5165 20.8335 18.6121 20.8491C18.7076 20.8624 19.0632 19.4978 19.1321 19.5245C19.201 19.5512 19.8567 20.958 19.9389 20.9713C20.0211 20.9847 20.3078 19.4956 20.3901 19.4956C20.4723 19.4956 21.3724 21.1336 21.4413 21.1202C21.5102 21.1069 21.5925 19.4667 21.6725 19.4534C21.7547 19.44 22.8326 21.0647 22.9148 21.0513C22.9971 21.038 22.9548 19.3978 23.0104 19.3978C23.066 19.3978 23.9527 20.9269 24.075 20.9136C24.1972 20.9002 24.3061 19.48 24.3884 19.48C24.4706 19.48 25.4529 21.1469 25.5774 21.1336C25.7019 21.1202 25.6041 19.5756 25.6596 19.5623C25.7152 19.5489 26.8198 20.9558 26.8753 20.9424C26.9309 20.9291 26.9153 19.4267 27.0109 19.4134C27.1065 19.4 28.131 20.8758 28.2266 20.8469C28.3222 20.8202 28.3355 19.3445 28.3911 19.3311C28.4466 19.3178 29.7268 20.8535 29.7268 20.8535C29.7268 20.8535 29.9757 19.5178 29.5357 18.2377C29.0956 16.9575 28.0266 15.1595 27.5087 14.395C26.9931 13.6304 26.6909 13.277 26.6909 13.277L14.0648 11.6591L6.09496 13.357Z" fill="#A37F69"/>
|
||||||
|
<path d="M10.4736 8.22084C10.4736 8.22084 8.78668 9.88105 7.98214 10.8412C7.17759 11.8013 6.09301 13.3548 6.09301 13.3548C6.09301 13.3548 5.69963 15.1728 5.8152 15.1862C5.93299 15.1995 7.08647 13.4615 7.19093 13.4726C7.29539 13.4859 7.02202 15.2239 7.12648 15.2506C7.23093 15.2773 8.51554 13.4482 8.57999 13.4348C8.64444 13.4215 8.3733 15.2373 8.4622 15.2639C8.5511 15.2906 9.85126 13.4482 9.92905 13.4482C10.0068 13.4482 10.1113 15.1484 10.2135 15.1484C10.3158 15.1484 11.1736 13.4237 11.2514 13.4348C11.3292 13.4482 11.5115 15.2128 11.6404 15.2373C11.7693 15.2639 12.3671 13.4082 12.4716 13.3948C12.576 13.3815 12.8339 15.3417 12.9516 15.3417C13.0694 15.3417 13.6917 13.4215 13.7695 13.4215C13.8473 13.4215 14.0429 15.3417 14.1718 15.3417C14.3007 15.3417 14.8852 13.3837 14.963 13.3837C15.0408 13.3837 15.5986 15.2639 15.6898 15.2395C15.7809 15.2128 16.2743 13.3593 16.3654 13.3704C16.4565 13.3837 16.8833 15.1862 17.041 15.2128C17.1966 15.2395 17.6122 13.4615 17.7411 13.4615C17.87 13.4615 18.2079 15.4329 18.3634 15.4329C18.519 15.4329 18.8702 13.4615 18.948 13.4615C19.0257 13.4615 19.7392 15.4084 19.857 15.4195C19.9747 15.4329 20.1037 13.5637 20.2459 13.5504C20.3881 13.5371 21.1549 15.4195 21.2327 15.4062C21.3105 15.3929 21.3749 13.5637 21.4527 13.5504C21.5305 13.5371 22.3995 15.2639 22.5417 15.2639C22.684 15.2639 22.5929 13.4726 22.724 13.4859C22.8529 13.4993 24.1508 15.3662 24.2686 15.3662C24.3864 15.3662 23.9308 13.4193 24.0353 13.3948C24.1397 13.3682 25.5021 15.4706 25.6443 15.4306C25.7866 15.3906 25.2821 13.5237 25.371 13.4971C25.4621 13.4704 26.8756 15.3262 27.0067 15.2751C27.1356 15.2239 26.7 13.277 26.7 13.277C26.7 13.277 25.3976 11.5768 24.7242 10.7478C24.0486 9.91661 22.9862 8.81425 22.9862 8.81425L17.7478 6.19836L10.4736 8.22084Z" fill="#BD9177"/>
|
||||||
|
<path d="M10.4734 8.2202C10.4734 8.2202 9.83556 9.42236 9.96447 9.49791C10.0934 9.57346 11.6736 8.05576 11.8269 8.09354C11.9803 8.13131 11.3157 9.70012 11.5336 9.75123C11.7514 9.80234 12.7959 8.0291 12.9248 8.05354C13.0515 8.07798 12.6559 9.77567 12.8604 9.84011C13.0649 9.90455 13.945 7.9891 14.085 8.01576C14.225 8.04021 14.1872 9.929 14.3139 9.94233C14.4406 9.95566 15.0918 8.10465 15.1807 8.10465C15.2696 8.10465 15.5252 10.0579 15.6785 10.069C15.8319 10.0823 16.2897 8.03576 16.3919 8.03576C16.4942 8.03576 17.0053 9.96677 17.172 9.96677C17.3387 9.96677 17.4387 8.01799 17.5276 7.98021C17.6165 7.94244 18.3633 9.85122 18.5767 9.85122C18.7611 9.85122 18.4478 7.95132 18.5633 7.92466C18.6789 7.90021 19.7368 9.889 19.9546 9.87789C20.1724 9.86456 19.7946 8.02243 19.8968 8.02243C19.9991 8.02243 21.1681 9.86456 21.3592 9.86456C21.5504 9.86456 20.9592 7.99132 21.0747 7.96466C21.1903 7.94021 22.9305 9.60679 23.0328 9.58013C23.135 9.55568 22.9817 8.81128 22.9817 8.81128C22.9817 8.81128 18.7833 4.49595 16.4342 4.48484C14.0339 4.47151 10.4734 8.2202 10.4734 8.2202Z" fill="#D2A590"/>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 9.1 KiB |
1
frontend/public/Icons/awwSnap.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="32" height="33" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M15.91 28.675c-6.199 0-12.888-3.888-12.888-12.421S9.711 3.832 15.911 3.832c3.444 0 6.621 1.134 8.977 3.2 2.555 2.267 3.91 5.466 3.91 9.222 0 3.755-1.355 6.933-3.91 9.2-2.356 2.066-5.555 3.221-8.977 3.221z" fill="url(#prefix__paint0_radial_2122_6520)"/><path d="M26.552 8.87c1.185 1.91 1.803 4.186 1.803 6.717 0 3.756-1.356 6.933-3.911 9.2-2.356 2.066-5.556 3.222-8.978 3.222-4.013 0-8.221-1.634-10.706-5.098 2.391 3.924 6.889 5.764 11.15 5.764 3.423 0 6.623-1.155 8.978-3.222 2.555-2.266 3.911-5.444 3.911-9.2 0-2.83-.771-5.346-2.247-7.383z" fill="#EB8F00"/><path d="M20.123 22.905c0 1.685-1.846 2.667-4.124 2.667-2.277 0-4.124-.989-4.124-2.667 0-1.677 1.847-3.522 4.124-3.522 2.278 0 4.124 1.838 4.124 3.522zM12.06 14.852l1.88-1.748c.267-.331.307-.778.038-1.045-.353-.355-.98-.269-1.32.136-.018.033-.03.042-.049.075l-1.333 1.938-1.804-1.682c-.027-.03-.042-.034-.067-.062-.42-.32-1.05-.267-1.315.157-.207.32-.07.745.264 1.011l2.313 1.372-1.96 1.833c-.262.326-.31.77-.04 1.044.351.358.978.276 1.32-.127.018-.033.031-.042.051-.075l1.405-2.031 1.706 1.609c.027.029.043.035.067.064.418.322 1.049.273 1.318-.149.206-.32.07-.746-.26-1.013l-2.213-1.307zM20.61 14.852l-1.879-1.748c-.267-.331-.307-.778-.036-1.045.354-.355.978-.269 1.318.136.018.033.034.042.051.075l1.334 1.938 1.806-1.682c.025-.03.04-.034.065-.062.422-.32 1.05-.267 1.317.157.205.32.067.745-.266 1.011L22 15.004l1.96 1.833c.268.33.313.775.042 1.044-.349.358-.976.276-1.318-.127-.02-.033-.033-.042-.051-.075l-1.404-2.031-1.71 1.609c-.024.029-.04.035-.066.064-.418.322-1.046.273-1.315-.149-.21-.32-.074-.746.257-1.013l2.216-1.307zM11.911 8.696c.511.044.711-.645.178-.8a4.07 4.07 0 00-1.289-.133A4.596 4.596 0 007.689 9.14c-.378.4.156.89.556.6a5.829 5.829 0 013.666-1.044zM20.044 8.696a5.85 5.85 0 013.689 1.044c.4.29.933-.2.555-.6a4.645 4.645 0 00-3.11-1.377 4.07 4.07 0 00-1.29.133.408.408 0 00-.282.504c.053.194.24.318.438.296z" fill="#422B0D"/><defs><radialGradient id="prefix__paint0_radial_2122_6520" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="translate(15.91 16.254) scale(12.657)"><stop offset=".5" stop-color="#FDE030"/><stop offset=".92" stop-color="#F7C02B"/><stop offset="1" stop-color="#F4A223"/></radialGradient></defs></svg>
|
||||||
|
After Width: | Height: | Size: 2.3 KiB |
1
frontend/public/Icons/cable-car.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="16" height="16" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_2022_1972)" stroke="#fff" stroke-width="1.333" stroke-linecap="round" stroke-linejoin="round"><path d="M6.667 2h.006M9.333 1.333h.007M1.333 6l13.334-3.333M8 8V4.333M11.333 8H4.667a2 2 0 00-2 2v2.667a2 2 0 002 2h6.666a2 2 0 002-2V10a2 2 0 00-2-2zM6 8v3.333M10 8v3.333M2.667 11.334h10.666"/></g><defs><clipPath id="prefix__clip0_2022_1972"><path fill="#fff" d="M0 0h16v16H0z"/></clipPath></defs></svg>
|
||||||
|
After Width: | Height: | Size: 507 B |
1
frontend/public/Icons/configure.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="16" height="16" fill="none" xmlns="http://www.w3.org/2000/svg"><g stroke="#C0C1C3" stroke-width="1.333" stroke-linecap="round"><path d="M9.71 4.745a.576.576 0 000 .806l.922.922a.576.576 0 00.806 0l2.171-2.171a3.455 3.455 0 01-4.572 4.572l-3.98 3.98a1.222 1.222 0 11-1.727-1.728l3.98-3.98a3.455 3.455 0 014.572-4.572L9.717 4.739l-.006.006z" stroke-linejoin="round"/><path d="M4 7L2.527 5.566a1.333 1.333 0 01-.013-1.898l.81-.81a1.333 1.333 0 011.991.119L5.333 3M10.75 10.988l1.179 1.178m0 0l-.138.138a.833.833 0 00.387 1.397v0a.833.833 0 00.792-.219l.446-.446a.833.833 0 00.176-.917v0a.833.833 0 00-1.355-.261l-.308.308z"/></g></svg>
|
||||||
|
After Width: | Height: | Size: 644 B |
1
frontend/public/Icons/dashboard_emoji.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M13.72 12.839l-9.054.92s.05.649.236.798c.178.142 5.617.066 11.048.088 5.433.023 10.82.125 10.944.072.249-.107.249-.992.249-.992l-13.424-.886zM16.55 7.787l-12.623-.32s.275.61.637.813c.523.29 3.71.889 11.518.918 7.808.028 10.635-.4 11.317-.678.58-.238 1.215-1.576 1.215-1.576l-12.064.843z" fill="#8A1E0C"/><path d="M21.95 8.658v1.335l2.176-.087V8.542l-2.176.116z" fill="#8A1E0C"/><path d="M21.948 9.566h2.177v16.797l-2.206.294.029-17.09z" fill="#EB2901"/><path d="M21.355 26.19c-.111.193-.111 2.297-.007 2.444.105.147 3.242.104 3.326 0 .085-.104.063-2.38 0-2.464-.062-.085-3.235-.125-3.32.02z" fill="#474C4F"/><path d="M8.462 9.85V8.488l2.042.125v1.22l-2.042.017z" fill="#8A1E0C"/><path d="M8.462 9.55l-.038 17.051 2.08-.207V9.566l-2.042-.015z" fill="#EB2901"/><path d="M7.804 25.919c-.073.073-.147 2.36-.02 2.464.125.104 3.14.129 3.244.024.105-.104.085-2.304.023-2.43-.063-.127-3.142-.163-3.247-.058z" fill="#474C4F"/><path d="M14.788 8.107v4.876l2.393-.33V8.108h-2.393z" fill="#EB2901"/><path d="M27.067 11.978c-.115-.16-.482-.138-.482-.138l-1.137-.013c.002-.398-.01-.913-.078-.996-.116-.137-4.542-.09-4.702.047-.091.078-.11.527-.107.898-2.738-.027-5.99-.058-8.83-.076 0-.384-.012-.849-.078-.915-.116-.116-4.22-.185-4.38-.07-.113.083-.136.647-.138.97-1.384.002-2.275.013-2.34.04-.322.137-.137 2.042-.137 2.042l22.476.16c.002.002.049-1.787-.067-1.95z" fill="#EB2901"/><path d="M3.93 6.942s-.646-.34-1.377-1.573c-.509-.858-.595-1.658-.387-1.778.21-.12 2.154 1.08 5.745 1.616a60.81 60.81 0 008.173.644c2.884.027 5.717-.135 8.397-.644 3.62-.689 4.906-1.436 5.264-1.316.36.12-.109 1.227-.369 1.78-.178.376-.944 1.77-1.515 1.87-.411.072-19.953-.09-19.953-.09l-3.977-.509z" fill="#474C4F"/><path d="M3.31 5.724c-.108.137-.057.457.212 1.06.107.237.415.782.529.917 0 0 2.982.756 11.977.7 8.995-.055 12.108-.62 12.108-.62s.911-1.277.745-1.32c-.096-.024-4.847.98-12.909.898C7.911 7.277 3.311 5.724 3.311 5.724z" fill="#EB2901"/></svg>
After Width: | Height: | Size: 2.0 KiB |
1
frontend/public/Icons/dashboards.svg
Normal file
After Width: | Height: | Size: 5.2 KiB |
1
frontend/public/Icons/emptyState.svg
Normal file
After Width: | Height: | Size: 5.6 KiB |
1
frontend/public/Icons/group.svg
Normal file
@@ -0,0 +1 @@
<svg width="16" height="16" fill="none" xmlns="http://www.w3.org/2000/svg"><g stroke="#C0C1C3" stroke-width="1.333" stroke-linecap="round" stroke-linejoin="round"><path d="M2 4.667V3.333C2 2.6 2.6 2 3.333 2h1.334M11.333 2h1.334C13.4 2 14 2.6 14 3.333v1.334M14 11.334v1.333C14 13.4 13.4 14 12.667 14h-1.334M4.667 14H3.333C2.6 14 2 13.4 2 12.667v-1.333M8.667 4.667H5.333a.667.667 0 00-.666.666v2c0 .368.298.667.666.667h3.334a.667.667 0 00.666-.667v-2a.667.667 0 00-.666-.667zM10.667 8H7.333a.667.667 0 00-.666.667v2c0 .368.298.666.666.666h3.334a.667.667 0 00.666-.666v-2A.667.667 0 0010.667 8z"/></g></svg>
After Width: | Height: | Size: 604 B |
1
frontend/public/Icons/groupBy.svg
Normal file
@@ -0,0 +1 @@
<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_4344_1236)" stroke="#C0C1C3" stroke-width="1.167" stroke-linecap="round" stroke-linejoin="round"><path d="M4.667 1.167H2.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V2.333c0-.644-.522-1.166-1.166-1.166zM8.167 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M11.667 1.167a1.17 1.17 0 011.166 1.166v2.334a1.17 1.17 0 01-1.166 1.166M5.833 10.5H2.917c-.992 0-1.75-.758-1.75-1.75v-.583"/><path d="M4.083 12.25l1.75-1.75-1.75-1.75M11.667 8.167H9.333c-.644 0-1.166.522-1.166 1.166v2.334c0 .644.522 1.166 1.166 1.166h2.334c.644 0 1.166-.522 1.166-1.166V9.333c0-.644-.522-1.166-1.166-1.166z"/></g><defs><clipPath id="prefix__clip0_4344_1236"><path fill="#fff" d="M0 0h14v14H0z"/></clipPath></defs></svg>
After Width: | Height: | Size: 878 B |
1
frontend/public/Icons/landscape.svg
Normal file
After Width: | Height: | Size: 6.1 KiB |
BIN
frontend/public/Icons/loading-plane.gif
Normal file
After Width: | Height: | Size: 88 KiB |
1
frontend/public/Icons/redis-logo.svg
Normal file
@@ -0,0 +1 @@
<svg width="24" height="24" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M23.06 17.526c-1.281.668-7.916 3.396-9.328 4.132-1.413.736-2.198.73-3.314.196C9.303 21.32 2.242 18.468.97 17.86c-.636-.303-.97-.56-.97-.802v-2.426s9.192-2.001 10.676-2.534c1.484-.532 1.999-.551 3.262-.089 1.263.463 8.814 1.826 10.062 2.283v2.391c0 .24-.288.503-.94.843z" fill="#912626"/><path d="M23.06 15.114c-1.281.668-7.916 3.396-9.329 4.132-1.412.737-2.197.73-3.313.196C9.302 18.91 2.242 16.056.97 15.45c-1.272-.608-1.298-1.027-.049-1.516 1.25-.49 8.271-3.244 9.755-3.776 1.484-.533 1.999-.552 3.262-.09 1.263.463 7.858 3.088 9.106 3.546 1.248.457 1.296.834.015 1.501z" fill="#C6302B"/><path d="M23.06 13.6c-1.281.668-7.916 3.396-9.328 4.133-1.413.736-2.198.73-3.314.196S2.242 14.543.97 13.935c-.636-.304-.97-.56-.97-.802v-2.426s9.192-2.001 10.676-2.534c1.484-.532 1.999-.551 3.262-.089C15.2 8.547 22.752 9.91 24 10.366v2.392c0 .24-.288.503-.94.843z" fill="#912626"/><path d="M23.06 11.19c-1.281.667-7.916 3.395-9.329 4.131-1.412.737-2.197.73-3.313.196-1.116-.533-8.176-3.386-9.448-3.993-1.272-.608-1.298-1.027-.049-1.516 1.25-.49 8.271-3.244 9.755-3.776 1.484-.533 1.999-.552 3.262-.09 1.263.463 7.858 3.088 9.106 3.545 1.248.458 1.296.835.015 1.502z" fill="#C6302B"/><path d="M23.06 9.53c-1.281.668-7.916 3.396-9.328 4.132-1.413.737-2.198.73-3.314.196-1.116-.533-8.176-3.386-9.448-3.993C.334 9.56 0 9.305 0 9.062V6.636s9.192-2 10.676-2.533c1.484-.533 1.999-.552 3.262-.09C15.2 4.477 22.752 5.84 24 6.297v2.392c0 .24-.288.502-.94.842z" fill="#912626"/><path d="M23.06 7.118c-1.281.668-7.916 3.396-9.329 4.132-1.412.737-2.197.73-3.313.196C9.303 10.913 2.242 8.061.97 7.453-.302 6.845-.328 6.427.921 5.937c1.25-.489 8.271-3.244 9.755-3.776 1.484-.532 1.999-.552 3.262-.089 1.263.463 7.858 3.088 9.106 3.545 1.248.457 1.296.834.015 1.501z" fill="#C6302B"/><path d="M14.933 4.758l-2.064.215-.462 1.111-.746-1.24L9.28 4.63l1.778-.641-.534-.985 1.665.651 1.569-.513-.424 1.017 1.6.6zm-2.649 5.393l-3.85-1.597 5.517-.847-1.667 2.444zM6.945 5.376c1.63 0 2.95.512 2.95 1.143 0 .632-1.32 1.144-2.95 1.144-1.629 0-2.95-.512-2.95-1.144 0-.63 1.321-1.143 2.95-1.143z" fill="#fff"/><path d="M17.371 5.062l3.266 1.29-3.263 1.29-.003-2.58z" fill="#621B1C"/><path d="M13.758 6.492l3.613-1.43.003 2.58-.354.139-3.262-1.29z" fill="#9A2928"/></svg>
After Width: | Height: | Size: 2.3 KiB |
1
frontend/public/Icons/solid-x-circle.svg
Normal file
@@ -0,0 +1 @@
<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_4062_7291)" stroke-width="1.167" stroke-linecap="round" stroke-linejoin="round"><path d="M7 12.833A5.833 5.833 0 107 1.167a5.833 5.833 0 000 11.666z" fill="#E5484D" stroke="#E5484D"/><path d="M8.75 5.25l-3.5 3.5M5.25 5.25l3.5 3.5" stroke="#121317"/></g><defs><clipPath id="prefix__clip0_4062_7291"><path fill="#fff" d="M0 0h14v14H0z"/></clipPath></defs></svg>
After Width: | Height: | Size: 467 B |
1
frontend/public/Icons/tetra-pack.svg
Normal file
@@ -0,0 +1 @@
<svg width="32" height="33" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M14.309 13.108l-6.704-3.32s-.016-.317.284-.477c.302-.16 5.053-2.107 5.435-2.107.383 0 2.62.431 4.249.793 1.629.363 5.933 1.287 5.953 1.57.02.281-4.404 4.806-4.404 4.806l-4.813-1.265z" fill="#C3FECE"/><path d="M20.423 11.037s-2.811-.826-5.546-1.469c-1.274-.3-5.016-1.084-5.016-1.084s.398-.173.698-.3c.305-.127.547-.193.547-.193s2.44.486 4.253.873c2.453.522 5.886 1.547 5.966 1.709.082.16-.902.464-.902.464z" fill="#fff"/><path d="M14.98 10.26c-.598.415-.011.666 1.09.924 1.207.282 2.127.698 2.903.247.7-.405-1.014-.845-1.8-1.014-.6-.129-1.731-.478-2.193-.158z" fill="#ACB1B2"/><path d="M17.17 11.095c-.005 0 .02-4.869.02-5.049 0-.18-.203-.342.02-.724.222-.382.804-.342.804-.342s2.416-.702 3.38-.945c.964-.242 3.098-.804 3.098-.804l.142 1.22s-2.236.631-3.342.913c-1.107.282-2.616.745-2.616.745l-.222.202.064 4.757s-.206.231-.668.231c-.45-.002-.68-.204-.68-.204z" fill="#FFD816"/><path d="M24.095 3.855c.018.38.22.616.46.616.24 0 .404-.307.369-.707-.038-.398-.296-.58-.516-.506-.22.073-.327.32-.313.597zM18.46 6.422a.209.209 0 01-.123-.038l-1.153-.769a.225.225 0 01-.063-.309.222.222 0 01.31-.062l1.153.769a.224.224 0 01.062.309.228.228 0 01-.187.1z" fill="#FEB804"/><path d="M18.636 6.235a.225.225 0 01-.178-.089c-.295-.393-.633-.84-.693-.909a.225.225 0 01-.031-.284.222.222 0 01.309-.062c.04.027.062.042.771.986.073.098.007.238-.091.312-.04.03-.04.046-.087.046z" fill="#FEB804"/><path d="M18.365 6.609c-.01 0-.022 0-.035-.003l-1.111-.175a.221.221 0 11.069-.438l1.11.176c.12.02.225.042.205.164-.016.107-.129.276-.238.276z" fill="#FEB804"/><path d="M7.596 9.764c.353 0 3.188.744 4.65 1.013 1.463.27 5.878 1.314 6.027 1.342.149.03.12 1.94.12 1.94s2.089 10.8 2.029 11.309c-.06.506-1.431 4.415-1.431 4.415s-.807.12-2.865-.478c-2.057-.598-7.488-2.089-7.817-2.506-.329-.418-.12-5.938-.298-9.338-.182-3.402-.415-7.697-.415-7.697z" fill="#79DD8A"/><path d="M24.06 27.036c.113-.375-.518-4.402-.607-8.101-.089-3.698.229-9.324.076-9.369-.154-.042-5.256 2.553-5.256 2.553s-.022 3.671.04 7.133c.08 4.48.438 10.41.676 10.53.238.12 2.302-1.035 2.924-1.372 1.102-.598 2.058-1.074 2.147-1.374z" fill="#02AB46"/><path d="M20.408 13.82l.011-2.787.914-.45.026 3.056-.422.74-.529-.56z" fill="#DBDFE1"/><path d="M12.322 14.797c-1.973-.211-3.34 1.549-3.233 3.842.127 2.709 1.91 4.704 3.842 5.102 1.93.398 3.802-.44 3.842-3.402.044-3.087-2.669-5.353-4.451-5.542z" fill="#FEFEFD"/><path d="M13.637 17.27s-.4-1.344-1.602-.986c-1.202.357-1.853 2.973.187 4.15 1.96 1.131 3.764-.944 3.133-2.288-.574-1.227-1.718-.876-1.718-.876z" fill="#EF5B44"/><path d="M13.18 15.626c-.136.049-.243.602-.1 1.13.106.396.446.939.643.903.158-.029.278-.651.13-1.173-.174-.602-.516-.918-.674-.86z" fill="#B8CF17"/><path d="M13.15 18.746c-.564-.171-1.2 1.769-.057 2.977 1.26 1.331 2.73.158 2.69-.1-.057-.358-1.044-.615-1.53-1.215-.487-.605-.774-1.562-1.102-1.662z" fill="#FD8F01"/><path d="M11.346 18.417s.113-.849-.673-.802c-.76.046-.574.944-.574.944s-.633.076-.526.778c.08.53.64.524.64.524s-.616.242-.336.945c.249.624.822.373.822.373s-.21.609.287.93c.42.272.787.043.787.043s-.023.52.557.616c.703.115 1.007-.74.507-1.136-.38-.3-.724-.067-.724-.067s.07-.166.004-.357c-.045-.125-.116-.171-.116-.171s.616-.058.516-.758c-.1-.702-.716-.616-.716-.616s.358-.286.216-.802c-.14-.518-.671-.444-.671-.444z" fill="#A281D0"/><path d="M21.04 14.595c-.511 0-2.691-2.167-2.711-2.189a.222.222 0 01.024-.313.224.224 0 01.314.022c.14.155 1.806 1.702 2.286 2 .311-.465 1.322-2.498 2.191-4.333a.224.224 0 01.296-.107.223.223 0 
01.106.296c-2.142 4.526-2.353 4.586-2.466 4.617-.013.007-.027.007-.04.007z" fill="#2D802D"/></svg>
After Width: | Height: | Size: 3.6 KiB |
1
frontend/public/Icons/tools.svg
Normal file
@@ -0,0 +1 @@
<svg width="14" height="14" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1.305 13.063c.74.739 1.637.482 2.156-.109.53-.604.813-.956.813-.956.66-.973 3.392-4.227 5.724-6.568a2.638 2.638 0 002.74-.434 2.648 2.648 0 00.922-2.041.155.155 0 00-.23-.132l-1.607.927a1.64 1.64 0 01-1.076-1.864l1.6-.923a.153.153 0 00.077-.134.153.153 0 00-.077-.133 2.65 2.65 0 00-3.66 3.563C6.11 6.826 2.966 9.604 2.15 10.223c0 0-.492.356-.962.84-.464.476-.636 1.245.117 1.999zm.542-1.137a.592.592 0 111.184 0 .592.592 0 01-1.184 0z" fill="#82AEC0"/><path d="M8.334 4.61l.353-.35a2.63 2.63 0 01-.212-2.039c.073-.12.189-.249.262-.171-.03.946.245 1.931.902 2.611.327.338.752.582 1.207.696.224.057.458.082.69.069.137-.008.519-.149.596-.044v.004a2.656 2.656 0 01-2.135.043 38.176 38.176 0 00-1.903 2.05c.262-.495 1.034-1.408 1.241-1.757a.412.412 0 00-.036-.464c-.207-.255-.633-.493-.965-.649z" fill="#2F7889"/><path d="M5.186 8.529c.06-.062.004-.167-.08-.148-.158.035-.386.125-.657.345-.531.43-1.934 1.595-2.107 1.825-.173.23.522-.003.767-.047.2-.036 1.602-1.48 2.077-1.975zM10.048 1.104c-.296.212-.563.465-.84.701-.072.061-.177.122-.25.065-.08-.064-.03-.191.03-.274C9.512.874 10.493.358 11.442.563c-.5.161-.95.223-1.395.541z" fill="#B9E4EA"/><path d="M12.408 3.583a2.1 2.1 0 01-.371.19c-.112.031-.43-.092-.522-.166l1.183-.772c.043-.028.087-.056.137-.072a.546.546 0 01.185-.014c.087.004.51-.01.56.064.05.075-.126.149-.183.183-.33.197-.66.391-.99.587zM7.867 7.687L6.624 6.254c-.45.423-.895.835-1.321 1.225l.362-.078a.482.482 0 01.439.13l.58.65c.122.122.142.334.096.5l-.065.308c.367-.423.755-.862 1.152-1.302z" fill="#2F7889"/><g><path d="M13.378 12.86l-.744.643a.686.686 0 01-.968-.072L2.84 2.779l1.135-.853 9.459 9.976a.668.668 0 01-.057.957z" fill="#A06841"/><path d="M3.648 3.752l2.1 2.535c.328-.493.494-1.084.629-1.83l-2.028-2.14a1.838 1.838 0 00-.414.48 2.17 2.17 0 00-.287.955z" fill="#7D5133"/><path d="M7.81.438C5.885.416 5.17.588 4.098 1.515l-.966.835c-.35.302-.815.566-.742 1.089.027.19.086.384.05.573-.034.179-.242.268-.39.166-.139-.096-.292-.214-.463-.234a.588.588 0 00-.45.14l-.747.664s-.107.434.729 1.38c.835.946 1.373.878 1.373.878l.702-.618a.53.53 0 00.176-.412c-.003-.184-.11-.326-.174-.49-.013-.031-.083-.143.04-.244.109-.094.333-.062.46-.027.129.034.25.088.38.122.25.065.369-.051.543-.201L6.013 3.93c.619-.536-.325-1.474-.325-1.474C5.244 1.953 7.941.687 7.941.687c.198-.069.138-.246-.13-.249z" fill="#82AEC0"/><path d="M4.076 5.338a.504.504 0 00.14.016v-.02c-.011-.12-.077-.23-.144-.33A7.18 7.18 0 002.545 3.33a1.683 1.683 0 00-.154-.111.726.726 0 00-.002.22c.027.19.086.384.05.573-.038.196-.242.25-.399.177a3.27 3.27 0 011.011 1.027c.035.056.07.115.11.168a.2.2 0 01.075-.14c.109-.095.333-.063.46-.029.13.034.25.088.38.123zM1.778 5.573c.585.613.914 1.247.734 1.42-.179.17-.799-.186-1.384-.797C.542 5.584.21 4.92.388 4.748c.18-.171.804.213 1.39.825z" fill="#2F7889"/><path d="M4.057 2.41c.465-.198.88-.623 1.422-1.09A2.53 2.53 0 016.03.964c.076-.035.048-.149-.036-.148-.278.005-.527.09-.772.196-.342.149-.644.374-.935.608-.2.16-.67.555-.965.805-.055.047-.012.12.06.12.208.002.325.014.674-.135zM1.124 4.352c-.196.221.055.281.496.646.311.257.642.018.645-.223.003-.216-.052-.333-.366-.53-.315-.199-.597-.093-.775.107z" fill="#B9E4EA"/></g></svg>
After Width: | Height: | Size: 3.2 KiB |
234
frontend/public/Images/blankDashboardTemplatePreview.svg
Normal file
After Width: | Height: | Size: 204 KiB |
19
frontend/public/Images/eyesEmoji.svg
Normal file
@@ -0,0 +1,19 @@
<svg width="32" height="33" viewBox="0 0 32 33" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M9.36806 25.9481C5.93935 25.9481 3.15283 21.7098 3.15283 16.5002C3.15283 11.2907 5.94157 7.05238 9.36806 7.05238C12.7945 7.05238 15.5833 11.2907 15.5833 16.5002C15.5833 21.7098 12.7945 25.9481 9.36806 25.9481Z" fill="#FAFAFA"/>
<path d="M9.36815 7.49694C10.8414 7.49694 12.2524 8.38594 13.3391 10.0017C14.499 11.7241 15.139 14.0333 15.139 16.5003C15.139 18.9673 14.499 21.2764 13.3391 22.9989C12.2524 24.6146 10.8414 25.5036 9.36815 25.5036C7.89489 25.5036 6.48385 24.6146 5.39724 22.9989C4.23508 21.2764 3.59734 18.9673 3.59734 16.5003C3.59734 14.0333 4.23731 11.7241 5.39724 10.0017C6.48385 8.38594 7.89267 7.49694 9.36815 7.49694ZM9.36815 6.60794C5.69056 6.60794 2.7085 11.0374 2.7085 16.5003C2.7085 21.9632 5.69056 26.3926 9.36815 26.3926C13.0457 26.3926 16.0278 21.9632 16.0278 16.5003C16.0278 11.0374 13.0457 6.60794 9.36815 6.60794Z" fill="#B0BEC5"/>
<path d="M7.47266 15.5762C6.87269 15.0118 7.00602 13.8919 7.77487 13.0741C7.81486 13.0319 7.85486 12.9919 7.89708 12.9541C7.55488 12.7608 7.17934 12.6519 6.78381 12.6519C5.18611 12.6519 3.89062 14.414 3.89062 16.585C3.89062 18.756 5.18611 20.5182 6.78381 20.5182C8.3815 20.5182 9.67699 18.756 9.67699 16.585C9.67699 16.1962 9.63477 15.8184 9.55699 15.4629C8.83703 15.9806 7.97708 16.0495 7.47266 15.5762Z" fill="url(#paint0_linear_2122_5062)"/>
<path d="M22.6294 26.3932C26.3074 26.3932 29.289 21.9642 29.289 16.5008C29.289 11.0374 26.3074 6.60847 22.6294 6.60847C18.9514 6.60847 15.9697 11.0374 15.9697 16.5008C15.9697 21.9642 18.9514 26.3932 22.6294 26.3932Z" fill="#EEEEEE"/>
<path d="M22.6283 25.9493C19.2018 25.9493 16.4131 21.711 16.4131 16.5014C16.4131 11.2919 19.2018 7.05357 22.6283 7.05357C26.0548 7.05357 28.8435 11.2919 28.8435 16.5014C28.8435 21.711 26.057 25.9493 22.6283 25.9493Z" fill="#FAFAFA"/>
<path d="M22.6284 7.49816C24.1017 7.49816 25.5127 8.38716 26.5993 10.0029C27.7592 11.7254 28.3992 14.0345 28.3992 16.5015C28.3992 18.9685 27.7592 21.2777 26.5993 23.0001C25.5127 24.6159 24.1017 25.5049 22.6284 25.5049C21.1551 25.5049 19.7441 24.6159 18.6575 23.0001C17.4976 21.2777 16.8576 18.9685 16.8576 16.5015C16.8576 14.0345 17.4976 11.7254 18.6575 10.0029C19.7441 8.38716 21.1551 7.49816 22.6284 7.49816ZM22.6284 6.60916C18.9508 6.60916 15.9688 11.0386 15.9688 16.5015C15.9688 21.9644 18.9508 26.3939 22.6284 26.3939C26.306 26.3939 29.2881 21.9644 29.2881 16.5015C29.2881 11.0386 26.306 6.60916 22.6284 6.60916Z" fill="#B0BEC5"/>
<path d="M20.7339 15.5767C20.1339 15.0123 20.2672 13.8924 21.0361 13.0746C21.0761 13.0324 21.1161 12.9924 21.1583 12.9546C20.8161 12.7613 20.4406 12.6524 20.045 12.6524C18.4473 12.6524 17.1519 14.4146 17.1519 16.5856C17.1519 18.7566 18.4473 20.5187 20.045 20.5187C21.6427 20.5187 22.9382 18.7566 22.9382 16.5856C22.9382 16.1967 22.896 15.8189 22.8182 15.4634C22.1005 15.9812 21.2383 16.05 20.7339 15.5767Z" fill="url(#paint1_linear_2122_5062)"/>
<defs>
<linearGradient id="paint0_linear_2122_5062" x1="6.78232" y1="12.651" x2="6.78232" y2="20.5188" gradientUnits="userSpaceOnUse">
<stop stop-color="#424242"/>
<stop offset="1" stop-color="#212121"/>
</linearGradient>
<linearGradient id="paint1_linear_2122_5062" x1="20.0449" y1="12.6515" x2="20.0449" y2="20.5193" gradientUnits="userSpaceOnUse">
<stop stop-color="#424242"/>
<stop offset="1" stop-color="#212121"/>
</linearGradient>
</defs>
</svg>
After Width: | Height: | Size: 3.3 KiB |
9
frontend/public/Images/redisTemplatePreview.svg
Normal file
After Width: | Height: | Size: 1.7 MiB |
1
frontend/public/Logos/azure-aks.svg
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="2.94" y1="3.74" x2="8.67" y2="3.74" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="b" x1="9.13" y1="3.79" x2="14.85" y2="3.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="c" x1=".01" y1="9.12" x2="5.73" y2="9.12" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="d" x1="6.18" y1="9.08" x2="11.9" y2="9.08" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="e" x1="12.35" y1="9.13" x2="18.08" y2="9.13" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="f" x1="2.87" y1="14.56" x2="8.6" y2="14.56" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="g" x1="9.05" y1="14.6" x2="14.78" y2="14.6" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient></defs><path fill="url(#a)" d="M5.8 1.22l-2.86.53v3.9l2.86.61 2.87-1.15V2.2L5.8 1.22z"/><path d="M5.91 6.2l2.62-1.06A.2.2 0 008.65 5V2.36a.21.21 0 00-.13-.18l-2.65-.9h-.12l-2.6.48a.2.2 0 00-.15.18v3.53a.19.19 0 00.15.19l2.63.55a.32.32 0 00.13-.01z" fill="none"/><path d="M2.94 1.75v3.9l2.89.61v-5zm1.22 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2l.93-.16z" fill="#341a6e"/><path fill="url(#b)" d="M11.99 1.27l-2.86.53v3.9l2.86.61 2.86-1.16v-2.9l-2.86-.98z"/><path d="M9.13 1.8v3.9l2.87.61v-5zm1.21 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2.05l.93-.17z" fill="#341a6e"/><path fill="url(#c)" d="M2.87 6.6l-2.86.53v3.9l2.86.61 2.87-1.15V7.58L2.87 6.6z"/><path d="M0 7.13V11l2.89.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.27.26l-.93-.15V7.38l.93-.16z" fill="#341a6e"/><path fill="url(#d)" d="M9.04 6.56l-2.86.53v3.9l2.86.62 2.86-1.16V7.54l-2.86-.98z"/><path d="M6.18 7.09V11l2.88.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.34l.93-.16z" fill="#341a6e"/><path fill="url(#e)" d="M15.21 6.61l-2.86.53v3.9l2.86.61 2.87-1.15V7.59l-2.87-.98z"/><path d="M12.35 7.14V11l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.39l.93-.16z" fill="#341a6e"/><path fill="url(#f)" d="M5.73 12.04l-2.86.52v3.9l2.86.62 2.87-1.16v-2.9l-2.87-.98z"/><path d="M5.84 17l2.61-1a.18.18 0 00.12-.18v-2.6a.2.2 0 00-.13-.22l-2.64-.9a.17.17 0 00-.12 0l-2.6.47a.19.19 0 00-.16.19v3.54a.19.19 0 00.15.19L5.7 17a.23.23 0 00.14 0z" fill="none"/><path d="M2.87 12.56v3.9l2.89.62V12zm1.22 3.61L3.28 16v-3l.81-.14zm1.26.23l-.93-.15v-3.44l.93-.16z" fill="#341a6e"/><path fill="url(#g)" d="M11.91 12.08l-2.86.53v3.9l2.86.61 2.87-1.15v-2.91l-2.87-.98z"/><path d="M9.05 12.61v3.9l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15v-3.43l.93-.16z" fill="#341a6e"/></svg>
After Width: | Height: | Size: 3.1 KiB |
1
frontend/public/Logos/azure-app-service.svg
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="b" x1="4.4" y1="11.48" x2="4.37" y2="7.53" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="c" x1="10.13" y1="15.45" x2="10.13" y2="11.9" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="d" x1="14.18" y1="11.15" x2="14.18" y2="7.38" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><radialGradient id="a" cx="13428.81" cy="3518.86" r="56.67" gradientTransform="matrix(.15 0 0 .15 -2005.33 -518.83)" gradientUnits="userSpaceOnUse"><stop offset=".18" stop-color="#5ea0ef"/><stop offset="1" stop-color="#0078d4"/></radialGradient></defs><path d="M14.21 15.72A8.5 8.5 0 013.79 2.28l.09-.06a8.5 8.5 0 0110.33 13.5" fill="url(#a)"/><path d="M6.69 7.23a13 13 0 018.91-3.58 8.47 8.47 0 00-1.49-1.44 14.34 14.34 0 00-4.69 1.1 12.54 12.54 0 00-4.08 2.82 2.76 2.76 0 011.35 1.1zM2.48 10.65a17.86 17.86 0 00-.83 2.62 7.82 7.82 0 00.62.92c.18.23.35.44.55.65a17.94 17.94 0 011.08-3.47 2.76 2.76 0 01-1.42-.72z" fill="#fff" opacity=".6"/><path d="M3.46 6.11a12 12 0 01-.69-2.94 8.15 8.15 0 00-1.1 1.45A12.69 12.69 0 002.24 7a2.69 2.69 0 011.22-.89z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><path d="M8.36 13.67a1.77 1.77 0 01.54-1.27 11.88 11.88 0 01-2.53-1.86 2.74 2.74 0 01-1.49.83 13.1 13.1 0 001.45 1.28 12.12 12.12 0 002.05 1.25 1.79 1.79 0 01-.02-.23zM14.66 13.88a12 12 0 01-2.76-.32.41.41 0 010 .11 1.75 1.75 0 01-.51 1.24 13.69 13.69 0 003.42.24A8.21 8.21 0 0016 13.81a11.5 11.5 0 01-1.34.07z" fill="#f2f2f2" opacity=".55"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/><path d="M12.32 8.93a1.83 1.83 0 01.61-1 25.5 25.5 0 01-4.46-4.14 16.91 16.91 0 01-2-2.92 7.64 7.64 0 00-1.09.42 18.14 18.14 0 002.15 3.18 26.44 26.44 0 004.79 4.46z" fill="#f2f2f2" opacity=".7"/><circle cx="14.18" cy="9.27" r="1.89" fill="url(#d)"/><path d="M17.35 10.54l-.35-.17-.3-.16h-.06l-.26-.21h-.07L16 9.8a1.76 1.76 0 01-.64.92c.12.08.25.15.38.22l.08.05.35.19.86.45a8.63 8.63 0 00.29-1.11z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/></svg>
After Width: | Height: | Size: 2.3 KiB |