Compare commits
842 Commits
v0.29.3...error-resp
`a58d70079b` … `8bfb0b5088` — 842 commits (abbreviated SHAs only).
**.github/CODEOWNERS** — 9 changes (vendored)

```diff
@@ -1,13 +1,10 @@
 # CODEOWNERS info: https://help.github.com/en/articles/about-code-owners
 # Owners are automatically requested for review for PRs that changes code
 # that they own.
-* @ankitnayan
 
-/frontend/ @palashgdev
+/frontend/ @YounixM
+/frontend/src/container/MetricsApplication @srikanthccv
+/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
 /deploy/ @prashant-shahi
 /sample-apps/ @prashant-shahi
-**/query-service/ @srikanthccv
-Makefile @srikanthccv
-go.* @srikanthccv
-.git* @srikanthccv
 .github @prashant-shahi
```
**.github/pull_request_template.md** — 17 changes (vendored, new file)

```diff
@@ -0,0 +1,17 @@
+### Summary
+
+<!-- ✍️ A clear and concise description...-->
+
+#### Related Issues / PR's
+
+<!-- ✍️ Add the issues being resolved here and related PR's where applicable -->
+
+#### Screenshots
+
+NA
+
+<!-- ✍️ Add screenshots of before and after changes where applicable-->
+
+#### Affected Areas and Manually Tested Areas
+
+<!-- ✍️ Add details of blast radius and dev testing areas where applicable-->
```
**.github/workflows/build.yaml** — 13 changes (vendored)

```diff
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Install dependencies
         run: cd frontend && yarn install
       - name: Run ESLint
@@ -31,11 +31,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Create .env file
         run: |
           echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
           echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
+          echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
       - name: Install dependencies
         run: cd frontend && yarn install
       - name: Run ESLint
@@ -53,12 +54,12 @@ jobs:
   build-query-service:
     runs-on: ubuntu-latest
     steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
       - name: Setup golang
         uses: actions/setup-go@v4
         with:
           go-version: "1.21"
-      - name: Checkout code
-        uses: actions/checkout@v3
       - name: Run tests
         shell: bash
         run: |
@@ -71,12 +72,12 @@ jobs:
   build-ee-query-service:
     runs-on: ubuntu-latest
     steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
       - name: Setup golang
         uses: actions/setup-go@v4
         with:
           go-version: "1.21"
-      - name: Checkout code
-        uses: actions/checkout@v3
       - name: Build EE query-service image
         shell: bash
         run: |
```
**.github/workflows/codeql.yaml** — 2 changes (vendored)

```diff
@@ -39,7 +39,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
```
**.github/workflows/commitlint.yml** — 2 changes (vendored)

```diff
@@ -7,7 +7,7 @@ jobs:
   lint-commits:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: wagoid/commitlint-github-action@v5
```
**(workflow file — path not captured in the source)**

```diff
@@ -12,11 +12,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Codebase
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           repository: signoz/gh-bot
       - name: Use Node v16
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
           node-version: 16
       - name: Setup Cache & Install Dependencies
```
**.github/workflows/dependency-review.yml** — 4 changes (vendored)

```diff
@@ -15,8 +15,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: 'Checkout Repository'
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: 'Dependency Review'
         with:
           fail-on-severity: high
-        uses: actions/dependency-review-action@v2
+        uses: actions/dependency-review-action@v3
```
**.github/workflows/e2e-k3s.yaml** — 13 changes (vendored)

```diff
@@ -13,7 +13,12 @@ jobs:
       DOCKER_TAG: pull-${{ github.event.number }}
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
+      - name: Setup golang
+        uses: actions/setup-go@v4
+        with:
+          go-version: "1.21"
+
       - name: Build query-service image
         env:
@@ -65,9 +70,9 @@ jobs:
       - name: Kick off a sample-app workload
         run: |
           # start the locust swarm
-          kubectl -n sample-application run strzal --image=djbingham/curl \
-            --restart='OnFailure' -i --rm --command -- curl -X POST -F \
-            'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
+          kubectl --namespace sample-application run strzal --image=djbingham/curl \
+            --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
+            'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
 
       - name: Get short commit SHA, display tunnel URL and IP Address of the worker node
         id: get-subdomain
```
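The swarm-request change in the last hunk tracks Locust's 1.0 API rename: `locust_count` became `user_count` and `hatch_rate` became `spawn_rate`. A minimal sketch of the same call issued directly, assuming a Locust master reachable at the `locust-master:8089` endpoint the workflow targets:

```sh
# Start a swarm of 6 simulated users, ramping up 2 per second.
# Field names follow Locust >= 1.0; older versions expected
# locust_count/hatch_rate instead.
curl -X POST \
  -F 'user_count=6' \
  -F 'spawn_rate=2' \
  http://locust-master:8089/swarm
```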
**.github/workflows/jest-coverage-changes.yml** — 31 changes (vendored, new file)

```diff
@@ -0,0 +1,31 @@
+name: Jest Coverage - changed files
+
+on:
+  pull_request:
+    branches: develop
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          ref: "refs/heads/develop"
+          token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
+
+      - name: Fetch branch
+        run: git fetch origin ${{ github.event.pull_request.head.ref }}
+
+      - run: |
+          git checkout ${{ github.event.pull_request.head.sha }}
+
+      - uses: actions/setup-node@v4
+        with:
+          node-version: lts/*
+
+      - name: Install dependencies
+        run: cd frontend && npm install -g yarn && yarn
+
+      - name: npm run test:changedsince
+        run: cd frontend && npm run i18n:generate-hash && npm run test:changedsince
```
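`test:changedsince` is an npm script in `frontend/package.json` whose definition is not part of this compare. Scripts of this kind typically wrap Jest's `--changedSince` flag, which restricts the run to tests affected by files changed since a given ref. A hypothetical equivalent invocation, assuming `develop` as the comparison branch:

```sh
# Hypothetical direct invocation; the real script name and options live in
# frontend/package.json and may differ.
npx jest --changedSince=origin/develop --coverage
```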
**.github/workflows/playwright.yaml** — 4 changes (vendored)

```diff
@@ -9,8 +9,8 @@ jobs:
     timeout-minutes: 60
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-node@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
        with:
          node-version: "16.x"
      - name: Install dependencies
```
**.github/workflows/push.yaml** — 61 changes (vendored)

```diff
@@ -14,15 +14,19 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
+      - name: Setup golang
+        uses: actions/setup-go@v4
+        with:
+          go-version: "1.21"
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -30,7 +34,7 @@ jobs:
         id: short-sha
       - name: Get branch name
         id: branch-name
-        uses: tj-actions/branch-names@v5.1
+        uses: tj-actions/branch-names@v7.0.7
       - name: Set docker tag environment
         run: |
           if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
@@ -42,6 +46,11 @@ jobs:
           else
             echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
           fi
+      - name: Install cross-compilation tools
+        run: |
+          set -ex
+          sudo apt-get update
+          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
       - name: Build and push docker image
         run: make build-push-query-service
 
@@ -49,15 +58,19 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
+      - name: Setup golang
+        uses: actions/setup-go@v4
+        with:
+          go-version: "1.21"
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -65,7 +78,7 @@ jobs:
         id: short-sha
       - name: Get branch name
         id: branch-name
-        uses: tj-actions/branch-names@v5.1
+        uses: tj-actions/branch-names@v7.0.7
       - name: Set docker tag environment
         run: |
           if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
@@ -77,6 +90,11 @@ jobs:
           else
             echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
           fi
+      - name: Install cross-compilation tools
+        run: |
+          set -ex
+          sudo apt-get update
+          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
       - name: Build and push docker image
         run: make build-push-ee-query-service
 
@@ -84,7 +102,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Install dependencies
         working-directory: frontend
         run: yarn install
@@ -97,11 +115,11 @@ jobs:
         run: npm run lint
         continue-on-error: true
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -109,7 +127,7 @@ jobs:
         id: short-sha
       - name: Get branch name
         id: branch-name
-        uses: tj-actions/branch-names@v5.1
+        uses: tj-actions/branch-names@v7.0.7
       - name: Set docker tag environment
         run: |
           if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
@@ -128,11 +146,18 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Create .env file
         run: |
           echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
           echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
+          echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
+          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
+          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
+          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
+          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
       - name: Install dependencies
         working-directory: frontend
         run: yarn install
@@ -145,11 +170,11 @@ jobs:
         run: npm run lint
         continue-on-error: true
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -157,7 +182,7 @@ jobs:
         id: short-sha
       - name: Get branch name
         id: branch-name
-        uses: tj-actions/branch-names@v5.1
+        uses: tj-actions/branch-names@v7.0.7
       - name: Set docker tag environment
         run: |
           if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
```
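The new "Install cross-compilation tools" step supplies the C toolchains needed by the CGO-enabled static builds that the Makefile in this compare introduces. A sketch of reproducing that build locally on Debian/Ubuntu, using the same packages and the Makefile target added further down:

```sh
# Toolchains matching the workflow step above.
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools

# Cross-compile the CGO-enabled static binary for arm64, mirroring the
# Makefile's build-query-service-static-arm64 target.
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
```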
**.github/workflows/sonar.yml** — 2 changes (vendored)

```diff
@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Sonar analysis
```
**.github/workflows/staging-deployment.yaml** — 68 changes (vendored)

```diff
@@ -9,32 +9,46 @@ jobs:
     name: Deploy latest develop branch to staging
     runs-on: ubuntu-latest
     environment: staging
+    permissions:
+      contents: 'read'
+      id-token: 'write'
     steps:
-      - name: Executing remote ssh commands using ssh key
-        uses: appleboy/ssh-action@v0.1.8
-        env:
-          GITHUB_BRANCH: develop
-          GITHUB_SHA: ${{ github.sha }}
+      - id: 'auth'
+        uses: 'google-github-actions/auth@v2'
         with:
-          host: ${{ secrets.HOST_DNS }}
-          username: ${{ secrets.USERNAME }}
-          key: ${{ secrets.SSH_KEY }}
-          envs: GITHUB_BRANCH,GITHUB_SHA
-          command_timeout: 60m
-          script: |
-            echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
-            echo "GITHUB_SHA: ${GITHUB_SHA}"
-            export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
-            export OTELCOL_TAG="main"
-            docker system prune --force
-            docker pull signoz/signoz-otel-collector:main
-            cd ~/signoz
-            git status
-            git add .
-            git stash push -m "stashed on $(date --iso-8601=seconds)"
-            git fetch origin
-            git checkout ${GITHUB_BRANCH}
-            git pull
-            make build-ee-query-service-amd64
-            make build-frontend-amd64
-            make run-signoz
+          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
+          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
+
+      - name: 'sdk'
+        uses: 'google-github-actions/setup-gcloud@v2'
+
+      - name: 'ssh'
+        shell: bash
+        env:
+          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
+          GITHUB_SHA: ${{ github.sha }}
+          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
+          GCP_ZONE: ${{ secrets.GCP_ZONE }}
+          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
+        run: |
+          read -r -d '' COMMAND <<EOF || true
+          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
+          echo "GITHUB_SHA: ${GITHUB_SHA}"
+          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
+          export OTELCOL_TAG="main"
+          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
+          docker system prune --force
+          docker pull signoz/signoz-otel-collector:main
+          docker pull signoz/signoz-schema-migrator:main
+          cd ~/signoz
+          git status
+          git add .
+          git stash push -m "stashed on $(date --iso-8601=seconds)"
+          git fetch origin
+          git checkout ${GITHUB_BRANCH}
+          git pull
+          make build-ee-query-service-amd64
+          make build-frontend-amd64
+          make run-testing
+          EOF
+          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
```
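Both GCP deployment workflows now build the remote script with `read -r -d '' COMMAND <<EOF`. With `-d ''`, `read` returns non-zero when it hits end-of-input, so the trailing `|| true` keeps the step from failing under the shell's errexit before the SSH call runs. A stripped-down sketch of the pattern, with placeholder instance, zone, and project values:

```sh
# Capture a multi-line script into $COMMAND. read -d '' exits non-zero at
# EOF, so `|| true` prevents the step from aborting here. GITHUB_SHA is
# assumed to be set in the environment, as in the workflow.
read -r -d '' COMMAND <<EOF || true
echo "deploying commit ${GITHUB_SHA:0:7}"
EOF

# Execute it on the VM over IAP-tunneled SSH with a short-lived key, so no
# long-lived SSH secrets need to be stored in the repository.
gcloud compute ssh "my-instance" --zone "us-central1-a" \
  --ssh-key-expire-after=15m --tunnel-through-iap \
  --project "my-project" --command "${COMMAND}"
```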
**.github/workflows/testing-deployment.yaml** — 64 changes (vendored)

```diff
@@ -9,31 +9,47 @@ jobs:
     runs-on: ubuntu-latest
     environment: testing
     if: ${{ github.event.label.name == 'testing-deploy' }}
+    permissions:
+      contents: 'read'
+      id-token: 'write'
     steps:
-      - name: Executing remote ssh commands using ssh key
-        uses: appleboy/ssh-action@v0.1.8
+      - id: 'auth'
+        uses: 'google-github-actions/auth@v2'
+        with:
+          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
+          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
+
+      - name: 'sdk'
+        uses: 'google-github-actions/setup-gcloud@v2'
+
+      - name: 'ssh'
+        shell: bash
         env:
           GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
           GITHUB_SHA: ${{ github.sha }}
-        with:
-          host: ${{ secrets.HOST_DNS }}
-          username: ${{ secrets.USERNAME }}
-          key: ${{ secrets.SSH_KEY }}
-          envs: GITHUB_BRANCH,GITHUB_SHA
-          command_timeout: 60m
-          script: |
-            echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
-            echo "GITHUB_SHA: ${GITHUB_SHA}"
-            export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
-            export DEV_BUILD="1"
-            docker system prune --force
-            cd ~/signoz
-            git status
-            git add .
-            git stash push -m "stashed on $(date --iso-8601=seconds)"
-            git fetch origin
-            git checkout ${GITHUB_BRANCH}
-            git pull
-            make build-ee-query-service-amd64
-            make build-frontend-amd64
-            make run-signoz
+          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
+          GCP_ZONE: ${{ secrets.GCP_ZONE }}
+          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
+        run: |
+          read -r -d '' COMMAND <<EOF || true
+          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
+          echo "GITHUB_SHA: ${GITHUB_SHA}"
+          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
+          export DEV_BUILD="1"
+          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
+          docker system prune --force
+          cd ~/signoz
+          git status
+          git add .
+          git stash push -m "stashed on $(date --iso-8601=seconds)"
+          git fetch origin
+          git checkout develop
+          git pull
+          # This is added to include the scenerio when new commit in PR is force-pushed
+          git branch -D ${GITHUB_BRANCH}
+          git checkout --track origin/${GITHUB_BRANCH}
+          make build-ee-query-service-amd64
+          make build-frontend-amd64
+          make run-testing
+          EOF
+          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
```
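Note the force-push handling the testing workflow gains: instead of `git pull` on the PR branch, it moves to `develop`, deletes the stale local branch, and re-creates it tracking the remote, which always matches a rewritten head. The same idea in isolation (the branch name is a placeholder):

```sh
# Recover cleanly after a force-push: a stale local branch cannot be
# fast-forwarded, so drop it and re-track the rewritten remote head.
git fetch origin
git checkout develop                       # move off the branch first
git branch -D feature/my-branch || true    # delete the stale local copy
git checkout --track origin/feature/my-branch
```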
**.gitignore** — 16 changes (vendored)

```diff
@@ -37,7 +37,7 @@ frontend/src/constants/env.ts
 **/locust-scripts/__pycache__/
 **/__debug_bin
 
-frontend/.env
+.env
 pkg/query-service/signoz.db
 
 pkg/query-service/tests/test-deploy/data/
@@ -47,9 +47,23 @@ ee/query-service/signoz.db
 ee/query-service/tests/test-deploy/data/
 
 # local data
+*.backup
 *.db
 /deploy/docker/clickhouse-setup/data/
 /deploy/docker-swarm/clickhouse-setup/data/
 bin/
 
 */query-service/queries.active
+
+# e2e
+
+e2e/node_modules/
+e2e/test-results/
+e2e/playwright-report/
+e2e/blob-report/
+e2e/playwright/.cache/
+e2e/.auth
+
+# go
+vendor/
+**/main/**
```
**Makefile** — 88 changes

```diff
@@ -8,6 +8,7 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
 BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
 BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
+DEV_BUILD ?= "" # set to any non-empty value to enable dev build
 
 # Internal variables or constants.
 FRONTEND_DIRECTORY ?= frontend
@@ -15,15 +16,15 @@ QUERY_SERVICE_DIRECTORY ?= pkg/query-service
 EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
 STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
 SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
-LOCAL_GOOS ?= $(shell go env GOOS)
-LOCAL_GOARCH ?= $(shell go env GOARCH)
+GOOS ?= $(shell go env GOOS)
+GOARCH ?= $(shell go env GOARCH)
+GOPATH ?= $(shell go env GOPATH)
 
 REPONAME ?= signoz
 DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
 
 FRONTEND_DOCKER_IMAGE ?= frontend
 QUERY_SERVICE_DOCKER_IMAGE ?= query-service
-DEV_BUILD ?= ""
 
 # Build-time Go variables
 PACKAGE?=go.signoz.io/signoz
@@ -37,10 +38,22 @@ LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildV
 DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
 
 all: build-push-frontend build-push-query-service
 
+# Steps to build static files of frontend
+build-frontend-static:
+	@echo "------------------"
+	@echo "--> Building frontend static files"
+	@echo "------------------"
+	@cd $(FRONTEND_DIRECTORY) && \
+	rm -rf build && \
+	CI=1 yarn install && \
+	yarn build && \
+	ls -l build
+
 # Steps to build and push docker image of frontend
 .PHONY: build-frontend-amd64 build-push-frontend
 # Step to build docker image of frontend in amd64 (used in build pipeline)
-build-frontend-amd64:
+build-frontend-amd64: build-frontend-static
 	@echo "------------------"
 	@echo "--> Building frontend docker image for amd64"
 	@echo "------------------"
@@ -49,7 +62,7 @@ build-frontend-amd64:
 	--build-arg TARGETPLATFORM="linux/amd64" .
 
 # Step to build and push docker image of frontend(used in push pipeline)
-build-push-frontend:
+build-push-frontend: build-frontend-static
 	@echo "------------------"
 	@echo "--> Building and pushing frontend docker image"
 	@echo "------------------"
@@ -57,24 +70,52 @@ build-push-frontend:
 	docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
 	--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
 
+# Steps to build static binary of query service
+.PHONY: build-query-service-static
+build-query-service-static:
+	@echo "------------------"
+	@echo "--> Building query-service static binary"
+	@echo "------------------"
+	@if [ $(DEV_BUILD) != "" ]; then \
+		cd $(QUERY_SERVICE_DIRECTORY) && \
+		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
+		-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
+	else \
+		cd $(QUERY_SERVICE_DIRECTORY) && \
+		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
+		-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
+	fi
+
+.PHONY: build-query-service-static-amd64
+build-query-service-static-amd64:
+	make GOARCH=amd64 build-query-service-static
+
+.PHONY: build-query-service-static-arm64
+build-query-service-static-arm64:
+	make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
+
+# Steps to build static binary of query service for all platforms
+.PHONY: build-query-service-static-all
+build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
+
 # Steps to build and push docker image of query service
 .PHONY: build-query-service-amd64 build-push-query-service
 # Step to build docker image of query service in amd64 (used in build pipeline)
-build-query-service-amd64:
+build-query-service-amd64: build-query-service-static-amd64
 	@echo "------------------"
 	@echo "--> Building query-service docker image for amd64"
 	@echo "------------------"
 	@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
-	-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-	--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
+	--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
+	--build-arg TARGETPLATFORM="linux/amd64" .
 
 # Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
-build-push-query-service:
+build-push-query-service: build-query-service-static-all
 	@echo "------------------"
 	@echo "--> Building and pushing query-service docker image"
 	@echo "------------------"
 	@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
-	--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
+	--push --platform linux/arm64,linux/amd64 \
 	--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
 
 # Step to build EE docker image of query service in amd64 (used in build pipeline)
@@ -82,24 +123,14 @@ build-ee-query-service-amd64:
 	@echo "------------------"
 	@echo "--> Building query-service docker image for amd64"
 	@echo "------------------"
-	@if [ $(DEV_BUILD) != "" ]; then \
-		docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
-		-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-		--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
-	else \
-		docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
-		-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-		--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
-	fi
+	make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64
 
 # Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
 build-push-ee-query-service:
 	@echo "------------------"
 	@echo "--> Building and pushing query-service docker image"
 	@echo "------------------"
-	@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
-	--progress plain --push --platform linux/arm64,linux/amd64 \
-	--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
+	make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service
 
 dev-setup:
 	mkdir -p /var/lib/signoz
@@ -110,7 +141,7 @@ dev-setup:
 	@echo "------------------"
 
 run-local:
-	@LOCAL_GOOS=$(LOCAL_GOOS) LOCAL_GOARCH=$(LOCAL_GOARCH) docker-compose -f \
+	@docker-compose -f \
 	$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
 	up --build -d
 
@@ -125,6 +156,9 @@ pull-signoz:
 run-signoz:
 	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
 
+run-testing:
+	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.testing.yaml up --build -d
+
 down-signoz:
 	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
 
@@ -151,4 +185,6 @@ test:
 	go test ./pkg/query-service/app/querier/...
 	go test ./pkg/query-service/converter/...
 	go test ./pkg/query-service/formatter/...
 	go test ./pkg/query-service/tests/integration/...
+	go test ./pkg/query-service/rules/...
+	go test ./pkg/query-service/collectorsimulator/...
```
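The new `build-query-service-static` target links with `-linkmode external -extldflags '-static'`, so the resulting binary should carry no dynamic dependencies. A quick way to check that property after a build; the path follows the Makefile's `GOOS`/`GOARCH` naming and is shown here for a linux/amd64 host:

```sh
make build-query-service-static

# Both checks should confirm static linking.
file pkg/query-service/bin/query-service-linux-amd64          # expect "statically linked"
ldd pkg/query-service/bin/query-service-linux-amd64 || true   # expect "not a dynamic executable"
```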
@@ -11,7 +11,6 @@
     <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
 </p>
-

 <h3 align="center">
   <a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
   <a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> •

@@ -40,12 +39,13 @@ SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren berei
 👉 Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.

 ##

 ### Anwendung Metriken

 ![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)

+
 ### Verteiltes Tracing

 <img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">

 <img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">

@@ -62,22 +62,18 @@ SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren berei

 ![exceptions_light](https://user-images.githubusercontent.com/83692067/226637480-f1f66dc9-6110-4a8a-b3b1-9c6ae431cb8c.png)

-
 ### Alarme

 <img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">

-
 <br /><br />

-
 ## Werde Teil unserer Slack Community

 Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋

 <br /><br />

-
 ## Funktionen:

 - Einheitliche Benutzeroberfläche für Metriken, Traces und Logs. Keine Notwendigkeit, zwischen Prometheus und Jaeger zu wechseln, um Probleme zu debuggen oder ein separates Log-Tool wie Elastic neben Ihrer Metriken- und Traces-Stack zu verwenden.

@@ -93,7 +89,6 @@ Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋

 <br /><br />

-
 ## Wieso SigNoz?

 Als Entwickler fanden wir es anstrengend, uns für jede kleine Funktion, die wir haben wollten, auf Closed Source SaaS Anbieter verlassen zu müssen. Closed Source Anbieter überraschen ihre Kunden zum Monatsende oft mit hohen Rechnungen, die keine Transparenz bzgl. der Kostenaufteilung bieten.

@@ -116,12 +111,10 @@ Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als Bibliothek, mit
 - Elixir
 - Rust

-
 Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/

 <br /><br />

-
 ## Erste Schritte mit SigNoz

 ### Bereitstellung mit Docker

@@ -138,7 +131,6 @@ Bitte folge den [hier](https://signoz.io/docs/deployment/helm_chart) aufgelistet

 <br /><br />

-
 ## Vergleiche mit bekannten Tools

 ### SigNoz vs Prometheus

@@ -179,7 +171,6 @@ Wir haben Benchmarks veröffentlicht, die Loki mit SigNoz vergleichen. Schauen S

 <br /><br />

-
 ## Zum Projekt beitragen

 Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md), durch, bevor du anfängst, Beiträge zu SigNoz zu machen.

@@ -197,6 +188,8 @@ Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem #c
 #### Frontend

 - [Palash Gupta](https://github.com/palashgdev)
+- [Yunus M](https://github.com/YounixM)
+- [Rajat Dabade](https://github.com/Rajat-Dabade)

 #### DevOps

@@ -204,16 +197,12 @@ Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem #c

 <br /><br />

-
 ## Dokumentation

 Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.

-
-
 <br /><br />

-
 ## Gemeinschaft

 Werde Teil der [slack community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
@@ -108,7 +108,7 @@ We support [OpenTelemetry](https://opentelemetry.io) as the library which you ca

 - Java
 - Python
-- NodeJS
+- Node.js
 - Go
 - PHP
 - .NET

@@ -199,10 +199,13 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
 #### Frontend

 - [Palash Gupta](https://github.com/palashgdev)
+- [Yunus M](https://github.com/YounixM)
+- [Rajat Dabade](https://github.com/Rajat-Dabade)

 #### DevOps

 - [Prashant Shahi](https://github.com/prashant-shahi)
+- [Dhawal Sanghvi](https://github.com/dhawal1248)

 <br /><br />
README.zh-cn.md (203 changed lines)
@@ -1,170 +1,227 @@
-<p align="center">
-  <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
+  <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />

-<p align="center">监视你的应用,并可排查已部署应用中的问题,这是一个开源的可替代DataDog、NewRelic的方案</p>
+<p align="center">监控你的应用,并且可排查已部署应用的问题,这是一个可替代 DataDog、NewRelic 的开源方案</p>
 </p>

 <p align="center">
-  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
+  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
   <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
   <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
     <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
 </p>

+<h3 align="center">
+  <a href="https://signoz.io/docs"><b>文档</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>中文ReadMe</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>德文ReadMe</b></a> •
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>葡萄牙语ReadMe</b></a> •
+  <a href="https://signoz.io/slack"><b>Slack 社区</b></a> •
+  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
+</h3>

 ##

-SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNoz使用分布式追踪来增加软件技术栈的可见性。
+SigNoz 帮助开发人员监控应用并排查已部署应用的问题。你可以使用 SigNoz 实现如下能力:

-👉 你能看到一些性能指标,服务、外部api调用、每个终端(endpoint)的p99延迟和错误率。
+👉 在同一块面板上,可视化 Metrics, Traces 和 Logs 内容。

-👉 通过准确的追踪来确定是什么引起了问题,并且可以看到每个独立请求的帧图(framegraph),这样你就能找到根本原因。
+👉 你可以关注服务的 p99 延迟和错误率, 包括外部 API 调用和个别的端点。

-👉 聚合trace数据来获得业务相关指标。
+👉 你可以找到问题的根因,通过提取相关问题的 traces 日志、单独查看请求 traces 的火焰图详情。

-![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
-<br />
-![exceptions_light](https://user-images.githubusercontent.com/83692067/226637480-f1f66dc9-6110-4a8a-b3b1-9c6ae431cb8c.png)
-<br />
-![alerts_management](https://user-images.githubusercontent.com/83692067/226637510-51775ea5-bdbd-489b-82f6-9a5d29f5cf09.png)
+👉 执行 trace 数据聚合,以获取业务相关的 metrics
+
+👉 对日志过滤和查询,通过日志的属性建立看板和告警
+
+👉 通过 Python,java,Ruby 和 Javascript 自动记录异常
+
+👉 轻松的自定义查询和设置告警
+
+### 应用 Metrics 展示
+
+![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)
+
+### 分布式追踪
+
+<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
+
+<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
+
+### 日志管理
+
+<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
+
+### 基础设施监控
+
+<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
+
+### 异常监控
+
+![exceptions_light](https://user-images.githubusercontent.com/83692067/226637480-f1f66dc9-6110-4a8a-b3b1-9c6ae431cb8c.png)
+
+### 告警
+
+<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">

 <br /><br />

-<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
+## 加入我们 Slack 社区

-## 加入我们的Slack社区
+来 [Slack](https://signoz.io/slack) 和我们打招呼吧 👋

-来[Slack](https://signoz.io/slack) 跟我们打声招呼👋

 <br /><br />

-<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
+## 特性:

-## 功能:
+- 为 metrics, traces and logs 制定统一的 UI。 无需切换 Prometheus 到 Jaeger 去查找问题,也无需使用想 Elastic 这样的日志工具分开你的 metrics 和 traces

-- 应用概览指标(metrics),如RPS, p50/p90/p99延迟率分位值,错误率等。
+- 默认统计应用的 metrics 数据,像 RPS (每秒请求数), 50th/90th/99th 的分位数延迟数据,还有相关的错误率
-- 应用中最慢的终端(endpoint)
-- 查看特定请求的trace数据来分析下游服务问题、慢数据库查询问题 及调用第三方服务如支付网关的问题
+- 找到应用中最慢的端点
-- 通过服务名称、操作、延迟、错误、标签来过滤traces。
-- 聚合trace数据(events/spans)来得到业务相关指标。比如,你可以通过过滤条件`customer_type: gold` or `deployment_version: v2` or `external_call: paypal` 来获取指定业务的错误率和p99延迟
+- 查看准确的请求跟踪数据,找到下游服务的问题了,比如 DB 慢查询,或者调用第三方的支付网关等
-- 为metrics和trace提供统一的UI。排查问题不需要在Prometheus和Jaeger之间切换。
+- 通过 服务名、操作方式、延迟、错误、标签/注释 过滤 traces 数据
+
+- 通过聚合 trace 数据而获得业务相关的 metrics。 比如你可以通过 `customer_type: gold` 或者 `deployment_version: v2` 或者 `external_call: paypal` 获取错误率和 P99 延迟数据
+
+- 原生支持 OpenTelemetry 日志,高级日志查询,自动收集 k8s 相关日志
+
+- 快如闪电的日志分析 ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
+
+- 可视化点到点的基础设施性能,提取有所有类型机器的 metrics 数据
+
+- 轻易自定义告警查询

 <br /><br />

-<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
+## 为什么使用 SigNoz?

-## 为何选择SigNoz?
+作为开发者, 我们发现 SaaS 厂商对一些大家想要的小功能都是闭源的,这种行为真的让人有点恼火。 闭源厂商还会在月底给你一张没有明细的巨额账单。

-作为开发人员,我们发现依赖闭源的SaaS厂商提供的每个小功能有些麻烦,闭源厂商通常会给你一份巨额月付账单,但不提供足够的透明度,你不知道你为哪些功能付费。
+我们想做一个自托管并且可开源的工具,像 DataDog 和 NewRelic 那样, 为那些担心数据隐私和安全的公司提供第三方服务。

-我们想做一个自服务的开源版本的工具,类似于DataDog和NewRelic,用于那些对客户数据流入第三方有隐私和安全担忧的厂商。
+作为开源的项目,你完全可以自己掌控你的配置、样本和更新。你同样可以基于 SigNoz 拓展特定的业务模块。

-开源也让你对配置、采样和正常运行时间有完整的控制,你可以在SigNoz基础上构建模块来满足特定的商业需求。
+### 支持的编程语言:

-### 语言支持
+我们支持 [OpenTelemetry](https://opentelemetry.io)。作为一个观测你应用的库文件。所以任何 OpenTelemetry 支持的框架和语言,对于 SigNoz 也同样支持。 一些主要支持的语言如下:

-我们支持[OpenTelemetry](https://opentelemetry.io)库,你可以使用它来装备应用。也就是说SigNoz支持任何支持OpenTelemetry库的框架和语言。 主要支持语言包括:

 - Java
 - Python
 - NodeJS
 - Go
+- PHP
+- .NET
+- Ruby
+- Elixir
+- Rust

-你可以在这个文档里找到完整的语言列表 - https://opentelemetry.io/docs/
+你可以在这里找到全部支持的语言列表 - https://opentelemetry.io/docs/

 <br /><br />

-<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
+## 让我们开始吧

-## 入门
+### 使用 Docker 部署

+请一步步跟随 [这里](https://signoz.io/docs/install/docker/) 通过 docker 来安装。

-### 使用Docker部署
+这个 [排障说明书](https://signoz.io/docs/install/troubleshooting/) 可以帮助你解决碰到的问题。

-请按照[这里](https://signoz.io/docs/install/docker/)列出的步骤使用Docker来安装

-如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/install/troubleshooting/)会对你有帮助。

 <p>&nbsp  </p>

+### 使用 Helm 在 Kubernetes 部署

-### 使用Helm在Kubernetes上部署
+请一步步跟随 [这里](https://signoz.io/docs/deployment/helm_chart) 通过 helm 来安装

-请跟着[这里](https://signoz.io/docs/deployment/helm_chart)的步骤使用helm charts安装

 <br /><br />

-<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
+## 比较相似的工具

-## 与其他方案的比较

 ### SigNoz vs Prometheus

-如果你只是需要监控指标(metrics),那Prometheus是不错的,但如果你要无缝的在metrics和traces之间切换,那目前把Prometheus & Jaeger串起来的体验并不好。
+Prometheus 是一个针对 metrics 监控的强大工具。但是如果你想无缝的切换 metrics 和 traces 查询,你当前大概率需要在 Prometheus 和 Jaeger 之间切换。

-我们的目标是为metrics和traces提供统一的UI - 类似于Datadog这样的Saas厂提供的方案。并且能够对trace进行过滤和聚合,这是目前Jaeger缺失的功能。
+我们的目标是提供一个客户观测 metrics 和 traces 整合的 UI。就像 SaaS 供应商 DataDog,它提供很多 jaeger 缺失的功能,比如针对 traces 过滤功能和聚合功能。

 <p>&nbsp  </p>

 ### SigNoz vs Jaeger

-Jaeger只做分布式追踪(distributed tracing),SigNoz则支持metrics,traces,logs ,即可视化的三大支柱。
+Jaeger 仅仅是一个分布式追踪系统。 但是 SigNoz 可以提供 metrics, traces 和 logs 所有的观测。

-并且SigNoz有一些Jaeger没有的高级功能:
+而且, SigNoz 相较于 Jaeger 拥有更对的高级功能:

-- Jaegar UI无法在traces或过滤的traces上展示metrics。
+- Jaegar UI 不能提供任何基于 traces 的 metrics 查询和过滤。
-- Jaeger不能对过滤的traces做聚合操作。例如,拥有tag为customer_type='premium'的所有请求的p99延迟。而这个功能在SigNoz这儿是很容易实现。
+- Jaeger 不能针对过滤的 traces 做聚合。 比如, p99 延迟的请求有个标签是 customer_type='premium'。 而这些在 SigNoz 可以轻松做到。

+<p>&nbsp  </p>
+
+### SigNoz vs Elastic
+
+- SigNoz 的日志管理是基于 ClickHouse 实现的,可以使日志的聚合更加高效,因为它是基于 OLAP 的数据仓储。
+
+- 与 Elastic 相比,可以节省 50% 的资源成本
+
+我们已经公布了 Elastic 和 SigNoz 的性能对比。 请点击 [这里](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
+
+<p>&nbsp  </p>
+
+### SigNoz vs Loki
+
+- SigNoz 支持大容量高基数的聚合,但是 loki 是不支持的。
+
+- SigNoz 支持索引的高基数查询,并且对索引没有数量限制,而 Loki 会在添加部分索引后到达最大上限。
+
+- 相较于 SigNoz,Loki 在搜索大量数据下既困难又缓慢。
+
+我们已经发布了基准测试对比 Loki 和 SigNoz 性能。请点击 [这里](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)

 <br /><br />

-<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

 ## 贡献

+我们 ❤️ 你的贡献,无论大小。 请先阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 再开始给 SigNoz 做贡献。

-我们 ❤️ 任何贡献无论大小。 请阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 然后开始给Signoz做贡献。
+如果你不知道如何开始? 只需要在 [slack 社区](https://signoz.io/slack) 通过 `#contributing` 频道联系我们。

-还不清楚怎么开始? 只需在[slack社区](https://signoz.io/slack)的`#contributing`频道里ping我们。
+### 项目维护人员

-### Project maintainers
+#### 后端

-#### Backend

 - [Ankit Nayan](https://github.com/ankitnayan)
 - [Nityananda Gohain](https://github.com/nityanandagohain)
 - [Srikanth Chekuri](https://github.com/srikanthccv)
 - [Vishal Sharma](https://github.com/makeavish)

-#### Frontend
+#### 前端

 - [Palash Gupta](https://github.com/palashgdev)
+- [Yunus M](https://github.com/YounixM)
+- [Rajat Dabade](https://github.com/Rajat-Dabade)

-#### DevOps
+#### 运维开发

 - [Prashant Shahi](https://github.com/prashant-shahi)

 <br /><br />

-<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

 ## 文档

-文档在这里:https://signoz.io/docs/. 如果你觉得有任何不清楚或者有文档缺失,请在Github里发一个问题,并使用标签 `documentation` 或者在社区stack频道里告诉我们。
+你可以通过 https://signoz.io/docs/ 找到相关文档。如果你需要阐述问题或者发现一些确实的事件, 通过标签为 `documentation` 提交 Github 问题。或者通过 slack 社区频道。

 <br /><br />

-<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

 ## 社区

-加入[slack community](https://signoz.io/slack),了解更多关于分布式跟踪、可观察性(observability),以及SigNoz。同时与其他用户和贡献者一起交流。
+加入 [slack 社区](https://signoz.io/slack) 去了解更多关于分布式追踪、可观测性系统 。或者与 SigNoz 其他用户和贡献者交流。

-如果你有任何想法、问题或者反馈,请在[Github Discussions](https://github.com/SigNoz/signoz/discussions)分享给我们。
+如果你有任何想法、问题、或者任何反馈, 请通过 [Github Discussions](https://github.com/SigNoz/signoz/discussions) 分享。

-最后,感谢我们这些优秀的贡献者们。
+不管怎么样,感谢这个项目的所有贡献者!

 <a href="https://github.com/signoz/signoz/graphs/contributors">
   <img src="https://contrib.rocks/image?repo=signoz/signoz" />
 </a>
@@ -1,7 +1,7 @@
 version: "3.9"

 x-clickhouse-defaults: &clickhouse-defaults
-  image: clickhouse/clickhouse-server:23.7.3-alpine
+  image: clickhouse/clickhouse-server:24.1.2-alpine
   tty: true
   deploy:
     restart_policy:

@@ -22,7 +22,7 @@ x-clickhouse-defaults: &clickhouse-defaults
         "wget",
         "--spider",
         "-q",
-        "localhost:8123/ping"
+        "0.0.0.0:8123/ping"
       ]
     interval: 30s
     timeout: 5s

@@ -33,12 +33,14 @@ x-clickhouse-defaults: &clickhouse-defaults
       soft: 262144
       hard: 262144

-x-clickhouse-depend: &clickhouse-depend
+x-db-depend: &db-depend
   depends_on:
     - clickhouse
+    - otel-collector-migrator
     # - clickhouse-2
     # - clickhouse-3

+
 services:
   zookeeper-1:
     image: bitnami/zookeeper:3.7.1
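A side note for readers skimming these compose hunks: the `x-db-depend: &db-depend` rename above is a compose extension field combined with a YAML anchor, which services later pull in via the `<<: *db-depend` merge key. A minimal, self-contained sketch of the pattern (the `demo` service and `busybox` image are illustrative placeholders, not from this repo):

```yaml
x-db-depend: &db-depend      # "x-" prefixed top-level keys are ignored by docker-compose
  depends_on:
    - clickhouse
    - otel-collector-migrator

services:
  demo:                      # placeholder service, for illustration only
    image: busybox           # placeholder image, not from this diff
    <<: *db-depend           # YAML merge key: copies the anchored mapping into this service
```

Renaming the anchor (rather than just appending to it) makes every `<<:` consumer show up in the diff, which is why the later hunks all touch a `<<: *clickhouse-depend` line.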
@@ -131,7 +133,7 @@ services:
     # - ./data/clickhouse-3/:/var/lib/clickhouse/

   alertmanager:
-    image: signoz/alertmanager:0.23.4
+    image: signoz/alertmanager:0.23.5
     volumes:
       - ./data/alertmanager:/data
     command:

@@ -144,11 +146,11 @@ services:
         condition: on-failure

   query-service:
-    image: signoz/query-service:0.29.3
+    image: signoz/query-service:0.46.0
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--prefer-delta=true"
+        # "--prefer-delta=true"
       ]
     # ports:
     #   - "6060:6060" # pprof port

@@ -158,7 +160,7 @@ services:
       - ../dashboards:/root/config/dashboards
       - ./data/signoz/:/var/lib/signoz/
     environment:
-      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
+      - ClickHouseUrl=tcp://clickhouse:9000
       - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
       - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards

@@ -181,10 +183,10 @@ services:
     deploy:
       restart_policy:
         condition: on-failure
-    <<: *clickhouse-depend
+    <<: *db-depend

   frontend:
-    image: signoz/frontend:0.29.3
+    image: signoz/frontend:0.46.0
     deploy:
       restart_policy:
         condition: on-failure

@@ -197,15 +199,17 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/signoz-otel-collector:0.79.7
+    image: signoz/signoz-otel-collector:0.88.24
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
+        "--manager-config=/etc/manager-config.yaml",
         "--feature-gates=-pkg.translator.prometheus.NormalizeName"
       ]
     user: root # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}

@@ -227,26 +231,23 @@ services:
       mode: global
       restart_policy:
         condition: on-failure
-    <<: *clickhouse-depend
+    depends_on:
+      - clickhouse
+      - otel-collector-migrator
+      - query-service

-  otel-collector-metrics:
-    image: signoz/signoz-otel-collector:0.79.7
-    command:
-      [
-        "--config=/etc/otel-collector-metrics-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
-    volumes:
-      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
-    # ports:
-    #   - "1777:1777" # pprof extension
-    #   - "8888:8888" # OtelCollector internal metrics
-    #   - "13133:13133" # Health check extension
-    #   - "55679:55679" # zPages extension
-    deploy:
-      restart_policy:
-        condition: on-failure
-    <<: *clickhouse-depend
+  otel-collector-migrator:
+    image: signoz/signoz-schema-migrator:0.88.24
+    deploy:
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+    command:
+      - "--dsn=tcp://clickhouse:9000"
+    depends_on:
+      - clickhouse
+      # - clickhouse-2
+      # - clickhouse-3

   logspout:
     image: "gliderlabs/logspout:v3.2.14"
@@ -15,13 +15,9 @@ receivers:
         # please remove names from below if you want to collect logs from them
         - type: filter
           id: signoz_logs_filter
-          expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
+          expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
   opencensus:
     endpoint: 0.0.0.0:55678
-  otlp/spanmetrics:
-    protocols:
-      grpc:
-        endpoint: localhost:12345
   otlp:
     protocols:
       grpc:

@@ -61,40 +57,6 @@ receivers:
         job_name: otel-collector

 processors:
-  logstransform/internal:
-    operators:
-      - type: trace_parser
-        if: '"trace_id" in attributes or "span_id" in attributes'
-        trace_id:
-          parse_from: attributes.trace_id
-        span_id:
-          parse_from: attributes.span_id
-        output: remove_trace_id
-      - type: trace_parser
-        if: '"traceId" in attributes or "spanId" in attributes'
-        trace_id:
-          parse_from: attributes.traceId
-        span_id:
-          parse_from: attributes.spanId
-        output: remove_traceId
-      - id: remove_traceId
-        type: remove
-        if: '"traceId" in attributes'
-        field: attributes.traceId
-        output: remove_spanId
-      - id: remove_spanId
-        type: remove
-        if: '"spanId" in attributes'
-        field: attributes.spanId
-      - id: remove_trace_id
-        type: remove
-        if: '"trace_id" in attributes'
-        field: attributes.trace_id
-        output: remove_span_id
-      - id: remove_span_id
-        type: remove
-        if: '"span_id" in attributes'
-        field: attributes.span_id
   batch:
     send_batch_size: 10000
     send_batch_max_size: 11000

@@ -103,8 +65,8 @@ processors:
     # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
     detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
     timeout: 2s
-  signozspanmetrics/prometheus:
-    metrics_exporter: prometheus
+  signozspanmetrics/cumulative:
+    metrics_exporter: clickhousemetricswrite
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
     dimensions:

@@ -115,7 +77,16 @@ processors:
       # This is added to ensure the uniqueness of the timeseries
       # Otherwise, identical timeseries produced by multiple replicas of
       # collectors result in incorrect APM metrics
-      - name: 'signoz.collector.id'
+      - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name
   # memory_limiter:
   #   # 80% of maximum memory up to 2G
   #   limit_mib: 1500

@@ -131,33 +102,47 @@ processors:
   #   num_workers: 4
   #   queue_size: 100
   #   retry_on_failure: true
+  signozspanmetrics/delta:
+    metrics_exporter: clickhousemetricswrite
+    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
+    dimensions_cache_size: 100000
+    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
+    enable_exp_histogram: true
+    dimensions:
+      - name: service.namespace
+        default: default
+      - name: deployment.environment
+        default: default
+      # This is added to ensure the uniqueness of the timeseries
+      # Otherwise, identical timeseries produced by multiple replicas of
+      # collectors result in incorrect APM metrics
+      - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name

 exporters:
   clickhousetraces:
-    datasource: tcp://clickhouse:9000/?database=signoz_traces
+    datasource: tcp://clickhouse:9000/signoz_traces
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
     low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
   clickhousemetricswrite:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
+    endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
       enabled: true
   clickhousemetricswrite/prometheus:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
+    endpoint: tcp://clickhouse:9000/signoz_metrics
-  prometheus:
-    endpoint: 0.0.0.0:8889
   # logging: {}
   clickhouselogsexporter:
-    dsn: tcp://clickhouse:9000/
+    dsn: tcp://clickhouse:9000/signoz_logs
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
-    timeout: 5s
+    timeout: 10s
-    sending_queue:
-      queue_size: 100
-    retry_on_failure:
-      enabled: true
-      initial_interval: 5s
-      max_interval: 30s
-      max_elapsed_time: 300s

 extensions:
   health_check:
     endpoint: 0.0.0.0:13133

@@ -174,7 +159,7 @@ service:
   pipelines:
     traces:
       receivers: [jaeger, otlp]
-      processors: [signozspanmetrics/prometheus, batch]
+      processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
       exporters: [clickhousetraces]
     metrics:
       receivers: [otlp]

@@ -188,10 +173,7 @@ service:
       receivers: [prometheus]
       processors: [batch]
       exporters: [clickhousemetricswrite/prometheus]
-    metrics/spanmetrics:
-      receivers: [otlp/spanmetrics]
-      exporters: [prometheus]
     logs:
       receivers: [otlp, tcplog/docker]
-      processors: [logstransform/internal, batch]
+      processors: [batch]
       exporters: [clickhouselogsexporter]
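Read together, the collector hunks above retire the Prometheus round-trip for span metrics (the `otlp/spanmetrics` receiver, `prometheus` exporter, and `metrics/spanmetrics` pipeline all disappear) and instead write span metrics straight to ClickHouse in two temporalities. A condensed sketch of the resulting wiring, assembled from the lines above (the comments are editorial, not from the config):

```yaml
processors:
  signozspanmetrics/cumulative:                    # running totals since collector start
    metrics_exporter: clickhousemetricswrite
  signozspanmetrics/delta:                         # per-interval increments
    metrics_exporter: clickhousemetricswrite
    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA

service:
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      # every span now feeds both span-metrics processors before batching
      processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
      exporters: [clickhousetraces]
```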
@@ -1,64 +0,0 @@
-receivers:
-  prometheus:
-    config:
-      scrape_configs:
-        # otel-collector-metrics internal metrics
-        - job_name: otel-collector-metrics
-          scrape_interval: 60s
-          static_configs:
-            - targets:
-                - localhost:8888
-              labels:
-                job_name: otel-collector-metrics
-        # SigNoz span metrics
-        - job_name: signozspanmetrics-collector
-          scrape_interval: 60s
-          dns_sd_configs:
-            - names:
-                - tasks.otel-collector
-              type: A
-              port: 8889
-
-processors:
-  batch:
-    send_batch_size: 10000
-    send_batch_max_size: 11000
-    timeout: 10s
-  # memory_limiter:
-  #   # 80% of maximum memory up to 2G
-  #   limit_mib: 1500
-  #   # 25% of limit up to 2G
-  #   spike_limit_mib: 512
-  #   check_interval: 5s
-  #
-  #   # 50% of the maximum memory
-  #   limit_percentage: 50
-  #   # 20% of max memory usage spike expected
-  #   spike_limit_percentage: 20
-  # queued_retry:
-  #   num_workers: 4
-  #   queue_size: 100
-  #   retry_on_failure: true
-
-exporters:
-  clickhousemetricswrite:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
-
-extensions:
-  health_check:
-    endpoint: 0.0.0.0:13133
-  zpages:
-    endpoint: 0.0.0.0:55679
-  pprof:
-    endpoint: 0.0.0.0:1777
-
-service:
-  telemetry:
-    metrics:
-      address: 0.0.0.0:8888
-  extensions: [health_check, zpages, pprof]
-  pipelines:
-    metrics:
-      receivers: [prometheus]
-      processors: [batch]
-      exporters: [clickhousemetricswrite]
@@ -0,0 +1 @@
+server_endpoint: ws://query-service:4320/v1/opamp
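This one-line file is the manager config that the compose hunks elsewhere in this diff mount into the collector at `/etc/manager-config.yaml` and enable with the new `--manager-config` flag; the collector then dials the query-service's OpAMP endpoint for remote configuration. A minimal sketch of the wiring, abridged from those hunks (not a complete service definition):

```yaml
services:
  otel-collector:
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml"  # reads the one-line file below
      ]
    volumes:
      # file contents: server_endpoint: ws://query-service:4320/v1/opamp
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
```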
@@ -22,4 +22,4 @@ rule_files:
 scrape_configs: []

 remote_read:
-  - url: tcp://clickhouse:9000/?database=signoz_metrics
+  - url: tcp://clickhouse:9000/signoz_metrics
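The same DSN rewrite recurs throughout this diff (here, and in the `clickhousetraces`, `clickhousemetricswrite`, and `clickhouselogsexporter` settings): the target database moves from a `?database=` query parameter into the URL path. Both forms, copied from the hunks for comparison:

```yaml
# before: database selected via a query parameter
- url: tcp://clickhouse:9000/?database=signoz_metrics
# after: database encoded in the DSN path
- url: tcp://clickhouse:9000/signoz_metrics
```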
@@ -1,8 +1,25 @@
 version: "2.4"

 services:
+  zookeeper-1:
+    image: bitnami/zookeeper:3.7.1
+    container_name: signoz-zookeeper-1
+    hostname: zookeeper-1
+    user: root
+    ports:
+      - "2181:2181"
+      - "2888:2888"
+      - "3888:3888"
+    volumes:
+      - ./data/zookeeper-1:/bitnami/zookeeper
+    environment:
+      - ZOO_SERVER_ID=1
+      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
+      - ALLOW_ANONYMOUS_LOGIN=yes
+      - ZOO_AUTOPURGE_INTERVAL=1
+
   clickhouse:
-    image: clickhouse/clickhouse-server:23.7.3-alpine
+    image: clickhouse/clickhouse-server:24.1.2-alpine
     container_name: signoz-clickhouse
     # ports:
     #   - "9000:9000"

@@ -11,8 +28,11 @@ services:
     volumes:
       - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
       - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
+      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
       # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
+      - ./user_scripts:/var/lib/clickhouse/user_scripts/
     restart: on-failure
     logging:
       options:

@@ -26,7 +46,7 @@ services:
         "wget",
         "--spider",
         "-q",
-        "localhost:8123/ping"
+        "0.0.0.0:8123/ping"
       ]
     interval: 30s
     timeout: 5s

@@ -34,7 +54,7 @@ services:

   alertmanager:
     container_name: signoz-alertmanager
-    image: signoz/alertmanager:0.23.4
+    image: signoz/alertmanager:0.23.5
     volumes:
       - ./data/alertmanager:/data
     depends_on:

@@ -45,18 +65,34 @@ services:
       - --queryService.url=http://query-service:8085
       - --storage.path=/data

+  otel-collector-migrator:
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
+    container_name: otel-migrator
+    command:
+      - "--dsn=tcp://clickhouse:9000"
+    depends_on:
+      clickhouse:
+        condition: service_healthy
+      # clickhouse-2:
+      #   condition: service_healthy
+      # clickhouse-3:
+      #   condition: service_healthy
+
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.79.7
+    image: signoz/signoz-otel-collector:0.88.24
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
+        "--manager-config=/etc/manager-config.yaml",
+        "--copy-path=/var/tmp/collector-config.yaml",
         "--feature-gates=-pkg.translator.prometheus.NormalizeName"
       ]
     # user: root # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
     ports:
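The new `otel-collector-migrator` service is wired in with compose's long-form `depends_on`, which gates startup on container state rather than mere start order. The two conditions used across these files, in a minimal sketch (service names as above; an illustration, not the full definitions):

```yaml
services:
  query-service:
    depends_on:
      clickhouse:
        condition: service_healthy                 # wait until the healthcheck passes
      otel-collector-migrator:
        condition: service_completed_successfully  # wait for the one-shot migrator to exit 0
```

This is what lets the schema migrator run to completion against ClickHouse before any collector or query-service starts writing.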
@@ -75,25 +111,9 @@ services:
     depends_on:
       clickhouse:
         condition: service_healthy
-
-  otel-collector-metrics:
-    container_name: signoz-otel-collector-metrics
-    image: signoz/signoz-otel-collector:0.79.7
-    command:
-      [
-        "--config=/etc/otel-collector-metrics-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
-    volumes:
-      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
-    # ports:
-    #   - "1777:1777" # pprof extension
-    #   - "8888:8888" # OtelCollector internal metrics
-    #   - "13133:13133" # Health check extension
-    #   - "55679:55679" # zPages extension
-    restart: on-failure
-    depends_on:
-      clickhouse:
-        condition: service_healthy
+      otel-collector-migrator:
+        condition: service_completed_successfully
+      query-service:
+        condition: service_healthy

   logspout:
     image: "gliderlabs/logspout:v3.2.14"
@@ -4,11 +4,11 @@ services:
   query-service:
     hostname: query-service
     build:
-      context: "../../../pkg/query-service"
-      dockerfile: "./Dockerfile"
+      context: "../../../"
+      dockerfile: "./pkg/query-service/Dockerfile"
       args:
         LDFLAGS: ""
-        TARGETPLATFORM: "${LOCAL_GOOS}/${LOCAL_GOARCH}"
+        TARGETPLATFORM: "${GOOS}/${GOARCH}"
     container_name: signoz-query-service
     environment:
       - ClickHouseUrl=tcp://clickhouse:9000

@@ -25,7 +25,7 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--prefer-delta=true"
+        # "--prefer-delta=true"
       ]
     ports:
       - "6060:6060"

@@ -52,8 +52,8 @@ services:
       context: "../../../frontend"
       dockerfile: "./Dockerfile"
       args:
-        TARGETOS: "${LOCAL_GOOS}"
-        TARGETPLATFORM: "${LOCAL_GOARCH}"
+        TARGETOS: "${GOOS}"
+        TARGETPLATFORM: "${GOARCH}"
     container_name: signoz-frontend
     environment:
       - FRONTEND_API_ENDPOINT=http://query-service:8080
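On the build changes above: a Dockerfile can only `COPY` paths inside its build context, so moving the context from `pkg/query-service` up to the repo root lets the query-service image pull in files that live outside that package; the Dockerfile path is then given relative to the new context. The pattern, restated from the hunk:

```yaml
services:
  query-service:
    build:
      context: "../../../"                          # repo root as the build context
      dockerfile: "./pkg/query-service/Dockerfile"  # resolved relative to that context
```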
deploy/docker/clickhouse-setup/docker-compose.testing.yaml (new file, 307 lines)
@@ -0,0 +1,307 @@
+version: "2.4"
+
+x-clickhouse-defaults: &clickhouse-defaults
+  restart: on-failure
+  # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
+  image: clickhouse/clickhouse-server:24.1.2-alpine
+  tty: true
+  depends_on:
+    - zookeeper-1
+    # - zookeeper-2
+    # - zookeeper-3
+  logging:
+    options:
+      max-size: 50m
+      max-file: "3"
+  healthcheck:
+    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
+    test:
+      [
+        "CMD",
+        "wget",
+        "--spider",
+        "-q",
+        "0.0.0.0:8123/ping"
+      ]
+    interval: 30s
+    timeout: 5s
+    retries: 3
+  ulimits:
+    nproc: 65535
+    nofile:
+      soft: 262144
+      hard: 262144
+
+x-db-depend: &db-depend
+  depends_on:
+    clickhouse:
+      condition: service_healthy
+    otel-collector-migrator:
+      condition: service_completed_successfully
+    # clickhouse-2:
+    #   condition: service_healthy
+    # clickhouse-3:
+    #   condition: service_healthy
+
+services:
+
+  zookeeper-1:
+    image: bitnami/zookeeper:3.7.1
+    container_name: signoz-zookeeper-1
+    hostname: zookeeper-1
+    user: root
+    ports:
+      - "2181:2181"
+      - "2888:2888"
+      - "3888:3888"
+    volumes:
+      - ./data/zookeeper-1:/bitnami/zookeeper
+    environment:
+      - ZOO_SERVER_ID=1
+      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
+      - ALLOW_ANONYMOUS_LOGIN=yes
+      - ZOO_AUTOPURGE_INTERVAL=1
+
+  # zookeeper-2:
+  #   image: bitnami/zookeeper:3.7.0
+  #   container_name: signoz-zookeeper-2
+  #   hostname: zookeeper-2
+  #   user: root
+  #   ports:
+  #     - "2182:2181"
+  #     - "2889:2888"
+  #     - "3889:3888"
+  #   volumes:
+  #     - ./data/zookeeper-2:/bitnami/zookeeper
+  #   environment:
+  #     - ZOO_SERVER_ID=2
+  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
+  #     - ALLOW_ANONYMOUS_LOGIN=yes
+  #     - ZOO_AUTOPURGE_INTERVAL=1
+
+  # zookeeper-3:
+  #   image: bitnami/zookeeper:3.7.0
+  #   container_name: signoz-zookeeper-3
+  #   hostname: zookeeper-3
+  #   user: root
+  #   ports:
+  #     - "2183:2181"
+  #     - "2890:2888"
+  #     - "3890:3888"
+  #   volumes:
+  #     - ./data/zookeeper-3:/bitnami/zookeeper
+  #   environment:
+  #     - ZOO_SERVER_ID=3
+  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
+  #     - ALLOW_ANONYMOUS_LOGIN=yes
+  #     - ZOO_AUTOPURGE_INTERVAL=1
+
+  clickhouse:
+    <<: *clickhouse-defaults
+    container_name: signoz-clickhouse
+    hostname: clickhouse
+    ports:
+      - "9000:9000"
+      - "8123:8123"
+      - "9181:9181"
+    volumes:
+      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
+      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+      - ./data/clickhouse/:/var/lib/clickhouse/
+      - ./user_scripts:/var/lib/clickhouse/user_scripts/
+
+  # clickhouse-2:
+  #   <<: *clickhouse-defaults
+  #   container_name: signoz-clickhouse-2
+  #   hostname: clickhouse-2
+  #   ports:
+  #     - "9001:9000"
+  #     - "8124:8123"
+  #     - "9182:9181"
+  #   volumes:
+  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
+  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
+  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/
+
+  # clickhouse-3:
+  #   <<: *clickhouse-defaults
+  #   container_name: signoz-clickhouse-3
+  #   hostname: clickhouse-3
+  #   ports:
+  #     - "9002:9000"
+  #     - "8125:8123"
+  #     - "9183:9181"
+  #   volumes:
+  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
+  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
+  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/
+
+  alertmanager:
+    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
+    container_name: signoz-alertmanager
+    volumes:
+      - ./data/alertmanager:/data
+    depends_on:
+      query-service:
+        condition: service_healthy
+    restart: on-failure
+    command:
+      - --queryService.url=http://query-service:8085
+      - --storage.path=/data
+
+  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
+
+  query-service:
+    image: signoz/query-service:${DOCKER_TAG:-0.46.0}
+    container_name: signoz-query-service
+    command:
+      [
+        "-config=/root/config/prometheus.yml",
+        "-gateway-url=https://api.staging.signoz.cloud"
+        # "--prefer-delta=true"
+      ]
+    # ports:
+    #   - "6060:6060" # pprof port
+    #   - "8080:8080" # query-service port
+    volumes:
+      - ./prometheus.yml:/root/config/prometheus.yml
+      - ../dashboards:/root/config/dashboards
+      - ./data/signoz/:/var/lib/signoz/
+    environment:
+      - ClickHouseUrl=tcp://clickhouse:9000
+      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
+      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
+      - DASHBOARDS_PATH=/root/config/dashboards
+      - STORAGE=clickhouse
+      - GODEBUG=netdns=go
+      - TELEMETRY_ENABLED=true
+      - DEPLOYMENT_TYPE=docker-standalone-amd
+    restart: on-failure
+    healthcheck:
+      test:
+        [
+          "CMD",
+          "wget",
+          "--spider",
+          "-q",
+          "localhost:8080/api/v1/health"
+        ]
+      interval: 30s
+      timeout: 5s
+      retries: 3
+    <<: *db-depend
+
+  frontend:
+    image: signoz/frontend:${DOCKER_TAG:-0.46.0}
+    container_name: signoz-frontend
+    restart: on-failure
+    depends_on:
+      - alertmanager
+      - query-service
+    ports:
+      - "3301:3301"
+    volumes:
+      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
+
+  otel-collector-migrator:
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
+    container_name: otel-migrator
+    command:
+      - "--dsn=tcp://clickhouse:9000"
+    depends_on:
+      clickhouse:
+        condition: service_healthy
+      # clickhouse-2:
+      #   condition: service_healthy
+      # clickhouse-3:
+      #   condition: service_healthy
+
+  otel-collector:
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.24}
+    container_name: signoz-otel-collector
+    command:
+      [
+        "--config=/etc/otel-collector-config.yaml",
+        "--manager-config=/etc/manager-config.yaml",
+        "--copy-path=/var/tmp/collector-config.yaml",
+        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
+      ]
+    user: root # required for reading docker container logs
+    volumes:
+      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
+      - /var/lib/docker/containers:/var/lib/docker/containers:ro
+    environment:
+      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
+      - DOCKER_MULTI_NODE_CLUSTER=false
+      - LOW_CARDINAL_EXCEPTION_GROUPING=false
+    ports:
+      # - "1777:1777" # pprof extension
+      - "4317:4317" # OTLP gRPC receiver
+      - "4318:4318" # OTLP HTTP receiver
+      # - "8888:8888" # OtelCollector internal metrics
+      # - "8889:8889" # signoz spanmetrics exposed by the agent
+      # - "9411:9411" # Zipkin port
+      # - "13133:13133" # health check extension
+      # - "14250:14250" # Jaeger gRPC
+      # - "14268:14268" # Jaeger thrift HTTP
+      # - "55678:55678" # OpenCensus receiver
+      # - "55679:55679" # zPages extension
+    restart: on-failure
+    depends_on:
+      clickhouse:
+        condition: service_healthy
+      otel-collector-migrator:
+        condition: service_completed_successfully
+      query-service:
+        condition: service_healthy
+
+  logspout:
+    image: "gliderlabs/logspout:v3.2.14"
+    container_name: signoz-logspout
+    volumes:
+      - /etc/hostname:/etc/host_hostname:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+    command: syslog+tcp://otel-collector:2255
+    depends_on:
+      - otel-collector
+    restart: on-failure
+
+  hotrod:
+    image: jaegertracing/example-hotrod:1.30
+    container_name: hotrod
+    logging:
+      options:
+        max-size: 50m
+        max-file: "3"
+    command: [ "all" ]
+    environment:
+      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
+
+  load-hotrod:
+    image: "signoz/locust:1.2.3"
+    container_name: load-hotrod
+    hostname: load-hotrod
+    environment:
+      ATTACKED_HOST: http://hotrod:8080
+      LOCUST_MODE: standalone
+      NO_PROXY: standalone
+      TASK_DELAY_FROM: 5
+      TASK_DELAY_TO: 30
+      QUIET_MODE: "${QUIET_MODE:-false}"
+      LOCUST_OPTS: "--headless -u 10 -r 1"
+    volumes:
+      - ../common/locust-scripts:/locust
@@ -3,7 +3,7 @@ version: "2.4"
 x-clickhouse-defaults: &clickhouse-defaults
   restart: on-failure
   # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
-  image: clickhouse/clickhouse-server:23.7.3-alpine
+  image: clickhouse/clickhouse-server:24.1.2-alpine
   tty: true
   depends_on:
     - zookeeper-1
@@ -21,7 +21,7 @@ x-clickhouse-defaults: &clickhouse-defaults
         "wget",
         "--spider",
         "-q",
-        "localhost:8123/ping"
+        "0.0.0.0:8123/ping"
       ]
     interval: 30s
     timeout: 5s
@@ -32,10 +32,12 @@ x-clickhouse-defaults: &clickhouse-defaults
       soft: 262144
       hard: 262144

-x-clickhouse-depend: &clickhouse-depend
+x-db-depend: &db-depend
   depends_on:
     clickhouse:
       condition: service_healthy
+    otel-collector-migrator:
+      condition: service_completed_successfully
     # clickhouse-2:
     #   condition: service_healthy
     # clickhouse-3:
@@ -147,7 +149,7 @@ services:
     #   - ./user_scripts:/var/lib/clickhouse/user_scripts/

   alertmanager:
-    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.4}
+    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
     container_name: signoz-alertmanager
     volumes:
       - ./data/alertmanager:/data
@@ -162,12 +164,12 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.29.3}
+    image: signoz/query-service:${DOCKER_TAG:-0.46.0}
     container_name: signoz-query-service
     command:
       [
-        "-config=/root/config/prometheus.yml",
-        "--prefer-delta=true"
+        "-config=/root/config/prometheus.yml"
+        # "--prefer-delta=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
@@ -177,7 +179,7 @@ services:
       - ../dashboards:/root/config/dashboards
       - ./data/signoz/:/var/lib/signoz/
     environment:
-      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
+      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
@@ -198,10 +200,10 @@ services:
       interval: 30s
       timeout: 5s
       retries: 3
-    <<: *clickhouse-depend
+    <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.29.3}
+    image: signoz/frontend:${DOCKER_TAG:-0.46.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -212,17 +214,34 @@ services:
     volumes:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

+  otel-collector-migrator:
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
+    container_name: otel-migrator
+    command:
+      - "--dsn=tcp://clickhouse:9000"
+    depends_on:
+      clickhouse:
+        condition: service_healthy
+      # clickhouse-2:
+      #   condition: service_healthy
+      # clickhouse-3:
+      #   condition: service_healthy
+
+
   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.7}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.24}
     container_name: signoz-otel-collector
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
+        "--manager-config=/etc/manager-config.yaml",
+        "--copy-path=/var/tmp/collector-config.yaml",
         "--feature-gates=-pkg.translator.prometheus.NormalizeName"
       ]
     user: root # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
@@ -241,25 +260,13 @@ services:
     #   - "55678:55678"       # OpenCensus receiver
     #   - "55679:55679"       # zPages extension
     restart: on-failure
-    <<: *clickhouse-depend
-
-  otel-collector-metrics:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.7}
-    container_name: signoz-otel-collector-metrics
-    command:
-      [
-        "--config=/etc/otel-collector-metrics-config.yaml",
-        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
-      ]
-    volumes:
-      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
-    # ports:
-    #   - "1777:1777"     # pprof extension
-    #   - "8888:8888"     # OtelCollector internal metrics
-    #   - "13133:13133"   # Health check extension
-    #   - "55679:55679"   # zPages extension
-    restart: on-failure
-    <<: *clickhouse-depend
+    depends_on:
+      clickhouse:
+        condition: service_healthy
+      otel-collector-migrator:
+        condition: service_completed_successfully
+      query-service:
+        condition: service_healthy

   logspout:
     image: "gliderlabs/logspout:v3.2.14"
deploy/docker/clickhouse-setup/keeper_config.xml (new file, 64 lines)
@@ -0,0 +1,64 @@
+<clickhouse>
+    <logger>
+        <!-- Possible levels [1]:
+
+          - none (turns off logging)
+          - fatal
+          - critical
+          - error
+          - warning
+          - notice
+          - information
+          - debug
+          - trace
+
+            [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
+        -->
+        <level>information</level>
+        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
+        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
+        <!-- Rotation policy
+             See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
+          -->
+        <size>1000M</size>
+        <count>10</count>
+        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
+    </logger>
+
+    <listen_host>0.0.0.0</listen_host>
+    <max_connections>4096</max_connections>
+
+    <keeper_server>
+        <tcp_port>9181</tcp_port>
+
+        <!-- Must be unique among all keeper serves -->
+        <server_id>1</server_id>
+
+        <log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>
+        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
+
+        <coordination_settings>
+            <operation_timeout_ms>10000</operation_timeout_ms>
+            <min_session_timeout_ms>10000</min_session_timeout_ms>
+            <session_timeout_ms>100000</session_timeout_ms>
+            <raft_logs_level>information</raft_logs_level>
+            <compress_logs>false</compress_logs>
+            <!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h -->
+        </coordination_settings>
+
+        <!-- enable sanity hostname checks for cluster configuration (e.g. if localhost is used with remote endpoints) -->
+        <hostname_checks_enabled>true</hostname_checks_enabled>
+        <raft_configuration>
+            <server>
+                <id>1</id>
+
+                <!-- Internal port and hostname -->
+                <hostname>clickhouses-keeper-1</hostname>
+                <port>9234</port>
+            </server>
+
+            <!-- Add more servers here -->
+
+        </raft_configuration>
+    </keeper_server>
+</clickhouse>
@@ -15,13 +15,9 @@ receivers:
       # please remove names from below if you want to collect logs from them
       - type: filter
         id: signoz_logs_filter
-        expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
+        expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
   opencensus:
     endpoint: 0.0.0.0:55678
-  otlp/spanmetrics:
-    protocols:
-      grpc:
-        endpoint: localhost:12345
   otlp:
     protocols:
       grpc:
@@ -62,46 +58,13 @@ receivers:


 processors:
-  logstransform/internal:
-    operators:
-      - type: trace_parser
-        if: '"trace_id" in attributes or "span_id" in attributes'
-        trace_id:
-          parse_from: attributes.trace_id
-        span_id:
-          parse_from: attributes.span_id
-        output: remove_trace_id
-      - type: trace_parser
-        if: '"traceId" in attributes or "spanId" in attributes'
-        trace_id:
-          parse_from: attributes.traceId
-        span_id:
-          parse_from: attributes.spanId
-        output: remove_traceId
-      - id: remove_traceId
-        type: remove
-        if: '"traceId" in attributes'
-        field: attributes.traceId
-        output: remove_spanId
-      - id: remove_spanId
-        type: remove
-        if: '"spanId" in attributes'
-        field: attributes.spanId
-      - id: remove_trace_id
-        type: remove
-        if: '"trace_id" in attributes'
-        field: attributes.trace_id
-        output: remove_span_id
-      - id: remove_span_id
-        type: remove
-        if: '"span_id" in attributes'
-        field: attributes.span_id
   batch:
     send_batch_size: 10000
     send_batch_max_size: 11000
     timeout: 10s
-  signozspanmetrics/prometheus:
-    metrics_exporter: prometheus
+  signozspanmetrics/cumulative:
+    metrics_exporter: clickhousemetricswrite
+    metrics_flush_interval: 60s
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
     dimensions:
@@ -112,7 +75,16 @@ processors:
       # This is added to ensure the uniqueness of the timeseries
       # Otherwise, identical timeseries produced by multiple replicas of
      # collectors result in incorrect APM metrics
-      - name: 'signoz.collector.id'
+      - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name
   # memory_limiter:
   #   # 80% of maximum memory up to 2G
   #   limit_mib: 1500
@@ -132,6 +104,31 @@ processors:
     # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
     detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
     timeout: 2s
+  signozspanmetrics/delta:
+    metrics_exporter: clickhousemetricswrite
+    metrics_flush_interval: 60s
+    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
+    dimensions_cache_size: 100000
+    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
+    enable_exp_histogram: true
+    dimensions:
+      - name: service.namespace
+        default: default
+      - name: deployment.environment
+        default: default
+      # This is added to ensure the uniqueness of the timeseries
+      # Otherwise, identical timeseries produced by multiple replicas of
+      # collectors result in incorrect APM metrics
+      - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name

 extensions:
   health_check:
@@ -143,30 +140,20 @@ extensions:

 exporters:
   clickhousetraces:
-    datasource: tcp://clickhouse:9000/?database=signoz_traces
+    datasource: tcp://clickhouse:9000/signoz_traces
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
     low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
   clickhousemetricswrite:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
+    endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
       enabled: true
   clickhousemetricswrite/prometheus:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
-  prometheus:
-    endpoint: 0.0.0.0:8889
-  # logging: {}
-
+    endpoint: tcp://clickhouse:9000/signoz_metrics
   clickhouselogsexporter:
-    dsn: tcp://clickhouse:9000/
+    dsn: tcp://clickhouse:9000/signoz_logs
     docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
-    timeout: 5s
-    sending_queue:
-      queue_size: 100
-    retry_on_failure:
-      enabled: true
-      initial_interval: 5s
-      max_interval: 30s
-      max_elapsed_time: 300s
+    timeout: 10s
+  # logging: {}

 service:
   telemetry:
@@ -179,7 +166,7 @@ service:
   pipelines:
     traces:
       receivers: [jaeger, otlp]
-      processors: [signozspanmetrics/prometheus, batch]
+      processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
       exporters: [clickhousetraces]
     metrics:
       receivers: [otlp]
@@ -193,10 +180,7 @@ service:
       receivers: [prometheus]
       processors: [batch]
       exporters: [clickhousemetricswrite/prometheus]
-    metrics/spanmetrics:
-      receivers: [otlp/spanmetrics]
-      exporters: [prometheus]
     logs:
       receivers: [otlp, tcplog/docker]
-      processors: [logstransform/internal, batch]
+      processors: [batch]
       exporters: [clickhouselogsexporter]
@@ -1,69 +0,0 @@
-receivers:
-  otlp:
-    protocols:
-      grpc:
-      http:
-  prometheus:
-    config:
-      scrape_configs:
-        # otel-collector-metrics internal metrics
-        - job_name: otel-collector-metrics
-          scrape_interval: 60s
-          static_configs:
-            - targets:
-                - localhost:8888
-              labels:
-                job_name: otel-collector-metrics
-        # SigNoz span metrics
-        - job_name: signozspanmetrics-collector
-          scrape_interval: 60s
-          static_configs:
-            - targets:
-                - otel-collector:8889
-
-processors:
-  batch:
-    send_batch_size: 10000
-    send_batch_max_size: 11000
-    timeout: 10s
-  # memory_limiter:
-  #   # 80% of maximum memory up to 2G
-  #   limit_mib: 1500
-  #   # 25% of limit up to 2G
-  #   spike_limit_mib: 512
-  #   check_interval: 5s
-  #
-  #   # 50% of the maximum memory
-  #   limit_percentage: 50
-  #   # 20% of max memory usage spike expected
-  #   spike_limit_percentage: 20
-  # queued_retry:
-  #   num_workers: 4
-  #   queue_size: 100
-  #   retry_on_failure: true
-
-extensions:
-  health_check:
-    endpoint: 0.0.0.0:13133
-  zpages:
-    endpoint: 0.0.0.0:55679
-  pprof:
-    endpoint: 0.0.0.0:1777
-
-exporters:
-  clickhousemetricswrite:
-    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
-
-service:
-  telemetry:
-    metrics:
-      address: 0.0.0.0:8888
-  extensions:
-    - health_check
-    - zpages
-    - pprof
-  pipelines:
-    metrics:
-      receivers: [prometheus]
-      processors: [batch]
-      exporters: [clickhousemetricswrite]
@@ -0,0 +1 @@
+server_endpoint: ws://query-service:4320/v1/opamp
@@ -22,4 +22,4 @@ rule_files:
 scrape_configs: []

 remote_read:
-  - url: tcp://clickhouse:9000/?database=signoz_metrics
+  - url: tcp://clickhouse:9000/signoz_metrics
@@ -534,7 +534,7 @@ else
    echo ""
    echo -e "🟢 Your frontend is running on http://localhost:3301"
    echo ""
-   echo "ℹ️ By default, retention period is set to 7 days for logs and traces, and 30 days for metrics."
+   echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
    echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"

    echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
e2e/package.json (new file, 14 lines)
@@ -0,0 +1,14 @@
+{
+  "name": "e2e",
+  "version": "1.0.0",
+  "main": "index.js",
+  "license": "MIT",
+  "devDependencies": {
+    "@playwright/test": "^1.22.0",
+    "@types/node": "^20.9.2"
+  },
+  "scripts": {},
+  "dependencies": {
+    "dotenv": "8.2.0"
+  }
+}
e2e/playwright.config.ts (new file, 46 lines)
@@ -0,0 +1,46 @@
+import { defineConfig, devices } from "@playwright/test";
+import dotenv from "dotenv";
+
+dotenv.config();
+
+export default defineConfig({
+  testDir: "./tests",
+
+  fullyParallel: true,
+
+  forbidOnly: !!process.env.CI,
+
+  name: "Signoz E2E",
+
+  retries: process.env.CI ? 2 : 0,
+
+  reporter: process.env.CI ? "github" : "list",
+
+  preserveOutput: "always",
+
+  updateSnapshots: "all",
+
+  quiet: false,
+
+  testMatch: ["**/*.spec.ts"],
+
+  use: {
+    trace: "on-first-retry",
+
+    baseURL:
+      process.env.PLAYWRIGHT_TEST_BASE_URL || "https://stagingapp.signoz.io/",
+  },
+
+  projects: [
+    { name: "setup", testMatch: /.*\.setup\.ts/ },
+    {
+      name: "chromium",
+      use: {
+        ...devices["Desktop Chrome"],
+        // Use prepared auth state.
+        storageState: ".auth/user.json",
+      },
+      dependencies: ["setup"],
+    },
+  ],
+});
e2e/tests/auth.setup.ts (new file, 37 lines)
@@ -0,0 +1,37 @@
+import { test, expect } from "@playwright/test";
+import ROUTES from "../../frontend/src/constants/routes";
+import dotenv from "dotenv";
+
+dotenv.config();
+
+const authFile = ".auth/user.json";
+
+test("E2E Login Test", async ({ page }) => {
+  await Promise.all([page.goto("/"), page.waitForRequest("**/version")]);
+
+  const signup = "Monitor your applications. Find what is causing issues.";
+
+  const el = await page.locator(`text=${signup}`);
+
+  expect(el).toBeVisible();
+
+  await page
+    .locator("id=loginEmail")
+    .type(
+      process.env.PLAYWRIGHT_USERNAME ? process.env.PLAYWRIGHT_USERNAME : ""
+    );
+
+  await page.getByText("Next").click();
+
+  await page
+    .locator('input[id="currentPassword"]')
+    .fill(
+      process.env.PLAYWRIGHT_PASSWORD ? process.env.PLAYWRIGHT_PASSWORD : ""
+    );
+
+  await page.locator('button[data-attr="signup"]').click();
+
+  await expect(page).toHaveURL(ROUTES.APPLICATION);
+
+  await page.context().storageState({ path: authFile });
+});
e2e/tests/contants.ts (new file, 10 lines)
@@ -0,0 +1,10 @@
+export const SERVICE_TABLE_HEADERS = {
+  APPLICATION: "Applicaton",
+  P99LATENCY: "P99 latency (in ms)",
+  ERROR_RATE: "Error Rate (% of total)",
+  OPS_PER_SECOND: "Operations Per Second",
+};
+
+export const DATA_TEST_IDS = {
+  NEW_DASHBOARD_BTN: "create-new-dashboard",
+};
e2e/tests/navigation.spec.ts (new file, 40 lines)
@@ -0,0 +1,40 @@
+import { test, expect } from "@playwright/test";
+import ROUTES from "../../frontend/src/constants/routes";
+import { DATA_TEST_IDS, SERVICE_TABLE_HEADERS } from "./contants";
+
+test("Basic Navigation Check across different resources", async ({ page }) => {
+  // route to services page and check if the page renders fine with BE contract
+  await Promise.all([
+    page.goto(ROUTES.APPLICATION),
+    page.waitForRequest("**/v1/services"),
+  ]);
+
+  const p99Latency = page.locator(
+    `th:has-text("${SERVICE_TABLE_HEADERS.P99LATENCY}")`
+  );
+
+  await expect(p99Latency).toBeVisible();
+
+  // route to the new trace explorer page and check if the page renders fine
+  await page.goto(ROUTES.TRACES_EXPLORER);
+
+  await page.waitForLoadState("networkidle");
+
+  const listViewTable = await page
+    .locator('div[role="presentation"]')
+    .isVisible();
+
+  expect(listViewTable).toBeTruthy();
+
+  // route to the dashboards page and check if the page renders fine
+  await Promise.all([
+    page.goto(ROUTES.ALL_DASHBOARD),
+    page.waitForRequest("**/v1/dashboards"),
+  ]);
+
+  const newDashboardBtn = await page
+    .locator(`data-testid=${DATA_TEST_IDS.NEW_DASHBOARD_BTN}`)
+    .isVisible();
+
+  expect(newDashboardBtn).toBeTruthy();
+});
e2e/yarn.lock (new file, 46 lines)
@@ -0,0 +1,46 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+"@playwright/test@^1.22.0":
+  version "1.40.0"
+  resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.40.0.tgz#d06c506977dd7863aa16e07f2136351ecc1be6ed"
+  integrity sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==
+  dependencies:
+    playwright "1.40.0"
+
+"@types/node@^20.9.2":
+  version "20.9.2"
+  resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.2.tgz#002815c8e87fe0c9369121c78b52e800fadc0ac6"
+  integrity sha512-WHZXKFCEyIUJzAwh3NyyTHYSR35SevJ6mZ1nWwJafKtiQbqRTIKSRcw3Ma3acqgsent3RRDqeVwpHntMk+9irg==
+  dependencies:
+    undici-types "~5.26.4"
+
+dotenv@8.2.0:
+  version "8.2.0"
+  resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a"
+  integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw==
+
+fsevents@2.3.2:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
+  integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
+
+playwright-core@1.40.0:
+  version "1.40.0"
+  resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.40.0.tgz#82f61e5504cb3097803b6f8bbd98190dd34bdf14"
+  integrity sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==
+
+playwright@1.40.0:
+  version "1.40.0"
+  resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.40.0.tgz#2a1824b9fe5c4fe52ed53db9ea68003543a99df0"
+  integrity sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==
+  dependencies:
+    playwright-core "1.40.0"
+  optionalDependencies:
+    fsevents "2.3.2"
+
+undici-types@~5.26.4:
+  version "5.26.5"
+  resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
+  integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
@@ -1,43 +1,24 @@
-FROM golang:1.21-bookworm AS builder
-
-# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
-ARG LD_FLAGS
-ARG TARGETPLATFORM
-
-ENV CGO_ENABLED=1
-ENV GOPATH=/go
-
-RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
-    export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2)
-
-# Prepare and enter src directory
-WORKDIR /go/src/github.com/signoz/signoz
-
-# Add the sources and proceed with build
-ADD . .
-RUN cd ee/query-service \
-    && go build -tags timetzdata -a -o ./bin/query-service \
-    -ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" \
-    && chmod +x ./bin/query-service
-
-
 # use a minimal alpine image
-FROM alpine:3.16.7
+FROM alpine:3.18.6

 # Add Maintainer Info
 LABEL maintainer="signoz"

+# define arguments that can be passed during build time
+ARG TARGETOS TARGETARCH
+
 # add ca-certificates in case you need them
 RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*

 # set working directory
 WORKDIR /root

-# copy the binary from builder
-COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-service .
+# copy the query-service binary
+COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service

 # copy prometheus YAML config
 COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
+COPY pkg/query-service/templates /root/templates

 # Make query-service executable for non-root users
 RUN chmod 755 /root /root/query-service
@@ -45,7 +26,6 @@ RUN chmod 755 /root /root/query-service
 # run the binary
 ENTRYPOINT ["./query-service"]

-CMD ["-config", "../config/prometheus.yml"]
-# CMD ["./query-service -config /root/config/prometheus.yml"]
+CMD ["-config", "/root/config/prometheus.yml"]

 EXPOSE 8080
@@ -2,13 +2,17 @@ package api

 import (
 	"net/http"
+	"net/http/httputil"
 	"time"

 	"github.com/gorilla/mux"
 	"go.signoz.io/signoz/ee/query-service/dao"
+	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
 	"go.signoz.io/signoz/ee/query-service/interfaces"
 	"go.signoz.io/signoz/ee/query-service/license"
+	"go.signoz.io/signoz/ee/query-service/usage"
 	baseapp "go.signoz.io/signoz/pkg/query-service/app"
+	"go.signoz.io/signoz/pkg/query-service/app/integrations"
 	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
 	"go.signoz.io/signoz/pkg/query-service/cache"
 	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
@@ -27,10 +31,13 @@ type APIHandlerOptions struct {
 	DialTimeout time.Duration
 	AppDao dao.ModelDao
 	RulesManager *rules.Manager
+	UsageManager *usage.Manager
 	FeatureFlags baseint.FeatureLookup
 	LicenseManager *license.Manager
+	IntegrationsController *integrations.Controller
 	LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
 	Cache cache.Cache
+	Gateway *httputil.ReverseProxy
 	// Querier Influx Interval
 	FluxInterval time.Duration
 }
@@ -54,6 +61,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
 		AppDao: opts.AppDao,
 		RuleManager: opts.RulesManager,
 		FeatureFlags: opts.FeatureFlags,
+		IntegrationsController: opts.IntegrationsController,
 		LogsParsingPipelineController: opts.LogsParsingPipelineController,
 		Cache: opts.Cache,
 		FluxInterval: opts.FluxInterval,
@@ -82,10 +90,18 @@ func (ah *APIHandler) LM() *license.Manager {
 	return ah.opts.LicenseManager
 }

+func (ah *APIHandler) UM() *usage.Manager {
+	return ah.opts.UsageManager
+}
+
 func (ah *APIHandler) AppDao() dao.ModelDao {
 	return ah.opts.AppDao
 }

+func (ah *APIHandler) Gateway() *httputil.ReverseProxy {
+	return ah.opts.Gateway
+}
+
 func (ah *APIHandler) CheckFeature(f string) bool {
 	err := ah.FF().CheckFeature(f)
 	return err == nil
@@ -143,12 +159,26 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
 	router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v2/metrics/query_range", am.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)

 	// PAT APIs
-	router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.createPAT)).Methods(http.MethodPost)
-	router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.getPATs)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v1/pat/{id}", am.OpenAccess(ah.deletePAT)).Methods(http.MethodDelete)
+	router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.createPAT)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.getPATs)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.updatePAT)).Methods(http.MethodPut)
+	router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.revokePAT)).Methods(http.MethodDelete)
+
+	router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.checkout)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.portalSession)).Methods(http.MethodPost)
+
+	router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
+	router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
+
+	router.HandleFunc("/api/v2/licenses",
+		am.ViewAccess(ah.listLicensesV2)).
+		Methods(http.MethodGet)
+
+	// Gateway
+	router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))

 	ah.APIHandler.RegisterRoutes(router, am)

@@ -5,22 +5,22 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"

 	"github.com/gorilla/mux"
+	"go.uber.org/zap"

 	"go.signoz.io/signoz/ee/query-service/constants"
 	"go.signoz.io/signoz/ee/query-service/model"
-	"go.signoz.io/signoz/pkg/query-service/auth"
 	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	"go.uber.org/zap"
 )

 func parseRequest(r *http.Request, req interface{}) error {
 	defer r.Body.Close()
-	requestBody, err := ioutil.ReadAll(r.Body)
+	requestBody, err := io.ReadAll(r.Body)
 	if err != nil {
 		return err
 	}
@@ -35,14 +35,14 @@ func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
 	req := basemodel.LoginRequest{}
 	err := parseRequest(r, &req)
 	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}

 	ctx := context.Background()

 	if req.Email != "" && ah.CheckFeature(model.SSO) {
-		var apierr basemodel.BaseApiError
+		var apierr *basemodel.ApiError
 		_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
 		if apierr != nil && !apierr.IsNil() {
 			RespondError(w, apierr, nil)
@@ -50,7 +50,7 @@ func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
 	}

 	// if all looks good, call auth
-	resp, err := auth.Login(ctx, &req)
+	resp, err := baseauth.Login(ctx, &req)
 	if ah.HandleError(w, err, http.StatusUnauthorized) {
 		return
 	}
@@ -71,40 +71,40 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
 	var req *baseauth.RegisterRequest

 	defer r.Body.Close()
-	requestBody, err := ioutil.ReadAll(r.Body)
+	requestBody, err := io.ReadAll(r.Body)
 	if err != nil {
-		zap.S().Errorf("received no input in api\n", err)
-		RespondError(w, model.BadRequest(err), nil)
+		zap.L().Error("received no input in api", zap.Error(err))
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}

 	err = json.Unmarshal(requestBody, &req)

 	if err != nil {
-		zap.S().Errorf("received invalid user registration request", zap.Error(err))
-		RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
+		zap.L().Error("received invalid user registration request", zap.Error(err))
+		RespondError(w, basemodel.BadRequest(fmt.Errorf("failed to register user")), nil)
 		return
 	}

 	// get invite object
 	invite, err := baseauth.ValidateInvite(ctx, req)
 	if err != nil {
-		zap.S().Errorf("failed to validate invite token", err)
-		RespondError(w, model.BadRequest(err), nil)
+		zap.L().Error("failed to validate invite token", zap.Error(err))
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}

 	if invite == nil {
-		zap.S().Errorf("failed to validate invite token: it is either empty or invalid", err)
-		RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
+		zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
+		RespondError(w, basemodel.BadRequest(basemodel.ErrSignupFailed{}), nil)
 		return
 	}

 	// get auth domain from email domain
 	domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
 	if apierr != nil {
-		zap.S().Errorf("failed to get domain from email", apierr)
-		RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
+		zap.L().Error("failed to get domain from email", zap.Error(apierr))
+		RespondError(w, basemodel.InternalError(basemodel.ErrSignupFailed{}), nil)
 	}

 	precheckResp := &basemodel.PrecheckResponse{
@@ -120,7 +120,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	var precheckError basemodel.BaseApiError
+	var precheckError *basemodel.ApiError

 	precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
 	if precheckError != nil {
@@ -129,8 +129,8 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {

 	} else {
 		// no-sso, validate password
-		if err := auth.ValidatePassword(req.Password); err != nil {
-			RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
+		if err := baseauth.ValidatePassword(req.Password); err != nil {
+			RespondError(w, basemodel.InternalError(fmt.Errorf("password is not in a valid format")), nil)
 			return
 		}

@@ -155,7 +155,7 @@ func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {

 	inviteObject, err := baseauth.GetInvite(context.Background(), token)
 	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}

@@ -204,24 +204,24 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
 	ctx := context.Background()

 	if !ah.CheckFeature(model.SSO) {
-		zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
+		zap.L().Error("[receiveGoogleAuth] sso requested but feature unavailable in org domain")
 		http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
 		return
 	}

 	q := r.URL.Query()
 	if errType := q.Get("error"); errType != "" {
-		zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
+		zap.L().Error("[receiveGoogleAuth] failed to login with google auth", zap.String("error", errType), zap.String("error_description", q.Get("error_description")))
 		http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
 		return
 	}

 	relayState := q.Get("state")
-	zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
+	zap.L().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))

 	parsedState, err := url.Parse(relayState)
 	if err != nil || relayState == "" {
-		zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
+		zap.L().Error("[receiveGoogleAuth] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
 		handleSsoError(w, r, redirectUri)
 		return
 	}
@@ -240,17 +240,22 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
 	// prepare google callback handler using parsedState -
 	// which contains redirect URL (front-end endpoint)
 	callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
+	if err != nil {
+		zap.L().Error("[receiveGoogleAuth] failed to prepare google oauth provider", zap.String("domain", domain.String()), zap.Error(err))
+		handleSsoError(w, r, redirectUri)
+		return
+	}
+
 	identity, err := callbackHandler.HandleCallback(r)
 	if err != nil {
-		zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
+		zap.L().Error("[receiveGoogleAuth] failed to process HandleCallback ", zap.String("domain", domain.String()), zap.Error(err))
 		handleSsoError(w, r, redirectUri)
 		return
 	}

 	nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
 	if err != nil {
-		zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
+		zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
 		handleSsoError(w, r, redirectUri)
 		return
 	}
@@ -265,14 +270,14 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()

 	if !ah.CheckFeature(model.SSO) {
-		zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
+		zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
 		http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
 		return
 	}

 	err := r.ParseForm()
 	if err != nil {
-		zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
+		zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
 		handleSsoError(w, r, redirectUri)
 		return
 	}
@@ -280,11 +285,11 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
 	// the relay state is sent when a login request is submitted to
 	// Idp.
 	relayState := r.FormValue("RelayState")
-	zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
+	zap.L().Debug("[receiveML] relay state", zap.String("relayState", relayState))

 	parsedState, err := url.Parse(relayState)
 	if err != nil || relayState == "" {
-		zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
+		zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
 		handleSsoError(w, r, redirectUri)
 		return
 	}
@@ -301,34 +306,34 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {

 	sp, err := domain.PrepareSamlRequest(parsedState)
 	if err != nil {
-		zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
+		zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
 		handleSsoError(w, r, redirectUri)
 		return
 	}

 	assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
 	if err != nil {
-		zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
+		zap.L().Error("[receiveSAML] failed to retrieve assertion info from saml response", zap.String("domain", domain.String()), zap.Error(err))
 		handleSsoError(w, r, redirectUri)
 		return
 	}

 	if assertionInfo.WarningInfo.InvalidTime {
-		zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
+		zap.L().Error("[receiveSAML] expired saml response", zap.String("domain", domain.String()), zap.Error(err))
 		handleSsoError(w, r, redirectUri)
 		return
 	}

 	email := assertionInfo.NameID
 	if email == "" {
-		zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
+		zap.L().Error("[receiveSAML] invalid email in the SSO response", zap.String("domain", domain.String()))
 		handleSsoError(w, r, redirectUri)
 		return
 	}

 	nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
 	if err != nil {
-		zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
+		zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
 		handleSsoError(w, r, redirectUri)
 		return
 	}
ee/query-service/app/api/dashboard.go (new file, 52 lines)
@@ -0,0 +1,52 @@
+package api
+
+import (
+	"net/http"
+
+	"github.com/gorilla/mux"
+	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
+	"go.signoz.io/signoz/pkg/query-service/auth"
+	"go.signoz.io/signoz/pkg/query-service/common"
+	"go.signoz.io/signoz/pkg/query-service/model"
+)
+
+func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
+	ah.lockUnlockDashboard(w, r, true)
+}
+
+func (ah *APIHandler) unlockDashboard(w http.ResponseWriter, r *http.Request) {
+	ah.lockUnlockDashboard(w, r, false)
+}
+
+func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request, lock bool) {
+	// Locking can only be done by the owner of the dashboard
+	// or an admin
+
+	// - Fetch the dashboard
+	// - Check if the user is the owner or an admin
+	// - If yes, lock/unlock the dashboard
+	// - If no, return 403
+
+	// Get the dashboard UUID from the request
+	uuid := mux.Vars(r)["uuid"]
+	dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
+	if err != nil {
+		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
+		return
+	}
+
+	user := common.GetUserFromContext(r.Context())
+	if !auth.IsAdmin(user) && (dashboard.CreateBy != nil && *dashboard.CreateBy != user.Email) {
+		RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: err}, "You are not authorized to lock/unlock this dashboard")
+		return
+	}
+
+	// Lock/Unlock the dashboard
+	err = dashboards.LockUnlockDashboard(r.Context(), uuid, lock)
+	if err != nil {
+		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
+		return
+	}
+
+	ah.Respond(w, "Dashboard updated successfully")
+}
```diff
@@ -9,6 +9,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/gorilla/mux"
 	"go.signoz.io/signoz/ee/query-service/model"
+	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 )
 
 func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
@@ -27,12 +28,12 @@ func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
 	req := model.OrgDomain{}
 
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}
 
 	if err := req.ValidNew(); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}
 
@@ -50,18 +51,18 @@ func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
 	domainIdStr := mux.Vars(r)["id"]
 	domainId, err := uuid.Parse(domainIdStr)
 	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}
 
 	req := model.OrgDomain{Id: domainId}
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}
 	req.Id = domainId
 	if err := req.Valid(nil); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		RespondError(w, basemodel.BadRequest(err), nil)
 	}
 
 	if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
@@ -77,7 +78,7 @@ func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
 
 	domainId, err := uuid.Parse(domainIdStr)
 	if err != nil {
-		RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
+		RespondError(w, basemodel.BadRequest(fmt.Errorf("invalid domain id")), nil)
 		return
 	}
 
```
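Every handler in this hunk repeats the same decode, validate, RespondError shape. Purely as an aside, that shape can be factored once with generics; this is a hypothetical refactor sketch, not code from this changeset:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"
)

// Validator is anything that can check itself after decoding.
type Validator interface {
	Valid() error
}

// decodeValid decodes the request body into v and validates it, collapsing
// the two error paths the handlers above spell out by hand.
func decodeValid[T Validator](r *http.Request, v T) error {
	if err := json.NewDecoder(r.Body).Decode(v); err != nil {
		return fmt.Errorf("decode: %w", err)
	}
	if err := v.Valid(); err != nil {
		return fmt.Errorf("validate: %w", err)
	}
	return nil
}

type orgDomain struct{ Name string }

func (d *orgDomain) Valid() error {
	if d.Name == "" {
		return errors.New("name is required")
	}
	return nil
}

func main() {
	req, _ := http.NewRequest("POST", "/domains", strings.NewReader(`{"Name":""}`))
	var d orgDomain
	fmt.Println(decodeValid(req, &d)) // validate: name is required
}
```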
ee/query-service/app/api/gateway.go (new file, +34)

```diff
@@ -0,0 +1,34 @@
+package api
+
+import (
+	"net/http"
+	"strings"
+
+	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
+)
+
+func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
+	ctx := req.Context()
+	if !strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+gateway.AllowedPrefix) {
+		rw.WriteHeader(http.StatusNotFound)
+		return
+	}
+
+	license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
+	if err != nil {
+		RespondError(rw, err, nil)
+		return
+	}
+
+	//Create headers
+	var licenseKey string
+	if license != nil {
+		licenseKey = license.Key
+	}
+
+	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
+	req.Header.Set("X-Consumer-Username", "lid:00000000-0000-0000-0000-000000000000")
+	req.Header.Set("X-Consumer-Groups", "ns:default")
+
+	ah.Gateway().ServeHTTP(rw, req)
+}
```
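ServeGatewayHTTP is a thin gate in front of a reverse proxy: reject paths outside the allow-listed prefix, stamp identity headers, delegate. The same pattern with only the standard library looks roughly like this (the prefixes and upstream URL are invented for the sketch; the real values come from the gateway package and server options):

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strings"
)

func main() {
	// Hypothetical upstream; the real target comes from server options.
	target, err := url.Parse("http://gateway.internal:8080")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)

	http.HandleFunc("/api/gateway/", func(rw http.ResponseWriter, req *http.Request) {
		// Same shape as ServeGatewayHTTP: allow-list the prefix first.
		if !strings.HasPrefix(req.URL.Path, "/api/gateway/v1/") {
			rw.WriteHeader(http.StatusNotFound)
			return
		}
		// Stamp identity headers before handing the request to the proxy.
		req.Header.Set("X-Signoz-Cloud-Api-Key", "license-key-here")
		req.Header.Set("X-Consumer-Groups", "ns:default")
		proxy.ServeHTTP(rw, req)
	})
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```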
```diff
@@ -4,10 +4,62 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"go.signoz.io/signoz/ee/query-service/model"
+	"io"
 	"net/http"
+
+	"go.signoz.io/signoz/ee/query-service/constants"
+	"go.signoz.io/signoz/ee/query-service/model"
+	basemodel "go.signoz.io/signoz/pkg/query-service/model"
+	"go.uber.org/zap"
 )
 
+type DayWiseBreakdown struct {
+	Type      string        `json:"type"`
+	Breakdown []DayWiseData `json:"breakdown"`
+}
+
+type DayWiseData struct {
+	Timestamp int64   `json:"timestamp"`
+	Count     float64 `json:"count"`
+	Size      float64 `json:"size"`
+	UnitPrice float64 `json:"unitPrice"`
+	Quantity  float64 `json:"quantity"`
+	Total     float64 `json:"total"`
+}
+
+type tierBreakdown struct {
+	UnitPrice float64 `json:"unitPrice"`
+	Quantity  float64 `json:"quantity"`
+	TierStart int64   `json:"tierStart"`
+	TierEnd   int64   `json:"tierEnd"`
+	TierCost  float64 `json:"tierCost"`
+}
+
+type usageResponse struct {
+	Type             string           `json:"type"`
+	Unit             string           `json:"unit"`
+	Tiers            []tierBreakdown  `json:"tiers"`
+	DayWiseBreakdown DayWiseBreakdown `json:"dayWiseBreakdown"`
+}
+
+type details struct {
+	Total     float64         `json:"total"`
+	Breakdown []usageResponse `json:"breakdown"`
+	BaseFee   float64         `json:"baseFee"`
+	BillTotal float64         `json:"billTotal"`
+}
+
+type billingDetails struct {
+	Status string `json:"status"`
+	Data   struct {
+		BillingPeriodStart int64   `json:"billingPeriodStart"`
+		BillingPeriodEnd   int64   `json:"billingPeriodEnd"`
+		Details            details `json:"details"`
+		Discount           float64 `json:"discount"`
+		SubscriptionStatus string  `json:"subscriptionStatus"`
+	} `json:"data"`
+}
+
```
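These types exist purely to mirror the billing service's JSON. A quick way to sanity-check that the field tags line up is to unmarshal a sample payload; the payload below is invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copies of the types above, enough to exercise the JSON tags.
type tierBreakdown struct {
	UnitPrice float64 `json:"unitPrice"`
	Quantity  float64 `json:"quantity"`
	TierStart int64   `json:"tierStart"`
	TierEnd   int64   `json:"tierEnd"`
	TierCost  float64 `json:"tierCost"`
}

type usageResponse struct {
	Type  string          `json:"type"`
	Unit  string          `json:"unit"`
	Tiers []tierBreakdown `json:"tiers"`
}

func main() {
	// Invented sample payload in the shape getBilling expects.
	payload := `{"type":"metrics","unit":"mn_samples","tiers":[{"unitPrice":0.1,"quantity":120,"tierStart":0,"tierEnd":1000,"tierCost":12}]}`
	var u usageResponse
	if err := json.Unmarshal([]byte(payload), &u); err != nil {
		panic(err)
	}
	fmt.Printf("%s billed in %s: $%.2f\n", u.Type, u.Unit, u.Tiers[0].TierCost)
}
```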
```diff
 func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
 	licenses, apiError := ah.LM().GetLicenses(context.Background())
 	if apiError != nil {
@@ -17,20 +69,18 @@ func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
 }
 
 func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
 	var l model.License
 
 	if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}
 
 	if l.Key == "" {
-		RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
+		RespondError(w, basemodel.BadRequest(fmt.Errorf("license key is required")), nil)
 		return
 	}
-	license, apiError := ah.LM().Activate(ctx, l.Key)
+	license, apiError := ah.LM().Activate(r.Context(), l.Key)
 	if apiError != nil {
 		RespondError(w, apiError, nil)
 		return
@@ -38,3 +88,186 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
 
 	ah.Respond(w, license)
 }
+
+func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
+
+	type checkoutResponse struct {
+		Status string `json:"status"`
+		Data   struct {
+			RedirectURL string `json:"redirectURL"`
+		} `json:"data"`
+	}
+
+	hClient := &http.Client{}
+	req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
+	if err != nil {
+		RespondError(w, basemodel.InternalError(err), nil)
+		return
+	}
+	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
+	licenseResp, err := hClient.Do(req)
+	if err != nil {
+		RespondError(w, basemodel.InternalError(err), nil)
+		return
+	}
+
+	// decode response body
+	var resp checkoutResponse
+	if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
+		RespondError(w, basemodel.InternalError(err), nil)
+		return
+	}
+
+	ah.Respond(w, resp.Data)
+}
+
+func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
+	licenseKey := r.URL.Query().Get("licenseKey")
+
+	if licenseKey == "" {
+		RespondError(w, basemodel.BadRequest(fmt.Errorf("license key is required")), nil)
+		return
+	}
+
+	billingURL := fmt.Sprintf("%s/usage?licenseKey=%s", constants.LicenseSignozIo, licenseKey)
+
+	hClient := &http.Client{}
+	req, err := http.NewRequest("GET", billingURL, nil)
+	if err != nil {
+		RespondError(w, basemodel.InternalError(err), nil)
+		return
+	}
+	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
+	billingResp, err := hClient.Do(req)
+	if err != nil {
+		RespondError(w, basemodel.InternalError(err), nil)
+		return
+	}
+
+	// decode response body
+	var billingResponse billingDetails
+	if err := json.NewDecoder(billingResp.Body).Decode(&billingResponse); err != nil {
+		RespondError(w, basemodel.InternalError(err), nil)
+		return
+	}
+
+	// TODO(srikanthccv):Fetch the current day usage and add it to the response
+	ah.Respond(w, billingResponse.Data)
+}
+
+func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
+
+	licenses, apiError := ah.LM().GetLicenses(context.Background())
+	if apiError != nil {
+		RespondError(w, apiError, nil)
+	}
+
+	resp := model.Licenses{
+		TrialStart:                   -1,
+		TrialEnd:                     -1,
+		OnTrial:                      false,
+		WorkSpaceBlock:               false,
+		TrialConvertedToSubscription: false,
+		GracePeriodEnd:               -1,
+		Licenses:                     licenses,
+	}
+
+	var currentActiveLicenseKey string
+
+	for _, license := range licenses {
+		if license.IsCurrent {
+			currentActiveLicenseKey = license.Key
+		}
+	}
+
+	// For the case when no license is applied i.e community edition
+	// There will be no trial details or license details
+	if currentActiveLicenseKey == "" {
+		ah.Respond(w, resp)
+		return
+	}
+
+	// Fetch trial details
+	hClient := &http.Client{}
+	url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		zap.L().Error("Error while creating request for trial details", zap.Error(err))
+		// If there is an error in fetching trial details, we will still return the license details
+		// to avoid blocking the UI
+		ah.Respond(w, resp)
+		return
+	}
+	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
+	trialResp, err := hClient.Do(req)
+	if err != nil {
+		zap.L().Error("Error while fetching trial details", zap.Error(err))
+		// If there is an error in fetching trial details, we will still return the license details
+		// to avoid incorrectly blocking the UI
+		ah.Respond(w, resp)
+		return
+	}
+	defer trialResp.Body.Close()
+
+	trialRespBody, err := io.ReadAll(trialResp.Body)
+
+	if err != nil || trialResp.StatusCode != http.StatusOK {
+		zap.L().Error("Error while fetching trial details", zap.Error(err))
+		// If there is an error in fetching trial details, we will still return the license details
+		// to avoid incorrectly blocking the UI
+		ah.Respond(w, resp)
+		return
+	}
+
+	// decode response body
+	var trialRespData model.SubscriptionServerResp
+
+	if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
+		zap.L().Error("Error while decoding trial details", zap.Error(err))
+		// If there is an error in fetching trial details, we will still return the license details
+		// to avoid incorrectly blocking the UI
+		ah.Respond(w, resp)
+		return
+	}
+
+	resp.TrialStart = trialRespData.Data.TrialStart
+	resp.TrialEnd = trialRespData.Data.TrialEnd
+	resp.OnTrial = trialRespData.Data.OnTrial
+	resp.WorkSpaceBlock = trialRespData.Data.WorkSpaceBlock
+	resp.TrialConvertedToSubscription = trialRespData.Data.TrialConvertedToSubscription
+	resp.GracePeriodEnd = trialRespData.Data.GracePeriodEnd
+
+	ah.Respond(w, resp)
+}
+
+func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
+
+	type checkoutResponse struct {
+		Status string `json:"status"`
+		Data   struct {
+			RedirectURL string `json:"redirectURL"`
+		} `json:"data"`
+	}
+
+	hClient := &http.Client{}
+	req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
+	if err != nil {
+		RespondError(w, basemodel.InternalError(err), nil)
+		return
+	}
+	req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
+	licenseResp, err := hClient.Do(req)
+	if err != nil {
+		RespondError(w, basemodel.InternalError(err), nil)
+		return
+	}
+
+	// decode response body
+	var resp checkoutResponse
+	if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
+		RespondError(w, basemodel.InternalError(err), nil)
+		return
+	}
+
+	ah.Respond(w, resp.Data)
+}
```
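listLicensesV2 deliberately degrades gracefully: every failure while fetching trial details logs and returns the license response with defaults rather than erroring, so the UI is never blocked by a flaky upstream. The pattern in isolation, assuming a hypothetical endpoint:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

type trialDetails struct {
	TrialStart int64 `json:"trialStart"`
	TrialEnd   int64 `json:"trialEnd"`
}

// fetchTrialDetails returns (details, true) on success and (zero, false) on any
// failure, so callers can fall back to defaults instead of failing the request.
func fetchTrialDetails(client *http.Client, url string) (trialDetails, bool) {
	var td trialDetails
	resp, err := client.Get(url)
	if err != nil {
		return td, false
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return td, false
	}
	if err := json.NewDecoder(resp.Body).Decode(&td); err != nil {
		return td, false
	}
	return td, true
}

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	// Hypothetical endpoint; the real one is constants.LicenseSignozIo + "/trial".
	if td, ok := fetchTrialDetails(client, "https://license.example.com/trial?licenseKey=k"); ok {
		fmt.Println("trial window:", td.TrialStart, "-", td.TrialEnd)
	} else {
		fmt.Println("trial lookup failed; serving license details with defaults")
	}
}
```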
```diff
@@ -1,236 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"fmt"
-	"net/http"
-	"sync"
-	"text/template"
-	"time"
-
-	"go.signoz.io/signoz/pkg/query-service/app/metrics"
-	"go.signoz.io/signoz/pkg/query-service/app/parser"
-	"go.signoz.io/signoz/pkg/query-service/constants"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	querytemplate "go.signoz.io/signoz/pkg/query-service/utils/queryTemplate"
-	"go.uber.org/zap"
-)
-
-func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) {
-	if !ah.CheckFeature(basemodel.CustomMetricsFunction) {
-		zap.S().Info("CustomMetricsFunction feature is not enabled in this plan")
-		ah.APIHandler.QueryRangeMetricsV2(w, r)
-		return
-	}
-	metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
-
-	if apiErrorObj != nil {
-		zap.S().Errorf(apiErrorObj.Err.Error())
-		RespondError(w, apiErrorObj, nil)
-		return
-	}
-
-	// prometheus instant query needs same timestamp
-	if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
-		metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.PROM {
-		metricsQueryRangeParams.Start = metricsQueryRangeParams.End
-	}
-
-	// round up the end to nearest multiple
-	if metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER {
-		end := (metricsQueryRangeParams.End) / 1000
-		step := metricsQueryRangeParams.Step
-		metricsQueryRangeParams.End = (end / step * step) * 1000
-	}
-
-	type channelResult struct {
-		Series    []*basemodel.Series
-		TableName string
-		Err       error
-		Name      string
-		Query     string
-	}
-
-	execClickHouseQueries := func(queries map[string]string) ([]*basemodel.Series, []string, error, map[string]string) {
-		var seriesList []*basemodel.Series
-		var tableName []string
-		ch := make(chan channelResult, len(queries))
-		var wg sync.WaitGroup
-
-		for name, query := range queries {
-			wg.Add(1)
-			go func(name, query string) {
-				defer wg.Done()
-				seriesList, tableName, err := ah.opts.DataConnector.GetMetricResultEE(r.Context(), query)
-				for _, series := range seriesList {
-					series.QueryName = name
-				}
-
-				if err != nil {
-					ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
-					return
-				}
-				ch <- channelResult{Series: seriesList, TableName: tableName}
-			}(name, query)
-		}
-
-		wg.Wait()
-		close(ch)
-
-		var errs []error
-		errQuriesByName := make(map[string]string)
-		// read values from the channel
-		for r := range ch {
-			if r.Err != nil {
-				errs = append(errs, r.Err)
-				errQuriesByName[r.Name] = r.Query
-				continue
-			}
-			seriesList = append(seriesList, r.Series...)
-			tableName = append(tableName, r.TableName)
-		}
-		if len(errs) != 0 {
-			return nil, nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
-		}
-		return seriesList, tableName, nil, nil
-	}
-
-	execPromQueries := func(metricsQueryRangeParams *basemodel.QueryRangeParamsV2) ([]*basemodel.Series, error, map[string]string) {
-		var seriesList []*basemodel.Series
-		ch := make(chan channelResult, len(metricsQueryRangeParams.CompositeMetricQuery.PromQueries))
-		var wg sync.WaitGroup
-
-		for name, query := range metricsQueryRangeParams.CompositeMetricQuery.PromQueries {
-			if query.Disabled {
-				continue
-			}
-			wg.Add(1)
-			go func(name string, query *basemodel.PromQuery) {
-				var seriesList []*basemodel.Series
-				defer wg.Done()
-				tmpl := template.New("promql-query")
-				tmpl, tmplErr := tmpl.Parse(query.Query)
-				if tmplErr != nil {
-					ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
-					return
-				}
-				var queryBuf bytes.Buffer
-				tmplErr = tmpl.Execute(&queryBuf, metricsQueryRangeParams.Variables)
-				if tmplErr != nil {
-					ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
-					return
-				}
-				query.Query = queryBuf.String()
-				queryModel := basemodel.QueryRangeParams{
-					Start: time.UnixMilli(metricsQueryRangeParams.Start),
-					End:   time.UnixMilli(metricsQueryRangeParams.End),
-					Step:  time.Duration(metricsQueryRangeParams.Step * int64(time.Second)),
-					Query: query.Query,
-				}
-				promResult, _, err := ah.opts.DataConnector.GetQueryRangeResult(r.Context(), &queryModel)
-				if err != nil {
-					ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query.Query}
-					return
-				}
-				matrix, _ := promResult.Matrix()
-				for _, v := range matrix {
-					var s basemodel.Series
-					s.QueryName = name
-					s.Labels = v.Metric.Copy().Map()
-					for _, p := range v.Floats {
-						s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.F})
-					}
-					seriesList = append(seriesList, &s)
-				}
-				ch <- channelResult{Series: seriesList}
-			}(name, query)
-		}
-
-		wg.Wait()
-		close(ch)
-
-		var errs []error
-		errQuriesByName := make(map[string]string)
-		// read values from the channel
-		for r := range ch {
-			if r.Err != nil {
-				errs = append(errs, r.Err)
-				errQuriesByName[r.Name] = r.Query
-				continue
-			}
-			seriesList = append(seriesList, r.Series...)
-		}
-		if len(errs) != 0 {
-			return nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
-		}
-		return seriesList, nil, nil
-	}
-
-	var seriesList []*basemodel.Series
-	var tableName []string
-	var err error
-	var errQuriesByName map[string]string
-	switch metricsQueryRangeParams.CompositeMetricQuery.QueryType {
-	case basemodel.QUERY_BUILDER:
-		runQueries := metrics.PrepareBuilderMetricQueries(metricsQueryRangeParams, constants.SIGNOZ_TIMESERIES_TABLENAME)
-		if runQueries.Err != nil {
-			RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: runQueries.Err}, nil)
-			return
-		}
-		seriesList, tableName, err, errQuriesByName = execClickHouseQueries(runQueries.Queries)
-
-	case basemodel.CLICKHOUSE:
-		queries := make(map[string]string)
-
-		for name, chQuery := range metricsQueryRangeParams.CompositeMetricQuery.ClickHouseQueries {
-			if chQuery.Disabled {
-				continue
-			}
-			tmpl := template.New("clickhouse-query")
-			tmpl, err := tmpl.Parse(chQuery.Query)
-			if err != nil {
-				RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
-				return
-			}
-			var query bytes.Buffer
-
-			// replace go template variables
-			querytemplate.AssignReservedVars(metricsQueryRangeParams)
-
-			err = tmpl.Execute(&query, metricsQueryRangeParams.Variables)
-			if err != nil {
-				RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
-				return
-			}
-			queries[name] = query.String()
-		}
-		seriesList, tableName, err, errQuriesByName = execClickHouseQueries(queries)
-	case basemodel.PROM:
-		seriesList, err, errQuriesByName = execPromQueries(metricsQueryRangeParams)
-	default:
-		err = fmt.Errorf("invalid query type")
-		RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, errQuriesByName)
-		return
-	}
-
-	if err != nil {
-		apiErrObj := &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}
-		RespondError(w, apiErrObj, errQuriesByName)
-		return
-	}
-	if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
-		len(seriesList) > 1 &&
-		(metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER ||
-			metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.CLICKHOUSE) {
-		RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: fmt.Errorf("invalid: query resulted in more than one series for value type")}, nil)
-		return
-	}
-
-	type ResponseFormat struct {
-		ResultType string              `json:"resultType"`
-		Result     []*basemodel.Series `json:"result"`
-		TableName  []string            `json:"tableName"`
-	}
-	resp := ResponseFormat{ResultType: "matrix", Result: seriesList, TableName: tableName}
-	ah.Respond(w, resp)
-}
```
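The deleted handler's core concurrency shape (fan out one goroutine per named query, collect successes and per-query failures over a buffered channel) is a reusable pattern in its own right. A minimal sketch of that pattern, simplified from the deleted execClickHouseQueries (the real version carries series data and table names):

```go
package main

import (
	"fmt"
	"sync"
)

type result struct {
	Name string
	Out  string
	Err  error
}

// runAll executes fn for every named query concurrently and partitions the
// outcomes into successes and a name-keyed map of failures.
func runAll(queries map[string]string, fn func(q string) (string, error)) ([]string, map[string]string) {
	ch := make(chan result, len(queries)) // buffered: senders never block
	var wg sync.WaitGroup
	for name, q := range queries {
		wg.Add(1)
		go func(name, q string) {
			defer wg.Done()
			out, err := fn(q)
			ch <- result{Name: name, Out: out, Err: err}
		}(name, q)
	}
	wg.Wait()
	close(ch)

	var ok []string
	failed := make(map[string]string)
	for r := range ch {
		if r.Err != nil {
			failed[r.Name] = r.Err.Error()
			continue
		}
		ok = append(ok, r.Out)
	}
	return ok, failed
}

func main() {
	outs, failed := runAll(map[string]string{"A": "q1", "B": "q2"}, func(q string) (string, error) {
		if q == "q2" {
			return "", fmt.Errorf("syntax error in %s", q)
		}
		return q + ": 42 rows", nil
	})
	fmt.Println(outs, failed)
}
```

Buffering the channel to len(queries) and closing it only after wg.Wait() is what lets the single collection loop drain every result without deadlock.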
```diff
@@ -12,6 +12,8 @@ import (
 	"github.com/gorilla/mux"
 	"go.signoz.io/signoz/ee/query-service/model"
 	"go.signoz.io/signoz/pkg/query-service/auth"
+	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
+	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 	"go.uber.org/zap"
 )
 
@@ -27,47 +29,114 @@ func generatePATToken() string {
 func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()
 
-	req := model.PAT{}
+	req := model.CreatePATRequestBody{}
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
+		RespondError(w, basemodel.BadRequest(err), nil)
 		return
 	}
 	user, err := auth.GetUserFromRequest(r)
 	if err != nil {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
+		RespondError(w, &basemodel.ApiError{
+			Typ: basemodel.ErrorUnauthorized,
 			Err: err,
 		}, nil)
 		return
 	}
+	pat := model.PAT{
+		Name:      req.Name,
+		Role:      req.Role,
+		ExpiresAt: req.ExpiresInDays,
+	}
+	err = validatePATRequest(pat)
+	if err != nil {
+		RespondError(w, basemodel.BadRequest(err), nil)
+		return
+	}
 
-	// All the PATs are associated with the user creating the PAT. Hence, the permissions
-	// associated with the PAT is also equivalent to that of the user.
-	req.UserID = user.Id
-	req.CreatedAt = time.Now().Unix()
-	req.Token = generatePATToken()
+	// All the PATs are associated with the user creating the PAT.
+	pat.UserID = user.Id
+	pat.CreatedAt = time.Now().Unix()
+	pat.UpdatedAt = time.Now().Unix()
+	pat.LastUsed = 0
+	pat.Token = generatePATToken()
+
+	if pat.ExpiresAt != 0 {
+		// convert expiresAt to unix timestamp from days
+		pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60)
+	}
 
-	zap.S().Debugf("Got PAT request: %+v", req)
-	if apierr := ah.AppDao().CreatePAT(ctx, &req); apierr != nil {
+	zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
+	var apierr *basemodel.ApiError
+	if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
 		RespondError(w, apierr, nil)
 		return
 	}
 
-	ah.Respond(w, &req)
+	ah.Respond(w, &pat)
 }
+
+func validatePATRequest(req model.PAT) error {
+	if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
+		return fmt.Errorf("valid role is required")
+	}
+	if req.ExpiresAt < 0 {
+		return fmt.Errorf("valid expiresAt is required")
+	}
+	if req.Name == "" {
+		return fmt.Errorf("valid name is required")
+	}
+	return nil
+}
+
+func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
+	ctx := context.Background()
+
+	req := model.PAT{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		RespondError(w, basemodel.BadRequest(err), nil)
+		return
+	}
+
+	user, err := auth.GetUserFromRequest(r)
+	if err != nil {
+		RespondError(w, &basemodel.ApiError{
+			Typ: basemodel.ErrorUnauthorized,
+			Err: err,
+		}, nil)
+		return
+	}
+
+	err = validatePATRequest(req)
+	if err != nil {
+		RespondError(w, basemodel.BadRequest(err), nil)
+		return
+	}
+
+	req.UpdatedByUserID = user.Id
+	id := mux.Vars(r)["id"]
+	req.UpdatedAt = time.Now().Unix()
+	zap.L().Info("Got Update PAT request", zap.Any("pat", req))
+	var apierr *basemodel.ApiError
+	if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
+		RespondError(w, apierr, nil)
+		return
+	}
+
+	ah.Respond(w, map[string]string{"data": "pat updated successfully"})
+}
 
 func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()
 	user, err := auth.GetUserFromRequest(r)
 	if err != nil {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
+		RespondError(w, &basemodel.ApiError{
+			Typ: basemodel.ErrorUnauthorized,
 			Err: err,
 		}, nil)
 		return
 	}
-	zap.S().Infof("Get PATs for user: %+v", user.Id)
-	pats, apierr := ah.AppDao().ListPATs(ctx, user.Id)
+	zap.L().Info("Get PATs for user", zap.String("user_id", user.Id))
+	pats, apierr := ah.AppDao().ListPATs(ctx)
 	if apierr != nil {
 		RespondError(w, apierr, nil)
 		return
@@ -75,33 +144,22 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
 	ah.Respond(w, pats)
 }
 
-func (ah *APIHandler) deletePAT(w http.ResponseWriter, r *http.Request) {
+func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()
 	id := mux.Vars(r)["id"]
 	user, err := auth.GetUserFromRequest(r)
 	if err != nil {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
+		RespondError(w, &basemodel.ApiError{
+			Typ: basemodel.ErrorUnauthorized,
 			Err: err,
 		}, nil)
 		return
 	}
-	pat, apierr := ah.AppDao().GetPATByID(ctx, id)
-	if apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-	if pat.UserID != user.Id {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
-			Err: fmt.Errorf("unauthorized PAT delete request"),
-		}, nil)
-		return
-	}
-	zap.S().Debugf("Delete PAT with id: %+v", id)
-	if apierr := ah.AppDao().DeletePAT(ctx, id); apierr != nil {
+	zap.L().Info("Revoke PAT with id", zap.String("id", id))
+	if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil {
 		RespondError(w, apierr, nil)
 		return
 	}
-	ah.Respond(w, map[string]string{"data": "pat deleted successfully"})
+	ah.Respond(w, map[string]string{"data": "pat revoked successfully"})
 }
```
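createPAT stores ExpiresAt as an absolute unix timestamp derived from the requested lifetime in days, with 0 meaning no expiry. The conversion in isolation:

```go
package main

import (
	"fmt"
	"time"
)

// expiryUnix converts a relative lifetime in days into the absolute unix
// timestamp stored on the PAT; 0 means "never expires", matching createPAT.
func expiryUnix(now time.Time, expiresInDays int64) int64 {
	if expiresInDays == 0 {
		return 0
	}
	return now.Unix() + expiresInDays*24*60*60
}

func main() {
	now := time.Date(2024, 3, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(expiryUnix(now, 0))  // 0: token never expires
	fmt.Println(expiryUnix(now, 30)) // now + 2592000 seconds
}
```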
```diff
@@ -7,6 +7,6 @@ import (
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 )
 
-func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {
-	baseapp.RespondError(w, apiErr, data)
+func RespondError(w http.ResponseWriter, apiErr *basemodel.ApiError, data interface{}) {
+	baseapp.RespondError(w, apiErr)
 }
```
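This hunk narrows the EE wrapper from the BaseApiError interface to the concrete *basemodel.ApiError and stops forwarding data. One practical upside of concrete pointer signatures is that they sidestep the typed-nil interface trap, illustrated here with stand-in types:

```go
package main

import "fmt"

type BaseApiError interface{ Error() string }

type ApiError struct{ Msg string }

func (e *ApiError) Error() string { return e.Msg }

func mayFail(fail bool) *ApiError {
	if fail {
		return &ApiError{Msg: "boom"}
	}
	return nil
}

func main() {
	var iface BaseApiError = mayFail(false)
	fmt.Println(iface == nil) // false: the interface holds a typed nil

	ptr := mayFail(false)
	fmt.Println(ptr == nil) // true: concrete pointers compare as expected
}
```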
```diff
@@ -2,11 +2,8 @@ package api
 
 import (
 	"net/http"
-	"strconv"
 
 	"go.signoz.io/signoz/ee/query-service/app/db"
-	"go.signoz.io/signoz/ee/query-service/constants"
-	"go.signoz.io/signoz/ee/query-service/model"
 	baseapp "go.signoz.io/signoz/pkg/query-service/app"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 	"go.uber.org/zap"
@@ -15,21 +12,17 @@ import (
 func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
 
 	if !ah.CheckFeature(basemodel.SmartTraceDetail) {
-		zap.S().Info("SmartTraceDetail feature is not enabled in this plan")
+		zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
 		ah.APIHandler.SearchTraces(w, r)
 		return
 	}
-	traceId, spanId, levelUpInt, levelDownInt, err := baseapp.ParseSearchTracesParams(r)
+	searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
 	if err != nil {
-		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
+		RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, "Error reading params")
 		return
 	}
-	spanLimit, err := strconv.Atoi(constants.SpanLimitStr)
-	if err != nil {
-		zap.S().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable: ", err)
-		return
-	}
-	result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm)
+	result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
 	if ah.HandleError(w, err, http.StatusBadRequest) {
 		return
 	}
```
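ParseSearchTracesParams now returns a single params struct instead of five positional values, presumably so the span-limit handling previously done here (from the SPAN_LIMIT env variable) can live alongside the rest of the parsing. The general shape of such a refactor, with illustrative names:

```go
package main

import "fmt"

// Before: positional arguments are easy to transpose silently.
func searchTracesOld(traceID, spanID string, levelUp, levelDown, spanLimit int) string {
	return fmt.Sprintf("%s/%s up=%d down=%d limit=%d", traceID, spanID, levelUp, levelDown, spanLimit)
}

// After: a params struct names every field at the call site and lets the
// parser own defaults such as the span limit.
type SearchTracesParams struct {
	TraceID   string
	SpanID    string
	LevelUp   int
	LevelDown int
	SpanLimit int
}

func searchTracesNew(p SearchTracesParams) string {
	return fmt.Sprintf("%s/%s up=%d down=%d limit=%d", p.TraceID, p.SpanID, p.LevelUp, p.LevelDown, p.SpanLimit)
}

func main() {
	fmt.Println(searchTracesOld("t1", "s1", 2, 3, 100))
	fmt.Println(searchTracesNew(SearchTracesParams{TraceID: "t1", SpanID: "s1", LevelUp: 2, LevelDown: 3, SpanLimit: 100}))
}
```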
```diff
@@ -22,14 +22,14 @@ import (
 func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
 
 	defer utils.Elapsed("GetMetricResult")()
-	zap.S().Infof("Executing metric result query: %s", query)
+	zap.L().Info("Executing metric result query: ", zap.String("query", query))
 
 	var hash string
 	// If getSubTreeSpans function is used in the clickhouse query
-	if strings.Index(query, "getSubTreeSpans(") != -1 {
+	if strings.Contains(query, "getSubTreeSpans(") {
 		var err error
 		query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
-		if err == fmt.Errorf("No spans found for the given query") {
+		if err == fmt.Errorf("no spans found for the given query") {
 			return nil, "", nil
 		}
 		if err != nil {
@@ -38,9 +38,8 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
 	}
 
 	rows, err := r.conn.Query(ctx, query)
-	zap.S().Debug(query)
 	if err != nil {
-		zap.S().Debug("Error in processing query: ", err)
+		zap.L().Error("Error in processing query", zap.Error(err))
 		return nil, "", fmt.Errorf("error in processing query")
 	}
 
```
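One thing the lowercasing above does not fix: `err == fmt.Errorf("no spans found for the given query")` compares two freshly constructed error values and is therefore always false. The conventional shape is a shared sentinel checked with errors.Is; a hypothetical sketch:

```go
package main

import (
	"errors"
	"fmt"
)

// A sentinel the producer and consumer can share.
var errNoSpans = errors.New("no spans found for the given query")

func findSpans(n int) error {
	if n == 0 {
		return fmt.Errorf("subtree lookup: %w", errNoSpans) // wrap the sentinel
	}
	return nil
}

func main() {
	err := findSpans(0)
	fmt.Println(err == fmt.Errorf("no spans found for the given query")) // false: distinct values
	fmt.Println(errors.Is(err, errNoSpans))                              // true: unwraps to the sentinel
}
```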
```diff
@@ -117,7 +116,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
 				groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
 			}
 		default:
-			zap.S().Errorf("invalid var found in metric builder query result", v, colName)
+			zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName))
 		}
 	}
 	sort.Strings(groupBy)
@@ -140,7 +139,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
 	}
 	// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
 	// if err != nil {
-	// 	zap.S().Error("Error in dropping temporary table: ", err)
+	// 	zap.L().Error("Error in dropping temporary table: ", err)
 	// 	return nil, err
 	// }
 	if hash == "" {
@@ -152,7 +151,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
 
 func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
 
-	zap.S().Debugf("Executing getSubTreeSpans function")
+	zap.L().Debug("Executing getSubTreeSpans function")
 
 	// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
 
@@ -162,29 +161,29 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
 
 	err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
 	if err != nil {
-		zap.S().Error("Error in dropping temporary table: ", err)
+		zap.L().Error("Error in dropping temporary table", zap.Error(err))
 		return query, hash, err
 	}
 
 	// Create temporary table to store the getSubTreeSpans() results
-	zap.S().Debugf("Creating temporary table getSubTreeSpans%s", hash)
+	zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash))
 	err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
 	if err != nil {
-		zap.S().Error("Error in creating temporary table: ", err)
+		zap.L().Error("Error in creating temporary table", zap.Error(err))
 		return query, hash, err
 	}
 
 	var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
 	getSpansSubQuery := subtreeInput
 	// Execute the subTree query
-	zap.S().Debugf("Executing subTree query: %s", getSpansSubQuery)
+	zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery))
 	err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
 
-	// zap.S().Info(getSpansSubQuery)
+	// zap.L().Info(getSpansSubQuery)
 
 	if err != nil {
-		zap.S().Debug("Error in processing sql query: ", err)
-		return query, hash, fmt.Errorf("Error in processing sql query")
+		zap.L().Error("Error in processing sql query", zap.Error(err))
+		return query, hash, fmt.Errorf("error in processing sql query")
 	}
 
 	var searchScanResponses []basemodel.SearchSpanDBResponseItem
@@ -194,18 +193,18 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
 	modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
 
 	if len(getSpansSubQueryDBResponses) == 0 {
-		return query, hash, fmt.Errorf("No spans found for the given query")
+		return query, hash, fmt.Errorf("no spans found for the given query")
 	}
-	zap.S().Debugf("Executing query to fetch all the spans from the same TraceID: %s", modelQuery)
+	zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery))
 	err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
 
 	if err != nil {
-		zap.S().Debug("Error in processing sql query: ", err)
-		return query, hash, fmt.Errorf("Error in processing sql query")
+		zap.L().Error("Error in processing sql query", zap.Error(err))
+		return query, hash, fmt.Errorf("error in processing sql query")
 	}
 
 	// Process model to fetch the spans
-	zap.S().Debugf("Processing model to fetch the spans")
+	zap.L().Debug("Processing model to fetch the spans")
 	searchSpanResponses := []basemodel.SearchSpanResponseItem{}
 	for _, item := range searchScanResponses {
 		var jsonItem basemodel.SearchSpanResponseItem
@@ -218,17 +217,17 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
 	}
 	// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
 	// Use map to store pointer to the spans to avoid duplicates and save memory
-	zap.S().Debugf("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans%s", hash)
+	zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
 
 	treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
 	if err != nil {
-		zap.S().Error("Error in getSubTreeAlgorithm function: ", err)
+		zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err))
 		return query, hash, err
 	}
-	zap.S().Debugf("Preparing batch to store subtree spans in temporary table getSubTreeSpans%s", hash)
+	zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
 	statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
 	if err != nil {
-		zap.S().Error("Error in preparing batch statement: ", err)
+		zap.L().Error("Error in preparing batch statement", zap.Error(err))
 		return query, hash, err
 	}
 	for _, span := range treeSearchResponse {
@@ -251,19 +250,20 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
 			span.Events,
 		)
 		if err != nil {
-			zap.S().Debug("Error in processing sql query: ", err)
+			zap.L().Error("Error in processing sql query", zap.Error(err))
 			return query, hash, err
 		}
 	}
-	zap.S().Debugf("Inserting the subtree spans in temporary table getSubTreeSpans%s", hash)
+	zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
 	err = statement.Send()
 	if err != nil {
-		zap.S().Error("Error in sending statement: ", err)
+		zap.L().Error("Error in sending statement", zap.Error(err))
 		return query, hash, err
 	}
 	return query, hash, nil
 }
 
+//lint:ignore SA4009 return hash is feeded to the query
 func processQuery(query string, hash string) (string, string, string) {
 	re3 := regexp.MustCompile(`getSubTreeSpans`)
 
@@ -323,7 +323,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
 		spans = append(spans, span)
 	}
 
-	zap.S().Debug("Building Tree")
+	zap.L().Debug("Building Tree")
 	roots, err := buildSpanTrees(&spans)
 	if err != nil {
 		return nil, err
@@ -333,7 +333,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
 	// For each root, get the subtree spans
 	for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
 		targetSpan := &model.SpanForTraceDetails{}
-		// zap.S().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
+		// zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
 		// Search target span object in the tree
 		for _, root := range roots {
 			targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
@@ -341,7 +341,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
 				break
 			}
 			if err != nil {
-				zap.S().Error("Error during BreadthFirstSearch(): ", err)
+				zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
 				return nil, err
 			}
 		}
```
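The recurring edit in this file is zap.S() (the sugared, printf-style logger) to zap.L() (the structured logger with typed fields). A minimal sketch of the difference:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	query := "SELECT 1"

	// Sugared: convenient, but the value is interpolated into the message string.
	zap.S().Infof("Executing metric result query: %s", query)

	// Structured: the field survives as machine-parseable JSON ("query": "SELECT 1").
	zap.L().Info("Executing metric result query", zap.String("query", query))
}
```

Structured fields are what make log aggregation queries like `query = "SELECT 1"` possible downstream, which is presumably the motivation for the sweep.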
```diff
@@ -24,8 +24,9 @@ func NewDataConnector(
 	maxIdleConns int,
 	maxOpenConns int,
 	dialTimeout time.Duration,
+	cluster string,
 ) *ClickhouseReader {
-	ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout)
+	ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
	return &ClickhouseReader{
 		conn:  ch.GetConn(),
 		appdb: localDB,
```
```diff
@@ -13,6 +13,11 @@ import (
 func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
 	var spans []*model.SpanForTraceDetails
 
+	// if targetSpanId is null or not present then randomly select a span as targetSpanId
+	if (targetSpanId == "" || targetSpanId == "null") && len(payload) > 0 {
+		targetSpanId = payload[0].SpanID
+	}
+
 	// Build a slice of spans from the payload
 	for _, spanItem := range payload {
 		var parentID string
@@ -49,14 +54,14 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 			break
 		}
 		if err != nil {
-			zap.S().Error("Error during BreadthFirstSearch(): ", err)
+			zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
 			return nil, err
 		}
 	}
 
 	// If the target span is not found, return span not found error
 	if targetSpan == nil {
-		return nil, errors.New("Span not found")
+		return nil, errors.New("span not found")
 	}
 
 	// Build the final result
@@ -113,8 +118,9 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	}
 
 	searchSpansResult := []basemodel.SearchSpansResult{{
 		Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
 		Events:  make([][]interface{}, len(resultSpansSet)),
+		IsSubTree: true,
 	},
 	}
 
@@ -186,7 +192,7 @@ func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTra
 
 	// If the parent span is not found, add current span to list of roots
 	if parent == nil {
-		// zap.S().Debug("Parent Span not found parent_id: ", span.ParentID)
+		// zap.L().Debug("Parent Span not found parent_id: ", span.ParentID)
 		roots = append(roots, span)
 		span.ParentID = ""
 		continue
@@ -213,7 +219,7 @@ func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*
 	}
 
 	for _, child := range current.Children {
-		if ok, _ := visited[child.SpanID]; !ok {
+		if ok := visited[child.SpanID]; !ok {
 			queue = append(queue, child)
 		}
 	}
```
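breadthFirstSearch is a textbook BFS over the span tree with a visited set so a malformed (cyclic) structure cannot loop forever; the hunk above also simplifies the map lookup, since reading the stored bool and reading it with the comma-ok form are equivalent for a map[string]bool. A self-contained sketch of the same traversal (simplified node type; the real one carries span metadata):

```go
package main

import "fmt"

type span struct {
	ID       string
	Children []*span
}

// bfs walks level by level from root, tracking visited IDs so a cyclic
// input cannot cause an infinite loop; returns nil when targetID is absent.
func bfs(root *span, targetID string) *span {
	visited := make(map[string]bool)
	queue := []*span{root}
	for len(queue) > 0 {
		current := queue[0]
		queue = queue[1:]
		visited[current.ID] = true
		if current.ID == targetID {
			return current
		}
		for _, child := range current.Children {
			if ok := visited[child.ID]; !ok {
				queue = append(queue, child)
			}
		}
	}
	return nil
}

func main() {
	leaf := &span{ID: "c"}
	root := &span{ID: "a", Children: []*span{{ID: "b", Children: []*span{leaf}}}}
	if hit := bfs(root, "c"); hit != nil {
		fmt.Println("found:", hit.ID)
	}
}
```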
```diff
@@ -5,11 +5,13 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
+	"net/http/httputil"
 	_ "net/http/pprof" // http profiler
 	"os"
+	"regexp"
 	"time"
 
 	"github.com/gorilla/handlers"
@@ -20,9 +22,13 @@ import (
 	"github.com/soheilhy/cmux"
 	"go.signoz.io/signoz/ee/query-service/app/api"
 	"go.signoz.io/signoz/ee/query-service/app/db"
+	"go.signoz.io/signoz/ee/query-service/auth"
+	"go.signoz.io/signoz/ee/query-service/constants"
 	"go.signoz.io/signoz/ee/query-service/dao"
+	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
 	"go.signoz.io/signoz/ee/query-service/interfaces"
-	baseInterface "go.signoz.io/signoz/pkg/query-service/interfaces"
+	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
 
 	licensepkg "go.signoz.io/signoz/ee/query-service/license"
 	"go.signoz.io/signoz/ee/query-service/usage"
@@ -31,10 +37,10 @@ import (
 	baseapp "go.signoz.io/signoz/pkg/query-service/app"
 	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
 	baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
+	"go.signoz.io/signoz/pkg/query-service/app/integrations"
 	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
 	"go.signoz.io/signoz/pkg/query-service/app/opamp"
 	opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
-	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
 	"go.signoz.io/signoz/pkg/query-service/cache"
 	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
 	"go.signoz.io/signoz/pkg/query-service/healthcheck"
@@ -65,14 +71,14 @@ type ServerOptions struct {
 	DialTimeout     time.Duration
 	CacheConfigPath string
 	FluxInterval    string
+	Cluster         string
+	GatewayUrl      string
 }
 
 // Server runs HTTP api service
 type Server struct {
 	serverOptions *ServerOptions
-	conn          net.Listener
 	ruleManager   *rules.Manager
-	separatePorts bool
 
 	// public http router
 	httpConn net.Listener
@@ -82,12 +88,11 @@ type Server struct {
 	privateConn net.Listener
 	privateHTTP *http.Server
 
-	// feature flags
-	featureLookup baseint.FeatureLookup
-
 	// Usage manager
 	usageManager *usage.Manager
 
+	opampServer *opamp.Server
+
 	unavailableChannel chan healthcheck.Status
 }
 
@@ -114,8 +119,33 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 
 	localDB.SetMaxOpenConns(10)
 
+	gatewayFeature := basemodel.Feature{
+		Name:       "GATEWAY",
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	}
+
+	//Activate this feature if the url is not empty
+	var gatewayProxy *httputil.ReverseProxy
+	if serverOptions.GatewayUrl == "" {
+		gatewayFeature.Active = false
+		gatewayProxy, err = gateway.NewNoopProxy()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		zap.L().Info("Enabling gateway feature flag ...")
+		gatewayFeature.Active = true
+		gatewayProxy, err = gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
+		if err != nil {
+			return nil, err
+		}
+	}
+
```
// initiate license manager
|
// initiate license manager
|
||||||
lm, err := licensepkg.StartManager("sqlite", localDB)
|
lm, err := licensepkg.StartManager("sqlite", localDB, gatewayFeature)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
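
The gateway block above follows a null-object pattern: when GatewayUrl is empty, a no-op proxy is installed so downstream route registration never has to branch on the feature flag. A standalone sketch of that pattern with net/http/httputil — the helper names and the 501 behaviour are illustrative assumptions, not the actual gateway package internals:

package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"
)

type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) }

// newNoopProxy returns a ReverseProxy that answers 501 instead of
// forwarding, so callers can wire it in unconditionally.
// Illustrative only; the real gateway.NewNoopProxy may differ.
func newNoopProxy() *httputil.ReverseProxy {
	return &httputil.ReverseProxy{
		Director: func(r *http.Request) {}, // nothing to rewrite
		Transport: roundTripperFunc(func(r *http.Request) (*http.Response, error) {
			return &http.Response{
				StatusCode: http.StatusNotImplemented,
				Header:     make(http.Header),
				Body:       http.NoBody,
				Request:    r,
			}, nil
		}),
	}
}

// newProxy is the "real" branch: a single-host reverse proxy to the
// configured gateway URL.
func newProxy(gatewayUrl string) (*httputil.ReverseProxy, error) {
	u, err := url.Parse(gatewayUrl)
	if err != nil {
		return nil, err
	}
	return httputil.NewSingleHostReverseProxy(u), nil
}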
@@ -127,7 +157,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	var reader interfaces.DataConnector
 	storage := os.Getenv("STORAGE")
 	if storage == "clickhouse" {
-		zap.S().Info("Using ClickHouse as datastore ...")
+		zap.L().Info("Using ClickHouse as datastore ...")
 		qb := db.NewDataConnector(
 			localDB,
 			serverOptions.PromConfigPath,
@@ -135,6 +165,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 			serverOptions.MaxIdleConns,
 			serverOptions.MaxOpenConns,
 			serverOptions.DialTimeout,
+			serverOptions.Cluster,
 		)
 		go qb.Start(readerReady)
 		reader = qb
@@ -164,24 +195,38 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	}
 
 	// initiate opamp
-	_, err = opAmpModel.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
+	_, err = opAmpModel.InitDB(localDB)
+	if err != nil {
+		return nil, err
+	}
+
+	integrationsController, err := integrations.NewController(localDB)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"couldn't create integrations controller: %w", err,
+		)
+	}
+
+	// ingestion pipelines manager
+	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
+		localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
+	)
 	if err != nil {
 		return nil, err
 	}
 
 	// initiate agent config handler
-	if err := agentConf.Initiate(localDB, AppDbEngine); err != nil {
-		return nil, err
-	}
-
-	// ingestion pipelines manager
-	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(localDB, "sqlite")
+	agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
+		DB:            localDB,
+		DBEngine:      AppDbEngine,
+		AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
+	})
 	if err != nil {
 		return nil, err
 	}
 
 	// start the usagemanager
-	usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn())
+	usageManager, err := usage.New("sqlite", modelDao, lm.GetRepo(), reader.GetConn())
 	if err != nil {
 		return nil, err
 	}
@@ -191,6 +236,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	}
 
 	telemetry.GetInstance().SetReader(reader)
+	telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
 
 	var c cache.Cache
 	if serverOptions.CacheConfigPath != "" {
@@ -217,11 +263,14 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		DialTimeout:                   serverOptions.DialTimeout,
 		AppDao:                        modelDao,
 		RulesManager:                  rm,
+		UsageManager:                  usageManager,
 		FeatureFlags:                  lm,
 		LicenseManager:                lm,
+		IntegrationsController:        integrationsController,
 		LogsParsingPipelineController: logParsingPipelineController,
 		Cache:                         c,
 		FluxInterval:                  fluxInterval,
+		Gateway:                       gatewayProxy,
 	}
 
 	apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -253,13 +302,18 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 
 	s.privateHTTP = privateServer
 
+	s.opampServer = opamp.InitializeServer(
+		&opAmpModel.AllAgents, agentConfMgr,
+	)
+
 	return s, nil
 }
 
 func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
 
-	r := mux.NewRouter()
+	r := baseapp.NewRouter()
 
+	r.Use(baseapp.LogCommentEnricher)
 	r.Use(setTimeoutMiddleware)
 	r.Use(s.analyticsMiddleware)
 	r.Use(loggingMiddlewarePrivate)
@@ -284,35 +338,24 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
 
 func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
 
-	r := mux.NewRouter()
+	r := baseapp.NewRouter()
 
+	// add auth middleware
 	getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
-		patToken := r.Header.Get("SIGNOZ-API-KEY")
-		if len(patToken) > 0 {
-			zap.S().Debugf("Received a non-zero length PAT token")
-			ctx := context.Background()
-			dao := apiHandler.AppDao()
-
-			user, err := dao.GetUserByPAT(ctx, patToken)
-			if err == nil && user != nil {
-				zap.S().Debugf("Found valid PAT user: %+v", user)
-				return user, nil
-			}
-			if err != nil {
-				zap.S().Debugf("Error while getting user for PAT: %+v", err)
-			}
-		}
-		return baseauth.GetUserFromRequest(r)
+		return auth.GetUserFromRequest(r, apiHandler)
 	}
 	am := baseapp.NewAuthMiddleware(getUserFromRequest)
 
+	r.Use(baseapp.LogCommentEnricher)
 	r.Use(setTimeoutMiddleware)
 	r.Use(s.analyticsMiddleware)
 	r.Use(loggingMiddleware)
 
 	apiHandler.RegisterRoutes(r, am)
-	apiHandler.RegisterMetricsRoutes(r, am)
 	apiHandler.RegisterLogsRoutes(r, am)
+	apiHandler.RegisterIntegrationRoutes(r, am)
 	apiHandler.RegisterQueryRangeV3Routes(r, am)
+	apiHandler.RegisterQueryRangeV4Routes(r, am)
 
 	c := cors.New(cors.Options{
 		AllowedOrigins: []string{"*"},
@@ -336,7 +379,7 @@ func loggingMiddleware(next http.Handler) http.Handler {
 		path, _ := route.GetPathTemplate()
 		startTime := time.Now()
 		next.ServeHTTP(w, r)
-		zap.L().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
+		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path))
 	})
 }
 
@@ -348,7 +391,7 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
 		path, _ := route.GetPathTemplate()
 		startTime := time.Now()
 		next.ServeHTTP(w, r)
-		zap.L().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
+		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
 	})
 }
 
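A large share of the hunks in this file migrate from zap's sugared logger (zap.S(), printf-style messages) to the structured one (zap.L() with typed fields), and from time.Now().Sub(start) to the equivalent, idiomatic time.Since(start). A self-contained before/after sketch of the logging pattern:

package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	zap.ReplaceGlobals(logger) // makes zap.L() and zap.S() return this logger

	start := time.Now()
	// ... handle a request ...

	// Sugared style (before): message formatting, no machine-readable fields.
	zap.S().Infof("/api/v3/query_range timeTaken: %s", time.Since(start))

	// Structured style (after): constant message, typed fields that log
	// pipelines can index and query.
	zap.L().Info("/api/v3/query_range",
		zap.Duration("timeTaken", time.Since(start)),
		zap.String("path", "/api/v3/query_range"),
	)
}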
@@ -373,20 +416,21 @@ func (lrw *loggingResponseWriter) Flush() {
 	lrw.ResponseWriter.(http.Flusher).Flush()
 }
 
-func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
-	pathToExtractBodyFrom := "/api/v2/metrics/query_range"
+func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
+	pathToExtractBodyFromV3 := "/api/v3/query_range"
+	pathToExtractBodyFromV4 := "/api/v4/query_range"
 
 	data := map[string]interface{}{}
-	var postData *basemodel.QueryRangeParamsV2
+	var postData *v3.QueryRangeParamsV3
 
-	if path == pathToExtractBodyFrom && (r.Method == "POST") {
+	if (r.Method == "POST") && ((path == pathToExtractBodyFromV3) || (path == pathToExtractBodyFromV4)) {
 		if r.Body != nil {
-			bodyBytes, err := ioutil.ReadAll(r.Body)
+			bodyBytes, err := io.ReadAll(r.Body)
 			if err != nil {
 				return nil, false
 			}
 			r.Body.Close() // must close
-			r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
+			r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
 			json.Unmarshal(bodyBytes, &postData)
 
 		} else {
@@ -397,24 +441,70 @@ func extractDashboardMetaData(path string, r *http.Request) (map[string]interfac
 		return nil, false
 	}
 
-	signozMetricNotFound := false
+	referrer := r.Header.Get("Referer")
+
+	dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the referrer", zap.Error(err))
+	}
+	alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the alert: ", zap.Error(err))
+	}
+	logsExplorerMatched, err := regexp.MatchString(`/logs/logs-explorer(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the logs explorer: ", zap.Error(err))
+	}
+	traceExplorerMatched, err := regexp.MatchString(`/traces-explorer(?:\?.*)?$`, referrer)
+	if err != nil {
+		zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
+	}
 
+	signozMetricsUsed := false
+	signozLogsUsed := false
+	signozTracesUsed := false
 	if postData != nil {
-		signozMetricNotFound = telemetry.GetInstance().CheckSigNozMetricsV2(postData.CompositeMetricQuery)
 
-		if postData.CompositeMetricQuery != nil {
-			data["queryType"] = postData.CompositeMetricQuery.QueryType
-			data["panelType"] = postData.CompositeMetricQuery.PanelType
+		if postData.CompositeQuery != nil {
+			data["queryType"] = postData.CompositeQuery.QueryType
+			data["panelType"] = postData.CompositeQuery.PanelType
+
+			signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
 		}
-
-		data["datasource"] = postData.DataSource
 	}
 
-	if signozMetricNotFound {
-		telemetry.GetInstance().AddActiveMetricsUser()
-		telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
+	if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
+		if signozMetricsUsed {
+			telemetry.GetInstance().AddActiveMetricsUser()
+		}
+		if signozLogsUsed {
+			telemetry.GetInstance().AddActiveLogsUser()
+		}
+		if signozTracesUsed {
+			telemetry.GetInstance().AddActiveTracesUser()
+		}
+		data["metricsUsed"] = signozMetricsUsed
+		data["logsUsed"] = signozLogsUsed
+		data["tracesUsed"] = signozTracesUsed
+		userEmail, err := baseauth.GetEmailFromJwt(r.Context())
+		if err == nil {
+			// switch case to set data["screen"] based on the referrer
+			switch {
+			case dashboardMatched:
+				data["screen"] = "panel"
+			case alertMatched:
+				data["screen"] = "alert"
+			case logsExplorerMatched:
+				data["screen"] = "logs-explorer"
+			case traceExplorerMatched:
+				data["screen"] = "traces-explorer"
+			default:
+				data["screen"] = "unknown"
+				return data, true
+			}
+			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail, true, false)
+		}
 	}
 
 	return data, true
 }
 
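The referrer matching above classifies which UI screen issued the query_range call. A quick standalone check of those same regexes against a sample Referer value (the URL is made up for illustration):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same patterns as in extractQueryRangeData above.
	patterns := map[string]string{
		"panel":           `/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`,
		"alert":           `/alerts/(new|edit)(?:\?.*)?$`,
		"logs-explorer":   `/logs/logs-explorer(?:\?.*)?$`,
		"traces-explorer": `/traces-explorer(?:\?.*)?$`,
	}
	// Hypothetical Referer header value.
	referrer := "https://signoz.example.com/dashboard/abc-123/edit?relativeTime=30m"

	for screen, p := range patterns {
		if matched, err := regexp.MatchString(p, referrer); err == nil && matched {
			fmt.Println("screen:", screen) // screen: panel
		}
	}
}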
@@ -434,10 +524,12 @@ func getActiveLogs(path string, r *http.Request) {
 
 func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := baseauth.AttachJwtToContext(r.Context(), r)
+		r = r.WithContext(ctx)
 		route := mux.CurrentRoute(r)
 		path, _ := route.GetPathTemplate()
 
-		dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
+		queryRangeData, metadataExists := extractQueryRangeData(path, r)
 		getActiveLogs(path, r)
 
 		lrw := NewLoggingResponseWriter(w)
@@ -445,13 +537,16 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
 
 		data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
 		if metadataExists {
-			for key, value := range dashboardMetadata {
+			for key, value := range queryRangeData {
 				data[key] = value
 			}
 		}
 
-		if _, ok := telemetry.IgnoredPaths()[path]; !ok {
-			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
+		if _, ok := telemetry.EnabledPaths()[path]; ok {
+			userEmail, err := baseauth.GetEmailFromJwt(r.Context())
+			if err == nil {
+				telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail, true, false)
+			}
 		}
 
 	})
@@ -487,7 +582,7 @@ func (s *Server) initListeners() error {
 		return err
 	}
 
-	zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
+	zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
 
 	// listen on private port to support internal services
 	privateHostPort := s.serverOptions.PrivateHostPort
@@ -500,7 +595,7 @@ func (s *Server) initListeners() error {
 	if err != nil {
 		return err
 	}
-	zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
+	zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
 
 	return nil
 }
@@ -512,7 +607,7 @@ func (s *Server) Start() error {
 	if !s.serverOptions.DisableRules {
 		s.ruleManager.Start()
 	} else {
-		zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
+		zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
 	}
 
 	err := s.initListeners()
@@ -526,23 +621,23 @@ func (s *Server) Start() error {
 	}
 
 	go func() {
-		zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
+		zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
 
 		switch err := s.httpServer.Serve(s.httpConn); err {
 		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
 			// normal exit, nothing to do
 		default:
-			zap.S().Error("Could not start HTTP server", zap.Error(err))
+			zap.L().Error("Could not start HTTP server", zap.Error(err))
 		}
 		s.unavailableChannel <- healthcheck.Unavailable
 	}()
 
 	go func() {
-		zap.S().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
+		zap.L().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
 
 		err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
 		if err != nil {
-			zap.S().Error("Could not start pprof server", zap.Error(err))
+			zap.L().Error("Could not start pprof server", zap.Error(err))
 		}
 	}()
 
@@ -552,14 +647,14 @@ func (s *Server) Start() error {
 	}
 
 	go func() {
-		zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
+		zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
 
 		switch err := s.privateHTTP.Serve(s.privateConn); err {
 		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
 			// normal exit, nothing to do
-			zap.S().Info("private http server closed")
+			zap.L().Info("private http server closed")
 		default:
-			zap.S().Error("Could not start private HTTP server", zap.Error(err))
+			zap.L().Error("Could not start private HTTP server", zap.Error(err))
 		}
 
 		s.unavailableChannel <- healthcheck.Unavailable
@@ -567,10 +662,10 @@ func (s *Server) Start() error {
 	}()
 
 	go func() {
-		zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
-		err := opamp.InitializeAndStartServer(baseconst.OpAmpWsEndpoint, &opAmpModel.AllAgents)
+		zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
+		err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
 		if err != nil {
-			zap.S().Info("opamp ws server failed to start", err)
+			zap.L().Error("opamp ws server failed to start", zap.Error(err))
 			s.unavailableChannel <- healthcheck.Unavailable
 		}
 	}()
@@ -591,7 +686,7 @@ func (s *Server) Stop() error {
 		}
 	}
 
-	opamp.StopServer()
+	s.opampServer.Stop()
 
 	if s.ruleManager != nil {
 		s.ruleManager.Stop()
@@ -610,7 +705,7 @@ func makeRulesManager(
 	db *sqlx.DB,
 	ch baseint.Reader,
 	disableRules bool,
-	fm baseInterface.FeatureLookup) (*rules.Manager, error) {
+	fm baseint.FeatureLookup) (*rules.Manager, error) {
 
 	// create engine
 	pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -638,6 +733,7 @@ func makeRulesManager(
 		Logger:       nil,
 		DisableRules: disableRules,
 		FeatureFlags: fm,
+		Reader:       ch,
 	}
 
 	// create Manager
@@ -646,7 +742,7 @@ func makeRulesManager(
 		return nil, fmt.Errorf("rule manager error: %v", err)
 	}
 
-	zap.S().Info("rules manager is ready")
+	zap.L().Info("rules manager is ready")
 
 	return manager, nil
 }
ee/query-service/auth/auth.go (new file, 56 lines)
@@ -0,0 +1,56 @@
+package auth
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"go.signoz.io/signoz/ee/query-service/app/api"
+	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+	basemodel "go.signoz.io/signoz/pkg/query-service/model"
+	"go.signoz.io/signoz/pkg/query-service/telemetry"
+
+	"go.uber.org/zap"
+)
+
+func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel.UserPayload, error) {
+	patToken := r.Header.Get("SIGNOZ-API-KEY")
+	if len(patToken) > 0 {
+		zap.L().Debug("Received a non-zero length PAT token")
+		ctx := context.Background()
+		dao := apiHandler.AppDao()
+
+		pat, err := dao.GetPAT(ctx, patToken)
+		if err == nil && pat != nil {
+			zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
+			if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
+				zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
+				return nil, fmt.Errorf("PAT has expired")
+			}
+			group, apiErr := dao.GetGroupByName(ctx, pat.Role)
+			if apiErr != nil {
+				zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
+				return nil, apiErr
+			}
+			user, err := dao.GetUser(ctx, pat.UserID)
+			if err != nil {
+				zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
+				return nil, err
+			}
+			telemetry.GetInstance().SetPatTokenUser()
+			dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
+			user.User.GroupId = group.Id
+			user.User.Id = pat.Id
+			return &basemodel.UserPayload{
+				User: user.User,
+				Role: pat.Role,
+			}, nil
+		}
+		if err != nil {
+			zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
+			return nil, err
+		}
+	}
+	return baseauth.GetUserFromRequest(r)
+}
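
The new ee/query-service/auth package centralizes PAT handling: a client sends the token in the SIGNOZ-API-KEY header, and requests without one fall through to the base JWT auth. A client-side sketch (host and token are placeholders, for illustration only):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical endpoint and PAT value.
	req, err := http.NewRequest(http.MethodGet, "https://signoz.example.com/api/v1/dashboards", nil)
	if err != nil {
		panic(err)
	}
	// The header the EE auth layer inspects before falling back to JWT auth.
	req.Header.Set("SIGNOZ-API-KEY", "<personal-access-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}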

@@ -9,8 +9,10 @@ const (
 )
 
 var LicenseSignozIo = "https://license.signoz.io/api/v1"
-var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
+var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
+var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
+var SpanRenderLimitStr = GetOrDefaultEnv("SPAN_RENDER_LIMIT", "2500")
+var MaxSpansInTraceStr = GetOrDefaultEnv("MAX_SPANS_IN_TRACE", "250000")
 
 func GetOrDefaultEnv(key string, fallback string) string {
 	v := os.Getenv(key)

@@ -21,22 +21,24 @@ type ModelDao interface {
 	DB() *sqlx.DB
 
 	// auth methods
-	CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
-	PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
+	CanUsePassword(ctx context.Context, email string) (bool, *basemodel.ApiError)
+	PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr *basemodel.ApiError)
 	GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
 
 	// org domain (auth domains) CRUD ops
-	ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
-	GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
-	CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
-	UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
-	DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
-	GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
+	ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, *basemodel.ApiError)
+	GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, *basemodel.ApiError)
+	CreateDomain(ctx context.Context, d *model.OrgDomain) *basemodel.ApiError
+	UpdateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError
+	DeleteDomain(ctx context.Context, id uuid.UUID) *basemodel.ApiError
+	GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, *basemodel.ApiError)
 
-	CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError
-	GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
-	GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
-	GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
-	ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError)
-	DeletePAT(ctx context.Context, id string) basemodel.BaseApiError
+	CreatePAT(ctx context.Context, p model.PAT) (model.PAT, *basemodel.ApiError)
+	UpdatePAT(ctx context.Context, p model.PAT, id string) *basemodel.ApiError
+	GetPAT(ctx context.Context, pat string) (*model.PAT, *basemodel.ApiError)
+	UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) *basemodel.ApiError
+	GetPATByID(ctx context.Context, id string) (*model.PAT, *basemodel.ApiError)
+	GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, *basemodel.ApiError)
+	ListPATs(ctx context.Context) ([]model.PAT, *basemodel.ApiError)
+	RevokePAT(ctx context.Context, id string, userID string) *basemodel.ApiError
 }
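
The interface above moves every signature from the basemodel.BaseApiError interface to the concrete *basemodel.ApiError pointer. One plausible motivation for this change — stated as an assumption, not confirmed by the diff: returning a typed nil through an interface-typed result defeats the caller's nil check. A minimal demonstration of that Go footgun:

package main

import "fmt"

type apiError struct{ msg string }

func (e *apiError) Error() string { return e.msg }

// Returns the interface type: a nil *apiError gets wrapped into a
// non-nil interface value, so the caller's nil check passes wrongly.
func lookupViaInterface() error {
	var e *apiError // nil pointer
	return e        // non-nil interface holding a nil pointer!
}

// Returns the concrete pointer type: nil stays nil for the caller.
func lookupViaPointer() *apiError {
	return nil
}

func main() {
	fmt.Println(lookupViaInterface() != nil) // true — surprising
	fmt.Println(lookupViaPointer() != nil)   // false — as expected
}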

@@ -17,24 +17,24 @@ import (
 	"go.uber.org/zap"
 )
 
-func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
+func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, *basemodel.ApiError) {
 	// get auth domain from email domain
 	domain, apierr := m.GetDomainByEmail(ctx, email)
 
 	if apierr != nil {
-		zap.S().Errorf("failed to get domain from email", apierr)
-		return nil, model.InternalErrorStr("failed to get domain from email")
+		zap.L().Error("failed to get domain from email", zap.Error(apierr))
+		return nil, basemodel.InternalError(fmt.Errorf("failed to get domain from email"))
 	}
 
 	hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
 	if err != nil {
-		zap.S().Errorf("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
-		return nil, model.InternalErrorStr("failed to generate password hash")
+		zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
+		return nil, basemodel.InternalError(fmt.Errorf("failed to generate password hash"))
 	}
 
 	group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
 	if apiErr != nil {
-		zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
+		zap.L().Error("GetGroupByName failed", zap.Error(apiErr))
 		return nil, apiErr
 	}
 
@@ -51,7 +51,7 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
 
 	user, apiErr = m.CreateUser(ctx, user, false)
 	if apiErr != nil {
-		zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err)
+		zap.L().Error("CreateUser failed", zap.Error(apiErr))
 		return nil, apiErr
 	}
 
@@ -61,12 +61,12 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
 
 // PrepareSsoRedirect prepares redirect page link after SSO response
 // is successfully parsed (i.e. valid email is available)
-func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
+func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr *basemodel.ApiError) {
 
 	userPayload, apierr := m.GetUserByEmail(ctx, email)
 	if !apierr.IsNil() {
-		zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error())
-		return "", model.BadRequestStr("invalid user email received from the auth provider")
+		zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
+		return "", basemodel.BadRequest(fmt.Errorf("invalid user email received from the auth provider"))
 	}
 
 	user := &basemodel.User{}
@@ -75,7 +75,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
 		newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
 		user = newUser
 		if apiErr != nil {
-			zap.S().Errorf("failed to create user with email received from auth provider: %v", apierr.Error())
+			zap.L().Error("failed to create user with email received from auth provider", zap.Error(apiErr))
 			return "", apiErr
 		}
 	} else {
@@ -84,8 +84,8 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
 
 	tokenStore, err := baseauth.GenerateJWTForUser(user)
 	if err != nil {
-		zap.S().Errorf("failed to generate token for SSO login user", err)
-		return "", model.InternalErrorStr("failed to generate token for the user")
+		zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
+		return "", basemodel.InternalError(fmt.Errorf("failed to generate token for the user"))
 	}
 
 	return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
@@ -95,7 +95,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
 		tokenStore.RefreshJwt), nil
 }
 
-func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
+func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, *basemodel.ApiError) {
 	domain, apierr := m.GetDomainByEmail(ctx, email)
 	if apierr != nil {
 		return false, apierr
@@ -110,7 +110,7 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base
 	}
 
 	if userPayload.Role != baseconst.AdminGroup {
-		return false, model.BadRequest(fmt.Errorf("auth method not supported"))
+		return false, basemodel.BadRequest(fmt.Errorf("auth method not supported"))
 	}
 
 	}
@@ -120,7 +120,7 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base
 
 // PrecheckLogin is called when the login or signup page is loaded
 // to check sso login is to be prompted
-func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) {
+func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, *basemodel.ApiError) {
 
 	// assume user is valid unless proven otherwise
 	resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}
@@ -143,8 +143,8 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 			// do nothing, just skip sso
 			ssoAvailable = false
 		default:
-			zap.S().Errorf("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
-			return resp, model.BadRequest(err)
+			zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
+			return resp, &basemodel.ApiError{Err: err, Typ: basemodel.ErrorBadData}
 		}
 	}
 
@@ -160,7 +160,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 		if len(emailComponents) > 0 {
 			emailDomain = emailComponents[1]
 		}
-		zap.S().Errorf("failed to get org domain from email", zap.String("emailDomain", emailDomain), apierr.ToError())
+		zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
 		return resp, apierr
 	}
 
@@ -176,8 +176,8 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 		escapedUrl, _ := url.QueryUnescape(sourceUrl)
 		siteUrl, err := url.Parse(escapedUrl)
 		if err != nil {
-			zap.S().Errorf("failed to parse referer", err)
-			return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
+			zap.L().Error("failed to parse referer", zap.Error(err))
+			return resp, basemodel.InternalError(fmt.Errorf("failed to generate login request"))
 		}
 
 		// build Idp URL that will authenticat the user
@@ -185,8 +185,8 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 		resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
 
 		if err != nil {
-			zap.S().Errorf("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), err)
-			return resp, model.InternalError(err)
+			zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
+			return resp, basemodel.InternalError(err)
 		}
 
 		// set SSO to true, as the url is generated correctly
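
PrepareSsoRedirect above assembles the redirect with fmt.Sprintf. As a side note, the same URL can be built with net/url so the JWT and user id are query-escaped; this is an alternative sketch, not what the diff does (field names are placeholders):

package main

import (
	"fmt"
	"net/url"
)

// buildSsoRedirect mirrors the fmt.Sprintf construction in
// PrepareSsoRedirect, but lets net/url handle query escaping.
func buildSsoRedirect(redirectUri, accessJwt, userID, refreshJwt string) (string, error) {
	u, err := url.Parse(redirectUri)
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Set("jwt", accessJwt)
	q.Set("usr", userID)
	q.Set("refreshjwt", refreshJwt)
	u.RawQuery = q.Encode()
	return u.String(), nil
}

func main() {
	out, _ := buildSsoRedirect("https://signoz.example.com/login", "a.b.c", "user-1", "d.e.f")
	fmt.Println(out)
}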

@@ -48,13 +48,13 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
 	if domainIdStr != "" {
 		domainId, err := uuid.Parse(domainIdStr)
 		if err != nil {
-			zap.S().Errorf("failed to parse domainId from relay state", err)
+			zap.L().Error("failed to parse domainId from relay state", zap.Error(err))
 			return nil, fmt.Errorf("failed to parse domainId from IdP response")
 		}
 
 		domain, err = m.GetDomain(ctx, domainId)
 		if (err != nil) || domain == nil {
-			zap.S().Errorf("failed to find domain from domainId received in IdP response", err.Error())
+			zap.L().Error("failed to find domain from domainId received in IdP response", zap.Error(err))
 			return nil, fmt.Errorf("invalid credentials")
 		}
 	}
@@ -64,7 +64,7 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
 		domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
 		domain = domainFromDB
 		if (err != nil) || domain == nil {
-			zap.S().Errorf("failed to find domain from domainName received in IdP response", err.Error())
+			zap.L().Error("failed to find domain from domainName received in IdP response", zap.Error(err))
 			return nil, fmt.Errorf("invalid credentials")
 		}
 	}
@@ -76,47 +76,47 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
 }
 
 // GetDomainByName returns org domain for a given domain name
-func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {
+func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, *basemodel.ApiError) {
 
 	stored := StoredDomain{}
 	err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)
 
 	if err != nil {
 		if err == sql.ErrNoRows {
-			return nil, model.BadRequest(fmt.Errorf("invalid domain name"))
+			return nil, basemodel.BadRequest(fmt.Errorf("invalid domain name"))
 		}
-		return nil, model.InternalError(err)
+		return nil, basemodel.InternalError(err)
 	}
 
 	domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
 	if err := domain.LoadConfig(stored.Data); err != nil {
-		return nil, model.InternalError(err)
+		return nil, basemodel.InternalError(err)
 	}
 	return domain, nil
 }
 
 // GetDomain returns org domain for a given domain id
-func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {
+func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, *basemodel.ApiError) {
 
 	stored := StoredDomain{}
 	err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)
 
 	if err != nil {
 		if err == sql.ErrNoRows {
-			return nil, model.BadRequest(fmt.Errorf("invalid domain id"))
+			return nil, basemodel.BadRequest(fmt.Errorf("invalid domain id"))
 		}
-		return nil, model.InternalError(err)
+		return nil, basemodel.InternalError(err)
 	}
 
 	domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
 	if err := domain.LoadConfig(stored.Data); err != nil {
-		return nil, model.InternalError(err)
+		return nil, basemodel.InternalError(err)
 	}
 	return domain, nil
 }
 
 // ListDomains gets the list of auth domains by org id
-func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
+func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, *basemodel.ApiError) {
 	domains := []model.OrgDomain{}
 
 	stored := []StoredDomain{}
@@ -126,13 +126,13 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
 		if err == sql.ErrNoRows {
 			return []model.OrgDomain{}, nil
 		}
-		return nil, model.InternalError(err)
+		return nil, basemodel.InternalError(err)
 	}
 
 	for _, s := range stored {
 		domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
 		if err := domain.LoadConfig(s.Data); err != nil {
-			zap.S().Errorf("ListDomains() failed", zap.Error(err))
+			zap.L().Error("ListDomains() failed", zap.Error(err))
 		}
 		domains = append(domains, domain)
 	}
@@ -141,20 +141,20 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
 }
 
 // CreateDomain creates a new auth domain
-func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
+func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError {
 
 	if domain.Id == uuid.Nil {
 		domain.Id = uuid.New()
 	}
 
 	if domain.OrgId == "" || domain.Name == "" {
-		return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
+		return basemodel.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
 	}
 
 	configJson, err := json.Marshal(domain)
 	if err != nil {
-		zap.S().Errorf("failed to unmarshal domain config", zap.Error(err))
-		return model.InternalError(fmt.Errorf("domain creation failed"))
+		zap.L().Error("failed to unmarshal domain config", zap.Error(err))
+		return basemodel.InternalError(fmt.Errorf("domain creation failed"))
 	}
 
 	_, err = m.DB().ExecContext(ctx,
@@ -167,25 +167,25 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
 		time.Now().Unix())
 
 	if err != nil {
-		zap.S().Errorf("failed to insert domain in db", zap.Error(err))
-		return model.InternalError(fmt.Errorf("domain creation failed"))
+		zap.L().Error("failed to insert domain in db", zap.Error(err))
+		return basemodel.InternalError(fmt.Errorf("domain creation failed"))
 	}
 
 	return nil
 }
 
 // UpdateDomain updates stored config params for a domain
-func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
+func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) *basemodel.ApiError {
 
 	if domain.Id == uuid.Nil {
-		zap.S().Errorf("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
-		return model.InternalError(fmt.Errorf("domain update failed"))
+		zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
+		return basemodel.InternalError(fmt.Errorf("domain update failed"))
 	}
 
 	configJson, err := json.Marshal(domain)
 	if err != nil {
-		zap.S().Errorf("domain update failed", zap.Error(err))
-		return model.InternalError(fmt.Errorf("domain update failed"))
+		zap.L().Error("domain update failed", zap.Error(err))
+		return basemodel.InternalError(fmt.Errorf("domain update failed"))
 	}
 
 	_, err = m.DB().ExecContext(ctx,
@@ -195,19 +195,19 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
 		domain.Id)
 
 	if err != nil {
-		zap.S().Errorf("domain update failed", zap.Error(err))
-		return model.InternalError(fmt.Errorf("domain update failed"))
+		zap.L().Error("domain update failed", zap.Error(err))
+		return basemodel.InternalError(fmt.Errorf("domain update failed"))
 	}
 
 	return nil
 }
 
 // DeleteDomain deletes an org domain
-func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
+func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) *basemodel.ApiError {
 
 	if id == uuid.Nil {
-		zap.S().Errorf("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
-		return model.InternalError(fmt.Errorf("domain delete failed"))
+		zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
+		return basemodel.InternalError(fmt.Errorf("domain delete failed"))
 	}
 
 	_, err := m.DB().ExecContext(ctx,
@@ -215,22 +215,22 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
 		id)
 
 	if err != nil {
-		zap.S().Errorf("domain delete failed", zap.Error(err))
-		return model.InternalError(fmt.Errorf("domain delete failed"))
+		zap.L().Error("domain delete failed", zap.Error(err))
+		return basemodel.InternalError(fmt.Errorf("domain delete failed"))
 	}
 
 	return nil
 }
 
-func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {
+func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, *basemodel.ApiError) {
 
 	if email == "" {
-		return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
+		return nil, basemodel.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
 	}
 
 	components := strings.Split(email, "@")
 	if len(components) < 2 {
-		return nil, model.BadRequest(fmt.Errorf("invalid email address"))
+		return nil, basemodel.BadRequest(fmt.Errorf("invalid email address"))
 	}
 
 	parsedDomain := components[1]
@@ -242,12 +242,12 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
 		if err == sql.ErrNoRows {
 			return nil, nil
 		}
-		return nil, model.InternalError(err)
+		return nil, basemodel.InternalError(err)
 	}
 
 	domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
 	if err := domain.LoadConfig(stored.Data); err != nil {
-		return nil, model.InternalError(err)
+		return nil, basemodel.InternalError(err)
 	}
 	return domain, nil
 }
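
The domain getters above all share one error-mapping shape: sql.ErrNoRows becomes a bad-request (the caller asked for an unknown id or name) while anything else is internal. A condensed sketch of the pattern with simplified types:

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

type ApiError struct {
	Typ string
	Err error
}

// classify maps a DB lookup error the same way GetDomain/GetDomainByName do:
// "no rows" is the caller's fault, everything else is ours.
func classify(err error) *ApiError {
	if errors.Is(err, sql.ErrNoRows) {
		return &ApiError{Typ: "bad_request", Err: fmt.Errorf("invalid domain id")}
	}
	return &ApiError{Typ: "internal", Err: err}
}

func main() {
	fmt.Println(classify(sql.ErrNoRows).Typ)                // bad_request
	fmt.Println(classify(errors.New("disk I/O error")).Typ) // internal
}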
@@ -7,6 +7,7 @@ import (
|
|||||||
basedao "go.signoz.io/signoz/pkg/query-service/dao"
|
basedao "go.signoz.io/signoz/pkg/query-service/dao"
|
||||||
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
|
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
|
||||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||||
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
type modelDao struct {
|
type modelDao struct {
|
||||||
@@ -28,6 +29,41 @@ func (m *modelDao) checkFeature(key string) error {
|
|||||||
return m.flags.CheckFeature(key)
|
return m.flags.CheckFeature(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func columnExists(db *sqlx.DB, tableName, columnName string) bool {
|
||||||
|
query := fmt.Sprintf("PRAGMA table_info(%s);", tableName)
|
||||||
|
rows, err := db.Query(query)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Failed to query table info", zap.Error(err))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
var (
|
||||||
|
cid int
|
||||||
|
name string
|
||||||
|
ctype string
|
||||||
|
notnull int
|
||||||
|
dflt_value *string
|
||||||
|
pk int
|
||||||
|
)
|
||||||
|
for rows.Next() {
|
||||||
|
err := rows.Scan(&cid, &name, &ctype, ¬null, &dflt_value, &pk)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Failed to scan table info", zap.Error(err))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if name == columnName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = rows.Err()
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Failed to scan table info", zap.Error(err))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// InitDB creates and extends base model DB repository
|
// InitDB creates and extends base model DB repository
|
||||||
func InitDB(dataSourceName string) (*modelDao, error) {
|
func InitDB(dataSourceName string) (*modelDao, error) {
|
||||||
dao, err := basedsql.InitDB(dataSourceName)
|
dao, err := basedsql.InitDB(dataSourceName)
|
||||||
@@ -51,11 +87,16 @@ func InitDB(dataSourceName string) (*modelDao, error) {
|
|||||||
);
|
);
|
||||||
CREATE TABLE IF NOT EXISTS personal_access_tokens (
|
CREATE TABLE IF NOT EXISTS personal_access_tokens (
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
role TEXT NOT NULL,
|
||||||
user_id TEXT NOT NULL,
|
user_id TEXT NOT NULL,
|
||||||
token TEXT NOT NULL UNIQUE,
|
token TEXT NOT NULL UNIQUE,
|
||||||
name TEXT NOT NULL,
|
name TEXT NOT NULL,
|
||||||
created_at INTEGER NOT NULL,
|
created_at INTEGER NOT NULL,
|
||||||
expires_at INTEGER NOT NULL,
|
expires_at INTEGER NOT NULL,
|
||||||
|
updated_at INTEGER NOT NULL,
|
||||||
|
last_used INTEGER NOT NULL,
|
||||||
|
revoked BOOLEAN NOT NULL,
|
||||||
|
updated_by_user_id TEXT NOT NULL,
|
||||||
FOREIGN KEY(user_id) REFERENCES users(id)
|
FOREIGN KEY(user_id) REFERENCES users(id)
|
||||||
);
|
);
|
||||||
`
|
`
|
||||||
@@ -65,6 +106,36 @@ func InitDB(dataSourceName string) (*modelDao, error) {
|
|||||||
return nil, fmt.Errorf("error in creating tables: %v", err.Error())
|
return nil, fmt.Errorf("error in creating tables: %v", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "role") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN role TEXT NOT NULL DEFAULT 'ADMIN';")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "updated_at") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_at INTEGER NOT NULL DEFAULT 0;")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "last_used") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "revoked") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN revoked BOOLEAN NOT NULL DEFAULT FALSE;")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "updated_by_user_id") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_by_user_id TEXT NOT NULL DEFAULT '';")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
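SQLite has no `ALTER TABLE ... ADD COLUMN IF NOT EXISTS`, which is why the hunk above pairs a `PRAGMA table_info` probe with a guarded `ALTER TABLE` for each new personal_access_tokens column. A minimal sketch of the same pattern folded into a reusable helper, assuming a plain database/sql handle; the helper name addColumnIfMissing is illustrative and not part of this changeset:

package sqlitemigrate

import (
	"database/sql"
	"fmt"
)

// addColumnIfMissing probes PRAGMA table_info and only then issues the
// ALTER TABLE, because SQLite has no ADD COLUMN IF NOT EXISTS.
func addColumnIfMissing(db *sql.DB, table, column, definition string) error {
	rows, err := db.Query(fmt.Sprintf("PRAGMA table_info(%s);", table))
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var cid, notnull, pk int
		var name, ctype string
		var dflt *string
		if err := rows.Scan(&cid, &name, &ctype, &notnull, &dflt, &pk); err != nil {
			return err
		}
		if name == column {
			return nil // already migrated; a second boot is a no-op
		}
	}
	if err := rows.Err(); err != nil {
		return err
	}
	_, err = db.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s;", table, column, definition))
	return err
}

A call such as addColumnIfMissing(db, "personal_access_tokens", "role", "TEXT NOT NULL DEFAULT 'ADMIN'") would then stand in for one of the five hand-rolled blocks above. Note that a NOT NULL addition needs a DEFAULT, since SQLite must backfill the existing rows.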
@@ -3,56 +3,148 @@ package sqlite
 import (
 	"context"
 	"fmt"
+	"strconv"
+	"time"

 	"go.signoz.io/signoz/ee/query-service/model"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 	"go.uber.org/zap"
 )

-func (m *modelDao) CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError {
-	_, err := m.DB().ExecContext(ctx,
-		"INSERT INTO personal_access_tokens (user_id, token, name, created_at, expires_at) VALUES ($1, $2, $3, $4, $5)",
+func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, *basemodel.ApiError) {
+	result, err := m.DB().ExecContext(ctx,
+		"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
 		p.UserID,
 		p.Token,
+		p.Role,
 		p.Name,
 		p.CreatedAt,
-		p.ExpiresAt)
+		p.ExpiresAt,
+		p.UpdatedAt,
+		p.UpdatedByUserID,
+		p.LastUsed,
+		p.Revoked,
+	)
 	if err != nil {
-		zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
-		return model.InternalError(fmt.Errorf("PAT insertion failed"))
+		zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
+		return model.PAT{}, basemodel.InternalError(fmt.Errorf("PAT insertion failed"))
+	}
+	id, err := result.LastInsertId()
+	if err != nil {
+		zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
+		return model.PAT{}, basemodel.InternalError(fmt.Errorf("PAT insertion failed"))
+	}
+	p.Id = strconv.Itoa(int(id))
+	createdByUser, _ := m.GetUser(ctx, p.UserID)
+	if createdByUser == nil {
+		p.CreatedByUser = model.User{
+			NotFound: true,
+		}
+	} else {
+		p.CreatedByUser = model.User{
+			Id:                createdByUser.Id,
+			Name:              createdByUser.Name,
+			Email:             createdByUser.Email,
+			CreatedAt:         createdByUser.CreatedAt,
+			ProfilePictureURL: createdByUser.ProfilePictureURL,
+			NotFound:          false,
+		}
+	}
+	return p, nil
+}
+
+func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) *basemodel.ApiError {
+	_, err := m.DB().ExecContext(ctx,
+		"UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;",
+		p.Role,
+		p.Name,
+		p.UpdatedAt,
+		p.UpdatedByUserID,
+		id)
+	if err != nil {
+		zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
+		return basemodel.InternalError(fmt.Errorf("PAT update failed"))
 	}
 	return nil
 }

-func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
+func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) *basemodel.ApiError {
+	_, err := m.DB().ExecContext(ctx,
+		"UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;",
+		lastUsed,
+		token)
+	if err != nil {
+		zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
+		return basemodel.InternalError(fmt.Errorf("PAT last used update failed"))
+	}
+	return nil
+}
+
+func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, *basemodel.ApiError) {
 	pats := []model.PAT{}

-	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE user_id=?;`, userID); err != nil {
-		zap.S().Errorf("Failed to fetch PATs for user: %s, err: %v", userID, zap.Error(err))
-		return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
+	if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
+		zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
+		return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PATs"))
+	}
+	for i := range pats {
+		createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
+		if createdByUser == nil {
+			pats[i].CreatedByUser = model.User{
+				NotFound: true,
+			}
+		} else {
+			pats[i].CreatedByUser = model.User{
+				Id:                createdByUser.Id,
+				Name:              createdByUser.Name,
+				Email:             createdByUser.Email,
+				CreatedAt:         createdByUser.CreatedAt,
+				ProfilePictureURL: createdByUser.ProfilePictureURL,
+				NotFound:          false,
+			}
+		}
+
+		updatedByUser, _ := m.GetUser(ctx, pats[i].UpdatedByUserID)
+		if updatedByUser == nil {
+			pats[i].UpdatedByUser = model.User{
+				NotFound: true,
+			}
+		} else {
+			pats[i].UpdatedByUser = model.User{
+				Id:                updatedByUser.Id,
+				Name:              updatedByUser.Name,
+				Email:             updatedByUser.Email,
+				CreatedAt:         updatedByUser.CreatedAt,
+				ProfilePictureURL: updatedByUser.ProfilePictureURL,
+				NotFound:          false,
+			}
+		}
 	}
 	return pats, nil
 }

-func (m *modelDao) DeletePAT(ctx context.Context, id string) basemodel.BaseApiError {
-	_, err := m.DB().ExecContext(ctx, `DELETE from personal_access_tokens where id=?;`, id)
+func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) *basemodel.ApiError {
+	updatedAt := time.Now().Unix()
+	_, err := m.DB().ExecContext(ctx,
+		"UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
+		userID, updatedAt, id)
 	if err != nil {
-		zap.S().Errorf("Failed to delete PAT, err: %v", zap.Error(err))
-		return model.InternalError(fmt.Errorf("failed to delete PAT"))
+		zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
+		return basemodel.InternalError(fmt.Errorf("PAT revoke failed"))
 	}
 	return nil
 }

-func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
+func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, *basemodel.ApiError) {
 	pats := []model.PAT{}

-	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=?;`, token); err != nil {
-		return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
+	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil {
+		return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PAT"))
 	}

 	if len(pats) != 1 {
-		return nil, &model.ApiError{
-			Typ: model.ErrorInternal,
+		return nil, &basemodel.ApiError{
+			Typ: basemodel.ErrorInternal,
 			Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token),
 		}
 	}
@@ -60,16 +152,16 @@ func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemo
 	return &pats[0], nil
 }

-func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
+func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, *basemodel.ApiError) {
 	pats := []model.PAT{}

-	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=?;`, id); err != nil {
-		return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
+	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil {
+		return nil, basemodel.InternalError(fmt.Errorf("failed to fetch PAT"))
 	}

 	if len(pats) != 1 {
-		return nil, &model.ApiError{
-			Typ: model.ErrorInternal,
+		return nil, &basemodel.ApiError{
+			Typ: basemodel.ErrorInternal,
 			Err: fmt.Errorf("found zero or multiple PATs with same token"),
 		}
 	}
@@ -77,7 +169,8 @@ func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basem
 	return &pats[0], nil
 }

-func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) {
+// deprecated
+func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, *basemodel.ApiError) {
 	users := []basemodel.UserPayload{}

 	query := `SELECT
@@ -90,15 +183,15 @@ func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.U
 		u.org_id,
 		u.group_id
 	FROM users u, personal_access_tokens p
-	WHERE u.id = p.user_id and p.token=?;`
+	WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`

 	if err := m.DB().Select(&users, query, token); err != nil {
-		return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
+		return nil, basemodel.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
 	}

 	if len(users) != 1 {
-		return nil, &model.ApiError{
-			Typ: model.ErrorInternal,
+		return nil, &basemodel.ApiError{
+			Typ: basemodel.ErrorInternal,
 			Err: fmt.Errorf("found zero or multiple users with same PAT token"),
 		}
 	}
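Note that RevokePAT is a soft delete: the row keeps its token, and every read path (ListPATs, GetPAT, GetPATByID) now filters on revoked=false. A minimal sketch of how sqlx maps the `SELECT *` in ListPATs onto a struct, assuming db tags along these lines; the actual model.PAT definition lives outside this diff and additionally carries the CreatedByUser/UpdatedByUser fields that are populated in Go code above:

package sketch

import (
	"github.com/jmoiron/sqlx"
)

// PAT mirrors the personal_access_tokens columns created and migrated above;
// the field tags here are an assumption for illustration.
type PAT struct {
	Id              string `db:"id"`
	UserID          string `db:"user_id"`
	Token           string `db:"token"`
	Role            string `db:"role"`
	Name            string `db:"name"`
	CreatedAt       int64  `db:"created_at"`
	ExpiresAt       int64  `db:"expires_at"`
	UpdatedAt       int64  `db:"updated_at"`
	LastUsed        int64  `db:"last_used"`
	Revoked         bool   `db:"revoked"`
	UpdatedByUserID string `db:"updated_by_user_id"`
}

func listActive(db *sqlx.DB) ([]PAT, error) {
	pats := []PAT{}
	// Revoked rows stay in the table but are filtered out of every read.
	err := db.Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;")
	return pats, err
}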
9  ee/query-service/integrations/gateway/noop.go  Normal file
@@ -0,0 +1,9 @@
+package gateway
+
+import (
+	"net/http/httputil"
+)
+
+func NewNoopProxy() (*httputil.ReverseProxy, error) {
+	return nil, nil
+}
66  ee/query-service/integrations/gateway/proxy.go  Normal file
@@ -0,0 +1,66 @@
+package gateway
+
+import (
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"path"
+	"strings"
+)
+
+const (
+	RoutePrefix   string = "/api/gateway"
+	AllowedPrefix string = "/v1/workspaces/me"
+)
+
+type proxy struct {
+	url       *url.URL
+	stripPath string
+}
+
+func NewProxy(u string, stripPath string) (*httputil.ReverseProxy, error) {
+	url, err := url.Parse(u)
+	if err != nil {
+		return nil, err
+	}
+
+	proxy := &proxy{url: url, stripPath: stripPath}
+
+	return &httputil.ReverseProxy{
+		Rewrite:        proxy.rewrite,
+		ModifyResponse: proxy.modifyResponse,
+		ErrorHandler:   proxy.errorHandler,
+	}, nil
+}
+
+func (p *proxy) rewrite(pr *httputil.ProxyRequest) {
+	pr.SetURL(p.url)
+	pr.SetXForwarded()
+	pr.Out.URL.Path = cleanPath(strings.ReplaceAll(pr.Out.URL.Path, p.stripPath, ""))
+}
+
+func (p *proxy) modifyResponse(res *http.Response) error {
+	return nil
+}
+
+func (p *proxy) errorHandler(rw http.ResponseWriter, req *http.Request, err error) {
+	rw.WriteHeader(http.StatusBadGateway)
+}
+
+func cleanPath(p string) string {
+	if p == "" {
+		return "/"
+	}
+	if p[0] != '/' {
+		p = "/" + p
+	}
+	np := path.Clean(p)
+	if p[len(p)-1] == '/' && np != "/" {
+		if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
+			np = p
+		} else {
+			np += "/"
+		}
+	}
+	return np
+}
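The rewrite hook strips stripPath from the incoming path before forwarding, so a request under RoutePrefix reaches the upstream without the prefix. A minimal standalone sketch of that behaviour, using only the exports shown above (the backend and localhost hosts are placeholders):

package gatewaysketch

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httputil"

	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
)

func Example() error {
	rp, err := gateway.NewProxy("http://backend", gateway.RoutePrefix)
	if err != nil {
		return err
	}
	in, err := http.NewRequest(http.MethodGet, "http://localhost/api/gateway/v1/workspaces/me", nil)
	if err != nil {
		return err
	}
	// Rewrite only mutates the outbound clone, never the inbound request.
	pr := &httputil.ProxyRequest{In: in, Out: in.Clone(context.Background())}
	rp.Rewrite(pr)
	fmt.Println(pr.Out.URL.String()) // http://backend/v1/workspaces/me
	return nil
}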
61  ee/query-service/integrations/gateway/proxy_test.go  Normal file
@@ -0,0 +1,61 @@
+package gateway
+
+import (
+	"context"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestProxyRewrite(t *testing.T) {
+	testCases := []struct {
+		name      string
+		url       *url.URL
+		stripPath string
+		in        *url.URL
+		expected  *url.URL
+	}{
+		{
+			name:      "SamePathAdded",
+			url:       &url.URL{Scheme: "http", Host: "backend", Path: "/path1"},
+			stripPath: "/strip",
+			in:        &url.URL{Scheme: "http", Host: "localhost", Path: "/strip/path1"},
+			expected:  &url.URL{Scheme: "http", Host: "backend", Path: "/path1/path1"},
+		},
+		{
+			name:      "NoStripPathInput",
+			url:       &url.URL{Scheme: "http", Host: "backend"},
+			stripPath: "",
+			in:        &url.URL{Scheme: "http", Host: "localhost", Path: "/strip/path1"},
+			expected:  &url.URL{Scheme: "http", Host: "backend", Path: "/strip/path1"},
+		},
+		{
+			name:      "NoStripPathPresentInReq",
+			url:       &url.URL{Scheme: "http", Host: "backend"},
+			stripPath: "/not-found",
+			in:        &url.URL{Scheme: "http", Host: "localhost", Path: "/strip/path1"},
+			expected:  &url.URL{Scheme: "http", Host: "backend", Path: "/strip/path1"},
+		},
+	}

+	for _, tc := range testCases {
+		proxy, err := NewProxy(tc.url.String(), tc.stripPath)
+		require.NoError(t, err)
+		inReq, err := http.NewRequest(http.MethodGet, tc.in.String(), nil)
+		require.NoError(t, err)
+		proxyReq := &httputil.ProxyRequest{
+			In:  inReq,
+			Out: inReq.Clone(context.Background()),
+		}
+		proxy.Rewrite(proxyReq)
+
+		assert.Equal(t, tc.expected.Host, proxyReq.Out.URL.Host)
+		assert.Equal(t, tc.expected.Scheme, proxyReq.Out.URL.Scheme)
+		assert.Equal(t, tc.expected.Path, proxyReq.Out.URL.Path)
+		assert.Equal(t, tc.expected.Query(), proxyReq.Out.URL.Query())
+	}
+}
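Because the rewrite hook is exposed as a plain function on the returned ReverseProxy, the table-driven test above can exercise it without starting a server: it builds an httputil.ProxyRequest by hand (In plus a cloned Out) and asserts on the rewritten outbound URL. Both ProxyRequest and the ReverseProxy.Rewrite field were introduced in Go 1.20, which makes that the effective minimum toolchain for this package.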
@@ -2,11 +2,6 @@ package signozio

 type status string

-const (
-	statusSuccess status = "success"
-	statusError   status = "error"
-)
-
 type ActivationResult struct {
 	Status status              `json:"status"`
 	Data   *ActivationResponse `json:"data,omitempty"`
@@ -6,13 +6,14 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"

 	"github.com/pkg/errors"
+	"go.uber.org/zap"

 	"go.signoz.io/signoz/ee/query-service/constants"
 	"go.signoz.io/signoz/ee/query-service/model"
-	"go.uber.org/zap"
+	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 )

 var C *Client
@@ -37,7 +38,7 @@ func init() {
 }

 // ActivateLicense sends key to license.signoz.io and gets activation data
-func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
+func ActivateLicense(key, siteId string) (*ActivationResponse, *basemodel.ApiError) {
 	licenseReq := map[string]string{
 		"key":    key,
 		"siteId": siteId,
@@ -47,14 +48,14 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
 	httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))

 	if err != nil {
-		zap.S().Errorf("failed to connect to license.signoz.io", err)
-		return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
+		zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
+		return nil, basemodel.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
 	}

-	httpBody, err := ioutil.ReadAll(httpResponse.Body)
+	httpBody, err := io.ReadAll(httpResponse.Body)
 	if err != nil {
-		zap.S().Errorf("failed to read activation response from license.signoz.io", err)
-		return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
+		zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
+		return nil, basemodel.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
 	}

 	defer httpResponse.Body.Close()
@@ -63,23 +64,23 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
 	result := ActivationResult{}
 	err = json.Unmarshal(httpBody, &result)
 	if err != nil {
-		zap.S().Errorf("failed to marshal activation response from license.signoz.io", err)
-		return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
+		zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
+		return nil, basemodel.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
 	}

 	switch httpResponse.StatusCode {
 	case 200, 201:
 		return result.Data, nil
 	case 400, 401:
-		return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
+		return nil, basemodel.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
 	default:
-		return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
+		return nil, basemodel.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
 	}

 }

 // ValidateLicense validates the license key
-func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
+func ValidateLicense(activationId string) (*ActivationResponse, *basemodel.ApiError) {
 	validReq := map[string]string{
 		"activationId": activationId,
 	}
@@ -88,12 +89,12 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
 	response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))

 	if err != nil {
-		return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
+		return nil, basemodel.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
 	}

-	body, err := ioutil.ReadAll(response.Body)
+	body, err := io.ReadAll(response.Body)
 	if err != nil {
-		return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
+		return nil, basemodel.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
 	}

 	defer response.Body.Close()
@@ -103,14 +104,14 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
 		a := ActivationResult{}
 		err = json.Unmarshal(body, &a)
 		if err != nil {
-			return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
+			return nil, basemodel.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
 		}
 		return a.Data, nil
 	case 400, 401:
-		return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
+		return nil, basemodel.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
 			"bad request error received from license.signoz.io"))
 	default:
-		return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
+		return nil, basemodel.InternalError(errors.Wrap(fmt.Errorf(string(body)),
 			"internal error received from license.signoz.io"))
 	}

@@ -127,21 +128,21 @@ func NewPostRequestWithCtx(ctx context.Context, url string, contentType string,
 }

 // SendUsage reports the usage of signoz to license server
-func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
+func SendUsage(ctx context.Context, usage model.UsagePayload) *basemodel.ApiError {
 	reqString, _ := json.Marshal(usage)
 	req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
 	if err != nil {
-		return model.BadRequest(errors.Wrap(err, "unable to create http request"))
+		return basemodel.BadRequest(errors.Wrap(err, "unable to create http request"))
 	}

 	res, err := http.DefaultClient.Do(req)
 	if err != nil {
-		return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
+		return basemodel.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
 	}

 	body, err := io.ReadAll(res.Body)
 	if err != nil {
-		return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
+		return basemodel.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
 	}

 	defer res.Body.Close()
@@ -150,10 +151,10 @@ func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
 	case 200, 201:
 		return nil
 	case 400, 401:
-		return model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
+		return basemodel.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
 			"bad request error received from license.signoz.io"))
 	default:
-		return model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
+		return basemodel.InternalError(errors.Wrap(fmt.Errorf(string(body)),
 			"internal error received from license.signoz.io"))
 	}
 }
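A recurring fix in this file (and across the changeset) is the move from zap.S().Errorf to zap.L().Error. The old calls passed the error as a stray printf argument with no matching verb, so fmt appended a %!(EXTRA ...) blob to the message; the core logger takes typed fields instead. A minimal, self-contained illustration of the before/after:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	zap.ReplaceGlobals(logger)
	defer logger.Sync()

	err := errors.New("connection refused")

	// Old style: Errorf is Sprintf-like, so a bare err argument with no verb
	// renders as "...%!(EXTRA *errors.errorString=connection refused)".
	zap.S().Errorf("failed to connect to license.signoz.io", err)

	// New style: the error travels as a structured field.
	zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
}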
@@ -48,8 +48,9 @@ func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
 	return licenses, nil
 }

-// GetActiveLicense fetches the latest active license from DB
-func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, error) {
+// GetActiveLicense fetches the latest active license from DB.
+// If the license is not present, expect a nil license and a nil error in the output.
+func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
 	var err error
 	licenses := []model.License{}

@@ -57,7 +58,7 @@ func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, error) {

 	err = r.db.Select(&licenses, query)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get active licenses from db: %v", err)
+		return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
 	}

 	var active *model.License
@@ -97,7 +98,7 @@ func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
 		l.ValidationMessage)

 	if err != nil {
-		zap.S().Errorf("error in inserting license data: ", zap.Error(err))
+		zap.L().Error("error in inserting license data: ", zap.Error(err))
 		return fmt.Errorf("failed to insert license in db: %v", err)
 	}

@@ -110,7 +111,7 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,
 	planDetails string) error {

 	if key == "" {
-		return fmt.Errorf("Update Plan Details failed: license key is required")
+		return fmt.Errorf("update plan details failed: license key is required")
 	}

 	query := `UPDATE licenses
@@ -121,7 +122,7 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,
 	_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)

 	if err != nil {
-		zap.S().Errorf("error in updating license: ", zap.Error(err))
+		zap.L().Error("error in updating license: ", zap.Error(err))
 		return fmt.Errorf("failed to update license in db: %v", err)
 	}
@@ -10,6 +10,7 @@ import (

 	"sync"

+	"go.signoz.io/signoz/pkg/query-service/auth"
 	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"

 	validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
@@ -48,8 +49,7 @@ type Manager struct {
 	activeFeatures basemodel.FeatureSet
 }

-func StartManager(dbType string, db *sqlx.DB) (*Manager, error) {
-
+func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
 	if LM != nil {
 		return LM, nil
 	}
@@ -65,7 +65,7 @@ func StartManager(dbType string, db *sqlx.DB) (*Manager, error) {
 		repo: &repo,
 	}

-	if err := m.start(); err != nil {
+	if err := m.start(features...); err != nil {
 		return m, err
 	}
 	LM = m
@@ -73,8 +73,8 @@ func StartManager(dbType string, db *sqlx.DB) (*Manager, error) {
 }

 // start loads active license in memory and initiates validator
-func (lm *Manager) start() error {
-	err := lm.LoadActiveLicense()
+func (lm *Manager) start(features ...basemodel.Feature) error {
+	err := lm.LoadActiveLicense(features...)

 	return err
 }
@@ -84,7 +84,7 @@ func (lm *Manager) Stop() {
 	<-lm.terminated
 }

-func (lm *Manager) SetActive(l *model.License) {
+func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
 	lm.mutex.Lock()
 	defer lm.mutex.Unlock()

@@ -93,13 +93,13 @@ func (lm *Manager) SetActive(l *model.License) {
 	}

 	lm.activeLicense = l
-	lm.activeFeatures = l.FeatureSet
+	lm.activeFeatures = append(l.FeatureSet, features...)
 	// set default features
 	setDefaultFeatures(lm)

 	err := lm.InitFeatures(lm.activeFeatures)
 	if err != nil {
-		zap.S().Panicf("Couldn't activate features: %v", err)
+		zap.L().Panic("Couldn't activate features", zap.Error(err))
 	}
 	if !lm.validatorRunning {
 		// we want to make sure only one validator runs,
@@ -115,22 +115,21 @@ func setDefaultFeatures(lm *Manager) {
 }

 // LoadActiveLicense loads the most recent active license
-func (lm *Manager) LoadActiveLicense() error {
-	var err error
+func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
 	active, err := lm.repo.GetActiveLicense(context.Background())
 	if err != nil {
 		return err
 	}
 	if active != nil {
-		lm.SetActive(active)
+		lm.SetActive(active, features...)
 	} else {
-		zap.S().Info("No active license found, defaulting to basic plan")
+		zap.L().Info("No active license found, defaulting to basic plan")
 		// if no active license is found, we default to basic(free) plan with all default features
 		lm.activeFeatures = model.BasicPlan
 		setDefaultFeatures(lm)
 		err := lm.InitFeatures(lm.activeFeatures)
 		if err != nil {
-			zap.S().Error("Couldn't initialize features: ", err)
+			zap.L().Error("Couldn't initialize features", zap.Error(err))
 			return err
 		}
 	}
@@ -138,11 +137,11 @@ func (lm *Manager) LoadActiveLicense() error {
 	return nil
 }

-func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
+func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *basemodel.ApiError) {

 	licenses, err := lm.repo.GetLicenses(ctx)
 	if err != nil {
-		return nil, model.InternalError(err)
+		return nil, basemodel.InternalError(err)
 	}

 	for _, l := range licenses {
@@ -190,7 +189,7 @@ func (lm *Manager) Validator(ctx context.Context) {

 // Validate validates the current active license
 func (lm *Manager) Validate(ctx context.Context) (reterr error) {
-	zap.S().Info("License validation started")
+	zap.L().Info("License validation started")
 	if lm.activeLicense == nil {
 		return nil
 	}
@@ -200,12 +199,12 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {

 		lm.lastValidated = time.Now().Unix()
 		if reterr != nil {
-			zap.S().Errorf("License validation completed with error", reterr)
+			zap.L().Error("License validation completed with error", zap.Error(reterr))
 			atomic.AddUint64(&lm.failedAttempts, 1)
 			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
-				map[string]interface{}{"err": reterr.Error()})
+				map[string]interface{}{"err": reterr.Error()}, "", true, false)
 		} else {
-			zap.S().Info("License validation completed with no errors")
+			zap.L().Info("License validation completed with no errors")
 		}

 		lm.mutex.Unlock()
@@ -213,8 +212,8 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {

 	response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
 	if apiError != nil {
-		zap.S().Errorf("failed to validate license", apiError)
-		return apiError.Err
+		zap.L().Error("failed to validate license", zap.Any("apiError", apiError))
+		return apiError
 	}

 	if response.PlanDetails == lm.activeLicense.PlanDetails {
@@ -234,7 +233,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
 	}

 	if err := l.ParsePlan(); err != nil {
-		zap.S().Errorf("failed to parse updated license", zap.Error(err))
+		zap.L().Error("failed to parse updated license", zap.Error(err))
 		return err
 	}

@@ -244,7 +243,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
 	if err != nil {
 		// unexpected db write issue but we can let the user continue
 		// and wait for update to work in next cycle.
-		zap.S().Errorf("failed to validate license", zap.Error(err))
+		zap.L().Error("failed to validate license", zap.Error(err))
 	}
 }

@@ -256,17 +255,20 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
 }

 // Activate activates a license key with signoz server
-func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
+func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *basemodel.ApiError) {
 	defer func() {
 		if errResponse != nil {
-			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
-				map[string]interface{}{"err": errResponse.Err.Error()})
+			userEmail, err := auth.GetEmailFromJwt(ctx)
+			if err == nil {
+				telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
+					map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false)
+			}
 		}
 	}()

 	response, apiError := validate.ActivateLicense(key, "")
 	if apiError != nil {
-		zap.S().Errorf("failed to activate license", zap.Error(apiError.Err))
+		zap.L().Error("failed to activate license", zap.Any("apiError", apiError))
 		return nil, apiError
 	}

@@ -280,15 +282,15 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
 	err := l.ParsePlan()

 	if err != nil {
-		zap.S().Errorf("failed to activate license", zap.Error(err))
-		return nil, model.InternalError(err)
+		zap.L().Error("failed to activate license", zap.Error(err))
+		return nil, basemodel.InternalError(err)
 	}

 	// store the license before activating it
 	err = lm.repo.InsertLicense(ctx, l)
 	if err != nil {
-		zap.S().Errorf("failed to activate license", zap.Error(err))
-		return nil, model.InternalError(err)
+		zap.L().Error("failed to activate license", zap.Error(err))
+		return nil, basemodel.InternalError(err)
 	}

 	// license is valid, activate it
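StartManager now threads extra features through start → LoadActiveLicense → SetActive, where they are appended to the license's own FeatureSet before InitFeatures runs. A minimal sketch of a caller, assuming the manager lives under ee/query-service/license and that basemodel.Feature carries a Name field (both are assumptions; the exact shapes are defined outside this excerpt):

package sketch

import (
	"github.com/jmoiron/sqlx"

	license "go.signoz.io/signoz/ee/query-service/license"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func startWithExtras(db *sqlx.DB) (*license.Manager, error) {
	// Hypothetical extra feature flag; it rides along with whatever the
	// active license grants, because SetActive appends it to l.FeatureSet.
	extra := basemodel.Feature{Name: "GATEWAY"}
	return license.StartManager("sqlite", db, extra)
}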
@@ -32,7 +32,7 @@ func InitDB(db *sqlx.DB) error {

 	_, err = db.Exec(table_schema)
 	if err != nil {
-		return fmt.Errorf("Error in creating licenses table: %s", err.Error())
+		return fmt.Errorf("error in creating licenses table: %s", err.Error())
 	}

 	table_schema = `CREATE TABLE IF NOT EXISTS feature_status (
@@ -45,7 +45,7 @@ func InitDB(db *sqlx.DB) error {

 	_, err = db.Exec(table_schema)
 	if err != nil {
-		return fmt.Errorf("Error in creating feature_status table: %s", err.Error())
+		return fmt.Errorf("error in creating feature_status table: %s", err.Error())
 	}

 	return nil
@@ -14,10 +14,11 @@ import (
 	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
 	"go.signoz.io/signoz/ee/query-service/app"
 	"go.signoz.io/signoz/pkg/query-service/auth"
-	"go.signoz.io/signoz/pkg/query-service/constants"
 	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
+	"go.signoz.io/signoz/pkg/query-service/migrate"
 	"go.signoz.io/signoz/pkg/query-service/version"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"

 	zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
 	zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"
@@ -27,18 +28,19 @@ import (
 )

 func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
-	config := zap.NewDevelopmentConfig()
+	config := zap.NewProductionConfig()
 	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
 	defer stop()

-	config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
-	otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
-	consoleEncoder := zapcore.NewConsoleEncoder(config.EncoderConfig)
-	defaultLogLevel := zapcore.DebugLevel
-	config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
+	config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
+	config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
 	config.EncoderConfig.TimeKey = "timestamp"
 	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

+	otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
+	consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig)
+	defaultLogLevel := zapcore.InfoLevel
+
 	res := resource.NewWithAttributes(
 		semconv.SchemaURL,
 		semconv.ServiceNameKey.String("query-service"),
@@ -48,14 +50,16 @@ func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
 		zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
 	)

-	if enableQueryServiceLogOTLPExport == true {
-		conn, err := grpc.DialContext(ctx, constants.OTLPTarget, grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(time.Second*30))
+	if enableQueryServiceLogOTLPExport {
+		ctx, cancel := context.WithTimeout(ctx, time.Second*30)
+		defer cancel()
+		conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
 		if err != nil {
-			log.Println("failed to connect to otlp collector to export query service logs with error:", err)
+			log.Fatalf("failed to establish connection: %v", err)
 		} else {
 			logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
 			if err != nil {
-				logExportBatchSizeInt = 1000
+				logExportBatchSizeInt = 512
 			}
 			ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
 				BatchSize: logExportBatchSizeInt,
@@ -81,6 +85,7 @@ func main() {

 	// the url used to build link in the alert messages in slack and other systems
 	var ruleRepoURL string
+	var cluster string

 	var cacheConfigPath, fluxInterval string
 	var enableQueryServiceLogOTLPExport bool
@@ -90,6 +95,7 @@ func main() {
 	var maxIdleConns int
 	var maxOpenConns int
 	var dialTimeout time.Duration
+	var gatewayUrl string

 	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
 	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
@@ -103,6 +109,8 @@ func main() {
 	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
 	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)")
 	flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
+	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
+	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")

 	flag.Parse()

@@ -111,7 +119,6 @@ func main() {
 	zap.ReplaceGlobals(loggerMgr)
 	defer loggerMgr.Sync() // flushes buffer, if any

-	logger := loggerMgr.Sugar()
 	version.PrintVersion()

 	serverOptions := &app.ServerOptions{
@@ -128,28 +135,36 @@ func main() {
 		DialTimeout:     dialTimeout,
 		CacheConfigPath: cacheConfigPath,
 		FluxInterval:    fluxInterval,
+		Cluster:         cluster,
+		GatewayUrl:      gatewayUrl,
 	}

 	// Read the jwt secret key
 	auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")

 	if len(auth.JwtSecret) == 0 {
-		zap.S().Warn("No JWT secret key is specified.")
+		zap.L().Warn("No JWT secret key is specified.")
 	} else {
-		zap.S().Info("No JWT secret key set successfully.")
+		zap.L().Info("JWT secret key set successfully.")
+	}
+
+	if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
+		zap.L().Error("Failed to migrate", zap.Error(err))
+	} else {
+		zap.L().Info("Migration successful")
 	}

 	server, err := app.NewServer(serverOptions)
 	if err != nil {
-		logger.Fatal("Failed to create server", zap.Error(err))
+		zap.L().Fatal("Failed to create server", zap.Error(err))
 	}

 	if err := server.Start(); err != nil {
-		logger.Fatal("Could not start servers", zap.Error(err))
+		zap.L().Fatal("Could not start server", zap.Error(err))
 	}

 	if err := auth.InitAuthCache(context.Background()); err != nil {
-		logger.Fatal("Failed to initialize auth cache", zap.Error(err))
+		zap.L().Fatal("Failed to initialize auth cache", zap.Error(err))
 	}

 	signalsChannel := make(chan os.Signal, 1)
@@ -158,9 +173,9 @@ func main() {
 	for {
 		select {
 		case status := <-server.HealthCheckStatus():
-			logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
+			zap.L().Info("Received HealthCheck status: ", zap.Int("status", int(status)))
 		case <-signalsChannel:
-			logger.Fatal("Received OS Interrupt Signal ... ")
+			zap.L().Fatal("Received OS Interrupt Signal ... ")
 			server.Stop()
 		}
 	}
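The gRPC dial in initZapLog swaps two deprecated options for their modern equivalents: grpc.WithInsecure becomes grpc.WithTransportCredentials(insecure.NewCredentials()), and grpc.WithTimeout becomes a context deadline. A minimal sketch of the pattern in isolation; the collector address here is a placeholder:

package sketch

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialCollector() (*grpc.ClientConn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// WithBlock makes DialContext wait until the connection is up (or the
	// ctx deadline fires), mirroring the old WithTimeout behaviour; the
	// deadline only bounds dialing, not the lifetime of the returned conn.
	return grpc.DialContext(ctx, "localhost:4317",
		grpc.WithBlock(),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
}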
@@ -9,8 +9,8 @@ import (
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
 	saml2 "github.com/russellhaering/gosaml2"
-	"go.signoz.io/signoz/ee/query-service/sso/saml"
 	"go.signoz.io/signoz/ee/query-service/sso"
+	"go.signoz.io/signoz/ee/query-service/sso/saml"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 	"go.uber.org/zap"
 )
@@ -24,16 +24,16 @@ const (

 // OrgDomain identify org owned web domains for auth and other purposes
 type OrgDomain struct {
 	Id         uuid.UUID `json:"id"`
 	Name       string    `json:"name"`
 	OrgId      string    `json:"orgId"`
 	SsoEnabled bool      `json:"ssoEnabled"`
 	SsoType    SSOType   `json:"ssoType"`

 	SamlConfig       *SamlConfig        `json:"samlConfig"`
 	GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`

 	Org *basemodel.Organization
 }

 func (od *OrgDomain) String() string {
@@ -100,11 +100,11 @@ func (od *OrgDomain) GetSAMLCert() string {
 	return ""
 }

 // PrepareGoogleOAuthProvider creates GoogleProvider that is used in
 // requesting OAuth and also used in processing response from google
 func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
 	if od.GoogleAuthConfig == nil {
-		return nil, fmt.Errorf("Google auth is not setup correctly for this domain")
+		return nil, fmt.Errorf("GOOGLE OAUTH is not setup correctly for this domain")
 	}

 	return od.GoogleAuthConfig.GetProvider(od.Name, siteUrl)
@@ -137,38 +137,36 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro
 }

 func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
-
 	fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)

 	// build redirect url from window.location sent by frontend
 	redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)

 	// prepare state that gets relayed back when the auth provider
 	// calls back our url. here we pass the app url (where signoz runs)
 	// and the domain Id. The domain Id helps in identifying sso config
 	// when the call back occurs and the app url is useful in redirecting user
 	// back to the right path.
 	// why do we need to pass app url? the callback typically is handled by backend
 	// and sometimes backend might right at a different port or is unaware of frontend
 	// endpoint (unless SITE_URL param is set). hence, we receive this build sso request
 	// along with frontend window.location and use it to relay the information through
 	// auth provider to the backend (HandleCallback or HandleSSO method).
 	relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)

-
-	switch (od.SsoType) {
+	switch od.SsoType {
 	case SAML:

 		sp, err := od.PrepareSamlRequest(siteUrl)
 		if err != nil {
 			return "", err
 		}

 		return sp.BuildAuthURL(relayState)

 	case GoogleAuth:

 		googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl)
 		if err != nil {
 			return "", err
@@ -176,9 +174,8 @@ func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
 		return googleProvider.BuildAuthURL(relayState)

 	default:
-		zap.S().Errorf("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
+		zap.L().Error("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
 		return "", fmt.Errorf("unsupported SSO config for the domain")
 	}

-
 }
|
|||||||
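The relay-state scheme described in the comments above round-trips two pieces of information through the identity provider: the frontend URL to return to and the domain id that selects the SSO config. A minimal sketch of how a callback handler could unpack such a state value (the handler shape and sample values are illustrative, not SigNoz's actual HandleCallback/HandleSSO signatures):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Relay state in the format built above: "<app url>?domainId=<id with '-' replaced by ':'>".
	relayState := "https://signoz.example.com/api/v1/complete/saml?domainId=0191:abcd:ef01"

	u, err := url.Parse(relayState)
	if err != nil {
		panic(err)
	}

	// Recover the app URL the user should be redirected back to...
	redirectURL := fmt.Sprintf("%s://%s%s", u.Scheme, u.Host, u.Path)

	// ...and the domain id, undoing the '-' -> ':' substitution.
	domainId := strings.Replace(u.Query().Get("domainId"), ":", "-", -1)

	fmt.Println(redirectURL, domainId)
}
```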
@@ -1,107 +1,5 @@
 package model

-import (
-	"fmt"
-
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-)
-
-type ApiError struct {
-	Typ basemodel.ErrorType
-	Err error
-}
-
-func (a *ApiError) Type() basemodel.ErrorType {
-	return a.Typ
-}
-
-func (a *ApiError) ToError() error {
-	if a != nil {
-		return a.Err
-	}
-	return a.Err
-}
-
-func (a *ApiError) Error() string {
-	return a.Err.Error()
-}
-
-func (a *ApiError) IsNil() bool {
-	return a == nil || a.Err == nil
-}
-
-// NewApiError returns a ApiError object of given type
-func NewApiError(typ basemodel.ErrorType, err error) *ApiError {
-	return &ApiError{
-		Typ: typ,
-		Err: err,
-	}
-}
-
-// BadRequest returns a ApiError object of bad request
-func BadRequest(err error) *ApiError {
-	return &ApiError{
-		Typ: basemodel.ErrorBadData,
-		Err: err,
-	}
-}
-
-// BadRequestStr returns a ApiError object of bad request for string input
-func BadRequestStr(s string) *ApiError {
-	return &ApiError{
-		Typ: basemodel.ErrorBadData,
-		Err: fmt.Errorf(s),
-	}
-}
-
-// InternalError returns a ApiError object of internal type
-func InternalError(err error) *ApiError {
-	return &ApiError{
-		Typ: basemodel.ErrorInternal,
-		Err: err,
-	}
-}
-
-// InternalErrorStr returns a ApiError object of internal type for string input
-func InternalErrorStr(s string) *ApiError {
-	return &ApiError{
-		Typ: basemodel.ErrorInternal,
-		Err: fmt.Errorf(s),
-	}
-}
-
-var (
-	ErrorNone                  basemodel.ErrorType = ""
-	ErrorTimeout               basemodel.ErrorType = "timeout"
-	ErrorCanceled              basemodel.ErrorType = "canceled"
-	ErrorExec                  basemodel.ErrorType = "execution"
-	ErrorBadData               basemodel.ErrorType = "bad_data"
-	ErrorInternal              basemodel.ErrorType = "internal"
-	ErrorUnavailable           basemodel.ErrorType = "unavailable"
-	ErrorNotFound              basemodel.ErrorType = "not_found"
-	ErrorNotImplemented        basemodel.ErrorType = "not_implemented"
-	ErrorUnauthorized          basemodel.ErrorType = "unauthorized"
-	ErrorForbidden             basemodel.ErrorType = "forbidden"
-	ErrorConflict              basemodel.ErrorType = "conflict"
-	ErrorStreamingNotSupported basemodel.ErrorType = "streaming is not supported"
-)
-
-func init() {
-	ErrorNone = basemodel.ErrorNone
-	ErrorTimeout = basemodel.ErrorTimeout
-	ErrorCanceled = basemodel.ErrorCanceled
-	ErrorExec = basemodel.ErrorExec
-	ErrorBadData = basemodel.ErrorBadData
-	ErrorInternal = basemodel.ErrorInternal
-	ErrorUnavailable = basemodel.ErrorUnavailable
-	ErrorNotFound = basemodel.ErrorNotFound
-	ErrorNotImplemented = basemodel.ErrorNotImplemented
-	ErrorUnauthorized = basemodel.ErrorUnauthorized
-	ErrorForbidden = basemodel.ErrorForbidden
-	ErrorConflict = basemodel.ErrorConflict
-	ErrorStreamingNotSupported = basemodel.ErrorStreamingNotSupported
-}
-
 type ErrUnsupportedAuth struct{}

 func (errUnsupportedAuth ErrUnsupportedAuth) Error() string {
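The lines deleted here (the ee-local ApiError, its constructors, and the error-type constants re-exported from basemodel) follow a common Go pattern: an error value tagged with a machine-readable type so HTTP handlers can pick a status code. A standalone sketch of the same pattern, kept deliberately independent of the SigNoz packages:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// ErrorType categorizes an API error, mirroring the deleted constants.
type ErrorType string

const (
	ErrorBadData  ErrorType = "bad_data"
	ErrorInternal ErrorType = "internal"
)

// ApiError pairs an underlying error with its category.
type ApiError struct {
	Typ ErrorType
	Err error
}

func (a *ApiError) Error() string { return a.Err.Error() }

// statusCode maps a typed error onto an HTTP status.
func statusCode(a *ApiError) int {
	switch a.Typ {
	case ErrorBadData:
		return http.StatusBadRequest
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	apiErr := &ApiError{Typ: ErrorBadData, Err: errors.New("missing domainId")}
	fmt.Println(statusCode(apiErr), apiErr) // 400 missing domainId
}
```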
@@ -89,3 +89,18 @@ func (l *License) ParseFeatures() {
 		l.FeatureSet = BasicPlan
 	}
 }
+
+type Licenses struct {
+	TrialStart                   int64     `json:"trialStart"`
+	TrialEnd                     int64     `json:"trialEnd"`
+	OnTrial                      bool      `json:"onTrial"`
+	WorkSpaceBlock               bool      `json:"workSpaceBlock"`
+	TrialConvertedToSubscription bool      `json:"trialConvertedToSubscription"`
+	GracePeriodEnd               int64     `json:"gracePeriodEnd"`
+	Licenses                     []License `json:"licenses"`
+}
+
+type SubscriptionServerResp struct {
+	Status string   `json:"status"`
+	Data   Licenses `json:"data"`
+}
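A quick sketch of how a response in this shape decodes with encoding/json; the payload values here are made up for illustration, and the real license-server response may carry more fields:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the SubscriptionServerResp / Licenses shape added above,
// trimmed to two fields for the example.
type Licenses struct {
	TrialStart int64 `json:"trialStart"`
	OnTrial    bool  `json:"onTrial"`
}

type SubscriptionServerResp struct {
	Status string   `json:"status"`
	Data   Licenses `json:"data"`
}

func main() {
	// Hypothetical payload for the sketch.
	body := []byte(`{"status":"success","data":{"trialStart":1700000000,"onTrial":true}}`)

	var resp SubscriptionServerResp
	if err := json.Unmarshal(body, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, resp.Data.OnTrial) // success true
}
```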
@@ -1,10 +1,32 @@
 package model

-type PAT struct {
+type User struct {
 	Id                string `json:"id" db:"id"`
-	UserID    string `json:"userId" db:"user_id"`
-	Token     string `json:"token" db:"token"`
-	Name      string `json:"name" db:"name"`
-	CreatedAt int64  `json:"createdAt" db:"created_at"`
-	ExpiresAt int64  `json:"expiresAt" db:"expires_at"` // unused as of now
+	Name              string `json:"name" db:"name"`
+	Email             string `json:"email" db:"email"`
+	CreatedAt         int64  `json:"createdAt" db:"created_at"`
+	ProfilePictureURL string `json:"profilePictureURL" db:"profile_picture_url"`
+	NotFound          bool   `json:"notFound"`
+}
+
+type CreatePATRequestBody struct {
+	Name          string `json:"name"`
+	Role          string `json:"role"`
+	ExpiresInDays int64  `json:"expiresInDays"`
+}
+
+type PAT struct {
+	Id              string `json:"id" db:"id"`
+	UserID          string `json:"userId" db:"user_id"`
+	CreatedByUser   User   `json:"createdByUser"`
+	UpdatedByUser   User   `json:"updatedByUser"`
+	Token           string `json:"token" db:"token"`
+	Role            string `json:"role" db:"role"`
+	Name            string `json:"name" db:"name"`
+	CreatedAt       int64  `json:"createdAt" db:"created_at"`
+	ExpiresAt       int64  `json:"expiresAt" db:"expires_at"`
+	UpdatedAt       int64  `json:"updatedAt" db:"updated_at"`
+	LastUsed        int64  `json:"lastUsed" db:"last_used"`
+	Revoked         bool   `json:"revoked" db:"revoked"`
+	UpdatedByUserID string `json:"updatedByUserId" db:"updated_by_user_id"`
 }
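The `db` tags line up with sqlx's struct mapping. A hedged sketch of loading rows into a PAT-like struct; the table name and schema here are assumptions for the sake of a runnable example, not taken from this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// PAT trimmed to a few of the db-tagged fields from the struct above.
type PAT struct {
	Id        string `db:"id"`
	UserID    string `db:"user_id"`
	Revoked   bool   `db:"revoked"`
	ExpiresAt int64  `db:"expires_at"`
}

func main() {
	db := sqlx.MustOpen("sqlite3", ":memory:")
	defer db.Close()

	// Hypothetical table for the sketch; the real schema lives in the dao layer.
	db.MustExec(`CREATE TABLE personal_access_tokens
	             (id TEXT, user_id TEXT, revoked BOOL, expires_at INTEGER)`)
	db.MustExec(`INSERT INTO personal_access_tokens VALUES ('p1', 'u1', 0, 1700003600)`)

	// sqlx maps columns onto struct fields via the db tags.
	pats := []PAT{}
	if err := db.Select(&pats, `SELECT * FROM personal_access_tokens WHERE revoked = 0`); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", pats)
}
```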
@@ -52,14 +52,14 @@ var BasicPlan = basemodel.FeatureSet{
 		Name:       basemodel.QueryBuilderPanels,
 		Active:     true,
 		Usage:      0,
-		UsageLimit: 5,
+		UsageLimit: -1,
 		Route:      "",
 	},
 	basemodel.Feature{
 		Name:       basemodel.QueryBuilderAlerts,
 		Active:     true,
 		Usage:      0,
-		UsageLimit: 5,
+		UsageLimit: -1,
 		Route:      "",
 	},
 	basemodel.Feature{
@@ -90,6 +90,13 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.AlertChannelEmail,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 	basemodel.Feature{
 		Name:   basemodel.AlertChannelMsTeams,
 		Active: false,
@@ -177,6 +184,13 @@ var ProPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.AlertChannelEmail,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 	basemodel.Feature{
 		Name:   basemodel.AlertChannelMsTeams,
 		Active: true,
@@ -264,6 +278,13 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.AlertChannelEmail,
+		Active:     true,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 	basemodel.Feature{
 		Name:   basemodel.AlertChannelMsTeams,
 		Active: true,
@@ -279,17 +300,17 @@ var EnterprisePlan = basemodel.FeatureSet{
 		Route:      "",
 	},
 	basemodel.Feature{
 		Name:       Onboarding,
 		Active:     true,
 		Usage:      0,
 		UsageLimit: -1,
 		Route:      "",
 	},
 	basemodel.Feature{
 		Name:       ChatSupport,
 		Active:     true,
 		Usage:      0,
 		UsageLimit: -1,
 		Route:      "",
 	},
 }
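The plan change replaces the hard cap of 5 with -1, which the surrounding entries suggest is the convention for "no limit". A hedged sketch of a gate honoring that convention; the Feature fields mirror the diff, but the allowed helper is illustrative, not the repo's actual feature-flag API:

```go
package main

import "fmt"

// Feature mirrors the fields used in the plan definitions above.
type Feature struct {
	Name       string
	Active     bool
	Usage      int64
	UsageLimit int64 // -1 is treated as unlimited
}

// allowed reports whether one more use of the feature is permitted.
func allowed(f Feature) bool {
	if !f.Active {
		return false
	}
	if f.UsageLimit < 0 { // -1: no cap
		return true
	}
	return f.Usage < f.UsageLimit
}

func main() {
	panels := Feature{Name: "QUERY_BUILDER_PANELS", Active: true, Usage: 12, UsageLimit: -1}
	fmt.Println(allowed(panels)) // true: unlimited after the change from 5 to -1
}
```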
@@ -20,6 +20,8 @@ type Usage struct {
 	TimeStamp time.Time `json:"timestamp"`
 	Count     int64     `json:"count"`
 	Size      int64     `json:"size"`
+	OrgName   string    `json:"orgName"`
+	TenantId  string    `json:"tenantId"`
 }

 type UsageDB struct {
@@ -102,6 +102,6 @@ func PrepareRequest(issuer, acsUrl, audience, entity, idp, certString string) (*
 		IDPCertificateStore: certStore,
 		SPKeyStore:          randomKeyStore,
 	}
-	zap.S().Debugf("SAML request:", sp)
+	zap.L().Debug("SAML request", zap.Any("sp", sp))
 	return sp, nil
 }
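This branch repeatedly swaps zap's sugared logger (zap.S(), printf-style) for the structured one (zap.L(), typed fields), as in the hunk above. A minimal runnable contrast of the two call styles:

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger) // zap.L() and zap.S() now use this logger

	err := errors.New("license server timeout")

	// Sugared logger: printf-style, values formatted into the message string.
	zap.S().Errorf("failed to get active license: %v", err)

	// Structured logger: constant message plus typed fields (the style this
	// branch migrates to), which keeps the fields machine-parseable.
	zap.L().Error("failed to get active license", zap.Error(err))
}
```

Worth noting: a few converted call sites in this diff still carry a stray `%v` in the message string; zap.L() does no printf interpolation, so that verb is emitted literally while the field is logged separately.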
@@ -4,6 +4,8 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"os"
+	"regexp"
 	"strings"
 	"sync/atomic"
 	"time"
@@ -11,10 +13,10 @@ import (
 	"github.com/ClickHouse/clickhouse-go/v2"
 	"github.com/go-co-op/gocron"
 	"github.com/google/uuid"
-	"github.com/jmoiron/sqlx"

 	"go.uber.org/zap"

+	"go.signoz.io/signoz/ee/query-service/dao"
 	licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
 	"go.signoz.io/signoz/ee/query-service/license"
 	"go.signoz.io/signoz/ee/query-service/model"
@@ -38,15 +40,29 @@ type Manager struct {
 	licenseRepo *license.Repo

 	scheduler *gocron.Scheduler

+	modelDao dao.ModelDao
+
+	tenantID string
 }

-func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
+func New(dbType string, modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
+	hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
+	hostNameRegexMatches := hostNameRegex.FindStringSubmatch(os.Getenv("ClickHouseUrl"))
+
+	tenantID := ""
+	if len(hostNameRegexMatches) == 2 {
+		tenantID = hostNameRegexMatches[1]
+		tenantID = strings.TrimSuffix(tenantID, "-clickhouse")
+	}
+
 	m := &Manager{
 		// repository: repo,
 		clickhouseConn: clickhouseConn,
 		licenseRepo:    licenseRepo,
 		scheduler:      gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every at 00:00 UTC
+		modelDao:       modelDao,
+		tenantID:       tenantID,
 	}
 	return m, nil
 }
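New() now derives a tenant ID by regexing the hostname out of the ClickHouseUrl environment variable and stripping a -clickhouse suffix. A runnable sketch of that extraction on a made-up URL:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Same pattern as the diff: capture everything between "tcp://" and the port colon.
	hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)

	// Hypothetical value; in the service this comes from os.Getenv("ClickHouseUrl").
	url := "tcp://acme-clickhouse:9000"

	matches := hostNameRegex.FindStringSubmatch(url)
	tenantID := ""
	if len(matches) == 2 { // matches = [full match, hostname capture]
		tenantID = strings.TrimSuffix(matches[1], "-clickhouse")
	}
	fmt.Println(tenantID) // acme
}
```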
@@ -75,12 +91,12 @@ func (lm *Manager) UploadUsage() {
 	// check if license is present or not
 	license, err := lm.licenseRepo.GetActiveLicense(ctx)
 	if err != nil {
-		zap.S().Errorf("failed to get active license: %v", zap.Error(err))
+		zap.L().Error("failed to get active license", zap.Error(err))
 		return
 	}
 	if license == nil {
 		// we will not start the usage reporting if license is not present.
-		zap.S().Info("no license present, skipping usage reporting")
+		zap.L().Info("no license present, skipping usage reporting")
 		return
 	}

@@ -107,7 +123,7 @@ func (lm *Manager) UploadUsage() {
 		dbusages := []model.UsageDB{}
 		err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
 		if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
-			zap.S().Errorf("failed to get usage from clickhouse: %v", zap.Error(err))
+			zap.L().Error("failed to get usage from clickhouse: %v", zap.Error(err))
 			return
 		}
 		for _, u := range dbusages {
@@ -117,24 +133,33 @@ func (lm *Manager) UploadUsage() {
 	}

 	if len(usages) <= 0 {
-		zap.S().Info("no snapshots to upload, skipping.")
+		zap.L().Info("no snapshots to upload, skipping.")
 		return
 	}

-	zap.S().Info("uploading usage data")
+	zap.L().Info("uploading usage data")
+
+	orgName := ""
+	orgNames, orgError := lm.modelDao.GetOrgs(ctx)
+	if orgError != nil {
+		zap.L().Error("failed to get org data: %v", zap.Error(orgError))
+	}
+	if len(orgNames) == 1 {
+		orgName = orgNames[0].Name
+	}

 	usagesPayload := []model.Usage{}
 	for _, usage := range usages {
 		usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
 		if err != nil {
-			zap.S().Errorf("error while decrypting usage data: %v", zap.Error(err))
+			zap.L().Error("error while decrypting usage data: %v", zap.Error(err))
 			return
 		}

 		usageData := model.Usage{}
 		err = json.Unmarshal(usageDataBytes, &usageData)
 		if err != nil {
-			zap.S().Errorf("error while unmarshalling usage data: %v", zap.Error(err))
+			zap.L().Error("error while unmarshalling usage data: %v", zap.Error(err))
 			return
 		}

@@ -142,6 +167,8 @@ func (lm *Manager) UploadUsage() {
 		usageData.ExporterID = usage.ExporterID
 		usageData.Type = usage.Type
 		usageData.Tenant = usage.Tenant
+		usageData.OrgName = orgName
+		usageData.TenantId = lm.tenantID
 		usagesPayload = append(usagesPayload, usageData)
 	}

@@ -157,13 +184,13 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
 	for i := 1; i <= MaxRetries; i++ {
 		apiErr := licenseserver.SendUsage(ctx, payload)
 		if apiErr != nil && i == MaxRetries {
-			zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
+			zap.L().Error("retries stopped : %v", zap.Error(apiErr))
 			// not returning error here since it is captured in the failed count
 			return
 		} else if apiErr != nil {
 			// sleeping for exponential backoff
 			sleepDuration := RetryInterval * time.Duration(i)
-			zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err))
+			zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Any("apiErr", apiErr))
 			time.Sleep(sleepDuration)
 		} else {
 			break
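Despite the function's name and the inline comment, the delay here grows linearly (RetryInterval scaled by the attempt number), not exponentially. A small sketch of both schedules side by side, using an assumed 5-minute base interval since the real RetryInterval constant is defined elsewhere in the package:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const maxRetries = 3
	retryInterval := 5 * time.Minute // assumed base value for the sketch

	for i := 1; i <= maxRetries; i++ {
		// What the loop above computes: interval scaled by the attempt number.
		linear := retryInterval * time.Duration(i) // 5m, 10m, 15m

		// A genuinely exponential schedule doubles the wait each attempt.
		factor := time.Duration(1) << (i - 1) // 1, 2, 4, ...
		exponential := retryInterval * factor // 5m, 10m, 20m

		fmt.Printf("attempt %d: linear=%v exponential=%v\n", i, linear, exponential)
	}
}
```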
@@ -174,7 +201,7 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
 func (lm *Manager) Stop() {
 	lm.scheduler.Stop()

-	zap.S().Debug("sending usage data before shutting down")
+	zap.L().Info("sending usage data before shutting down")
 	// send usage before shutting down
 	lm.UploadUsage()

@@ -1,4 +1,3 @@
 node_modules
 .vscode
-build
 .git
|||||||
@@ -86,6 +86,7 @@ module.exports = {
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
|
'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
|
||||||
|
'no-plusplus': 'off',
|
||||||
'jsx-a11y/label-has-associated-control': [
|
'jsx-a11y/label-has-associated-control': [
|
||||||
'error',
|
'error',
|
||||||
{
|
{
|
||||||
@@ -109,7 +110,6 @@ module.exports = {
|
|||||||
// eslint rules need to remove
|
// eslint rules need to remove
|
||||||
'@typescript-eslint/no-shadow': 'off',
|
'@typescript-eslint/no-shadow': 'off',
|
||||||
'import/no-cycle': 'off',
|
'import/no-cycle': 'off',
|
||||||
|
|
||||||
'prettier/prettier': [
|
'prettier/prettier': [
|
||||||
'error',
|
'error',
|
||||||
{},
|
{},
|
||||||
|
|||||||
frontend/.gitignore (new file, +3)
@@ -0,0 +1,3 @@
+
+# Sentry Config File
+.env.sentry-build-plugin
@@ -2,3 +2,19 @@
 . "$(dirname "$0")/_/husky.sh"

 cd frontend && yarn run commitlint --edit $1
+
+branch="$(git rev-parse --abbrev-ref HEAD)"
+
+color_red="$(tput setaf 1)"
+bold="$(tput bold)"
+reset="$(tput sgr0)"
+
+if [ "$branch" = "main" ]; then
+	echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
+	exit 1
+fi
+
+if [ "$branch" = "develop" ]; then
+	echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
+	exit 1
+fi
frontend/.prettierignore (new file, +6)
@@ -0,0 +1,6 @@
+# Ignore artifacts:
+build
+coverage
+
+# Ignore all MD files:
+**/*.md
@@ -1,38 +1,17 @@
-# Builder stage
-FROM node:16.15.0 as builder
+FROM nginx:1.26-alpine

 # Add Maintainer Info
 LABEL maintainer="signoz"

-ARG TARGETOS=linux
-ARG TARGETARCH
+# Set working directory

 WORKDIR /frontend

-# Copy the package.json and .yarnrc files prior to install dependencies
-COPY package.json ./
-# Copy lock file
-COPY yarn.lock ./
-COPY .yarnrc ./
-
-# Install the dependencies and make the folder
-RUN CI=1 yarn install
-
-COPY . .
-
-# Build the project and copy the files
-RUN yarn build
-
-
-FROM nginx:1.25.2-alpine
-
-COPY conf/default.conf /etc/nginx/conf.d/default.conf
-
 # Remove default nginx index page
 RUN rm -rf /usr/share/nginx/html/*

-# Copy from the stahg 1
-COPY --from=builder /frontend/build /usr/share/nginx/html
+# Copy custom nginx config and static files
+COPY conf/default.conf /etc/nginx/conf.d/default.conf
+COPY build /usr/share/nginx/html

 EXPOSE 3301
@@ -4,10 +4,11 @@ const config: Config.InitialOptions = {
 	clearMocks: true,
 	coverageDirectory: 'coverage',
 	coverageReporters: ['text', 'cobertura', 'html', 'json-summary'],
+	collectCoverageFrom: ['src/**/*.{ts,tsx}'],
 	moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
 	modulePathIgnorePatterns: ['dist'],
 	moduleNameMapper: {
-		'\\.(css|less)$': '<rootDir>/__mocks__/cssMock.ts',
+		'\\.(css|less|scss)$': '<rootDir>/__mocks__/cssMock.ts',
 	},
 	globals: {
 		extensionsToTreatAsEsm: ['.ts'],
@@ -22,7 +23,7 @@ const config: Config.InitialOptions = {
 		'^.+\\.(js|jsx)$': 'babel-jest',
 	},
 	transformIgnorePatterns: [
-		'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend)/)',
+		'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color)/)',
 	],
 	setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
 	testPathIgnorePatterns: ['/node_modules/', '/public/'],
@@ -33,6 +34,14 @@ const config: Config.InitialOptions = {
 			browsers: ['chromium', 'firefox', 'webkit'],
 		},
 	},
+	coverageThreshold: {
+		global: {
+			statements: 80,
+			branches: 65,
+			functions: 80,
+			lines: 80,
+		},
+	},
 };

 export default config;
@@ -7,6 +7,10 @@
  */
 import '@testing-library/jest-dom';
 import 'jest-styled-components';
+import './src/styles.scss';
+
+import { server } from './src/mocks-server/server';
+// Establish API mocking before all tests.

 // Mock window.matchMedia
 window.matchMedia =
@@ -18,3 +22,9 @@ window.matchMedia =
 		removeListener: function () {},
 	};
 };
+
+beforeAll(() => server.listen());
+
+afterEach(() => server.resetHandlers());
+
+afterAll(() => server.close());
@@ -19,7 +19,9 @@
 		"playwright:codegen:local": "playwright codegen http://localhost:3301",
 		"playwright:codegen:local:auth": "yarn playwright:codegen:local --load-storage=tests/auth.json",
 		"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
-		"commitlint": "commitlint --edit $1"
+		"commitlint": "commitlint --edit $1",
+		"test": "jest --coverage",
+		"test:changedsince": "jest --changedSince=develop --coverage --silent"
 	},
 	"engines": {
 		"node": ">=16.15.0"
@@ -29,16 +31,27 @@
 	"dependencies": {
 		"@ant-design/colors": "6.0.0",
 		"@ant-design/icons": "4.8.0",
+		"@dnd-kit/core": "6.1.0",
+		"@dnd-kit/modifiers": "7.0.0",
+		"@dnd-kit/sortable": "8.0.0",
 		"@grafana/data": "^9.5.2",
 		"@mdx-js/loader": "2.3.0",
 		"@mdx-js/react": "2.3.0",
 		"@monaco-editor/react": "^4.3.1",
+		"@radix-ui/react-tabs": "1.0.4",
+		"@radix-ui/react-tooltip": "1.0.7",
+		"@sentry/react": "7.102.1",
+		"@sentry/webpack-plugin": "2.16.0",
+		"@signozhq/design-tokens": "0.0.8",
 		"@uiw/react-md-editor": "3.23.5",
+		"@visx/group": "3.3.0",
+		"@visx/shape": "3.5.0",
+		"@visx/tooltip": "3.3.0",
 		"@xstate/react": "^3.0.0",
 		"ansi-to-html": "0.7.2",
-		"antd": "5.0.5",
+		"antd": "5.11.0",
 		"antd-table-saveas-excel": "2.2.1",
-		"axios": "^0.21.0",
+		"axios": "1.6.4",
 		"babel-eslint": "^10.1.0",
 		"babel-jest": "^29.6.4",
 		"babel-loader": "9.1.3",
@@ -52,7 +65,7 @@
 		"color": "^4.2.1",
 		"color-alpha": "1.1.3",
 		"cross-env": "^7.0.3",
-		"css-loader": "4.3.0",
+		"css-loader": "5.0.0",
 		"css-minimizer-webpack-plugin": "5.0.1",
 		"dayjs": "^1.10.7",
 		"dompurify": "3.0.0",
@@ -63,6 +76,7 @@
 		"fontfaceobserver": "2.3.0",
 		"history": "4.10.1",
 		"html-webpack-plugin": "5.5.0",
+		"http-proxy-middleware": "2.0.6",
 		"i18next": "^21.6.12",
 		"i18next-browser-languagedetector": "^6.1.3",
 		"i18next-http-backend": "^1.3.2",
@@ -71,26 +85,33 @@
 		"less": "^4.1.2",
 		"less-loader": "^10.2.0",
 		"lodash-es": "^4.17.21",
+		"lucide-react": "0.379.0",
 		"mini-css-extract-plugin": "2.4.5",
 		"papaparse": "5.4.1",
+		"rc-tween-one": "3.0.6",
 		"react": "18.2.0",
 		"react-addons-update": "15.6.3",
+		"react-beautiful-dnd": "13.1.1",
 		"react-dnd": "16.0.1",
 		"react-dnd-html5-backend": "16.0.1",
 		"react-dom": "18.2.0",
 		"react-drag-listview": "2.0.0",
+		"react-error-boundary": "4.0.11",
 		"react-force-graph": "^1.43.0",
+		"react-full-screen": "1.1.1",
 		"react-grid-layout": "^1.3.4",
 		"react-helmet-async": "1.3.0",
 		"react-i18next": "^11.16.1",
-		"react-intersection-observer": "9.4.1",
-		"react-query": "^3.34.19",
+		"react-markdown": "8.0.7",
+		"react-query": "3.39.3",
 		"react-redux": "^7.2.2",
 		"react-router-dom": "^5.2.0",
+		"react-syntax-highlighter": "15.5.0",
 		"react-use": "^17.3.2",
 		"react-virtuoso": "4.0.3",
 		"redux": "^4.0.5",
 		"redux-thunk": "^2.3.0",
+		"rehype-raw": "7.0.0",
 		"stream": "^0.0.2",
 		"style-loader": "1.3.0",
 		"styled-components": "^5.3.11",
@@ -99,10 +120,12 @@
 		"ts-node": "^10.2.1",
 		"tsconfig-paths-webpack-plugin": "^3.5.1",
 		"typescript": "^4.0.5",
+		"uplot": "1.6.26",
 		"uuid": "^8.3.2",
 		"web-vitals": "^0.2.4",
 		"webpack": "5.88.2",
 		"webpack-dev-server": "^4.15.1",
+		"webpack-retry-chunk-load-plugin": "3.1.1",
 		"xstate": "^4.31.0"
 	},
 	"browserslist": {
@@ -144,19 +167,22 @@
 		"@types/papaparse": "5.3.7",
 		"@types/react": "18.0.26",
 		"@types/react-addons-update": "0.14.21",
+		"@types/react-beautiful-dnd": "13.1.8",
 		"@types/react-dom": "18.0.10",
 		"@types/react-grid-layout": "^1.1.2",
 		"@types/react-helmet-async": "1.0.3",
 		"@types/react-redux": "^7.1.11",
 		"@types/react-resizable": "3.0.3",
 		"@types/react-router-dom": "^5.1.6",
+		"@types/react-syntax-highlighter": "15.5.7",
+		"@types/redux-mock-store": "1.0.4",
 		"@types/styled-components": "^5.1.4",
 		"@types/uuid": "^8.3.1",
 		"@types/webpack": "^5.28.0",
 		"@types/webpack-dev-server": "^4.7.2",
 		"@typescript-eslint/eslint-plugin": "^4.33.0",
 		"@typescript-eslint/parser": "^4.33.0",
-		"autoprefixer": "^9.0.0",
+		"autoprefixer": "10.4.19",
 		"babel-plugin-styled-components": "^1.12.0",
 		"compression-webpack-plugin": "9.0.0",
 		"copy-webpack-plugin": "^8.1.0",
@@ -181,11 +207,16 @@
 		"jest-playwright-preset": "^1.7.2",
 		"jest-styled-components": "^7.0.8",
 		"lint-staged": "^12.5.0",
+		"msw": "1.3.2",
+		"npm-run-all": "latest",
 		"portfinder-sync": "^0.0.2",
+		"postcss": "8.4.38",
 		"prettier": "2.2.1",
+		"raw-loader": "4.0.2",
 		"react-hooks-testing-library": "0.6.0",
 		"react-hot-loader": "^4.13.0",
 		"react-resizable": "3.0.4",
+		"redux-mock-store": "1.5.4",
 		"sass": "1.66.1",
 		"sass-loader": "13.3.2",
 		"ts-jest": "^27.1.5",
@@ -196,7 +227,8 @@
 	},
 	"lint-staged": {
 		"*.(js|jsx|ts|tsx)": [
-			"eslint --fix"
+			"eslint --fix",
+			"sh scripts/typecheck-staged.sh"
 		]
 	},
 	"resolutions": {
@@ -204,6 +236,7 @@
 		"@types/react-dom": "18.0.10",
 		"debug": "4.3.4",
 		"semver": "7.5.4",
-		"xml2js": "0.5.0"
+		"xml2js": "0.5.0",
+		"phin": "^3.7.1"
 	}
 }
New image assets (SVG/GIF markup and binary data omitted; only the file metadata was recoverable from the rendered diff):

frontend/public/Icons/alert_emoji.svg      (new file, +10, 9.1 KiB)
frontend/public/Icons/awwSnap.svg          (new file, +1, 2.3 KiB)
frontend/public/Icons/cable-car.svg        (new file, +1, 507 B)
frontend/public/Icons/configure.svg        (new file, +1, 644 B)
frontend/public/Icons/dashboard_emoji.svg  (new file, +1, 2.0 KiB)
frontend/public/Icons/dashboards.svg       (new file, +1, 5.2 KiB)
frontend/public/Icons/emptyState.svg       (new file, +1, 5.6 KiB)
frontend/public/Icons/group.svg            (new file, +1, 604 B)
frontend/public/Icons/landscape.svg        (new file, +1, 6.1 KiB)
frontend/public/Icons/loading-plane.gif    (new file, binary, 88 KiB)
frontend/public/Icons/redis-logo.svg       (new file, +1, 2.3 KiB)
frontend/public/Icons/tetra-pack.svg       (new file, +1, 3.6 KiB)