Compare commits
740 Commits
v0.26.1
...
v0.42.3-de
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c99a112dc7 | ||
|
|
e4d9c4e239 | ||
|
|
781732f25a | ||
|
|
77e55a0ec9 | ||
|
|
a34c59762b | ||
|
|
397da5857f | ||
|
|
43ceb052d8 | ||
|
|
6eced60bf5 | ||
|
|
7c2f5352d2 | ||
|
|
e6e377beff | ||
|
|
6da9de6591 | ||
|
|
7549aee656 | ||
|
|
da4a6266c5 | ||
|
|
6ac938f2a6 | ||
|
|
990fc83269 | ||
|
|
5d5ff47d5e | ||
|
|
9f30bba9a8 | ||
|
|
6014bb76b6 | ||
|
|
e25b54f86a | ||
|
|
5959963b9d | ||
|
|
31b1d58a70 | ||
|
|
0ac9f6f663 | ||
|
|
a30b75a2a8 | ||
|
|
dbd4363ff8 | ||
|
|
ad1b01f225 | ||
|
|
e1679790f7 | ||
|
|
ae594061e9 | ||
|
|
9e02147d4c | ||
|
|
2b3d1c8ee5 | ||
|
|
4c91dbcff0 | ||
|
|
83f68f13db | ||
|
|
994814864c | ||
|
|
f24135f5b0 | ||
|
|
5745727031 | ||
|
|
ae0d685b29 | ||
|
|
f34a49e19c | ||
|
|
9e557a0ebe | ||
|
|
0df3c26f04 | ||
|
|
0df86454ce | ||
|
|
63f0ae1c7c | ||
|
|
d9f232683d | ||
|
|
ad9d77d33f | ||
|
|
5a8479f4e9 | ||
|
|
f4e94c0ad1 | ||
|
|
6f3183823f | ||
|
|
01bb39da6a | ||
|
|
43f9830e8d | ||
|
|
4c2174958f | ||
|
|
07747e73d6 | ||
|
|
60946b5e9d | ||
|
|
0365fa5421 | ||
|
|
cf22039562 | ||
|
|
2a62982885 | ||
|
|
1e1624ed4c | ||
|
|
6b3af78873 | ||
|
|
6adeef7e70 | ||
|
|
44dc55c5ac | ||
|
|
3c419677e1 | ||
|
|
aadb962b6c | ||
|
|
c6080ca02e | ||
|
|
506448fe61 | ||
|
|
a42176599f | ||
|
|
adef0a4138 | ||
|
|
c9816cce18 | ||
|
|
c6c2b9d809 | ||
|
|
d9b379ae51 | ||
|
|
dd2afe19f6 | ||
|
|
0326a4d42a | ||
|
|
b4d12966f3 | ||
|
|
5a2d729ba9 | ||
|
|
666916fae2 | ||
|
|
4b4008642d | ||
|
|
7c2007faa3 | ||
|
|
6b87118fc6 | ||
|
|
49aba4fb1c | ||
|
|
9ace374855 | ||
|
|
a4d5774ae3 | ||
|
|
d0d10daa44 | ||
|
|
e519539468 | ||
|
|
7051831539 | ||
|
|
c842e68288 | ||
|
|
a295bf2fb6 | ||
|
|
4cd40391c5 | ||
|
|
7af4ba34af | ||
|
|
54c69311ed | ||
|
|
62af836554 | ||
|
|
f9b3ca01f9 | ||
|
|
0c4149225f | ||
|
|
7136ecc2fe | ||
|
|
0c14145ef9 | ||
|
|
6618b47123 | ||
|
|
ab5285dee6 | ||
|
|
fdd7e022e9 | ||
|
|
90d7f0200a | ||
|
|
2713e186d3 | ||
|
|
ffdb4cfff0 | ||
|
|
b3b7522250 | ||
|
|
0870030d1c | ||
|
|
3fece44aef | ||
|
|
e5de35a769 | ||
|
|
44ff1517d1 | ||
|
|
d77389abe3 | ||
|
|
1a62a13aea | ||
|
|
97fdba0fae | ||
|
|
5c2a9e8362 | ||
|
|
1aaafa4638 | ||
|
|
4eb1948e4c | ||
|
|
fe0ba5e3ba | ||
|
|
8add13743a | ||
|
|
9964e3425a | ||
|
|
ddaa464d97 | ||
|
|
8f9d643923 | ||
|
|
d9ab100da3 | ||
|
|
7d32c63398 | ||
|
|
89c6eba913 | ||
|
|
c38247abe4 | ||
|
|
f9eddc9b18 | ||
|
|
17de5836bd | ||
|
|
fe37a2e7e0 | ||
|
|
aad840da59 | ||
|
|
f2d5d21581 | ||
|
|
f3bc1a8f8a | ||
|
|
7bca847f11 | ||
|
|
0cb60e1c10 | ||
|
|
ecd5ce92c2 | ||
|
|
aa67b47053 | ||
|
|
e2669eb370 | ||
|
|
c4bbbf372c | ||
|
|
0c59953cb5 | ||
|
|
b10f17de78 | ||
|
|
bbf9787fb3 | ||
|
|
d11c1eb439 | ||
|
|
548c531956 | ||
|
|
4e75479831 | ||
|
|
633b551e5d | ||
|
|
f734142419 | ||
|
|
aa9a3e9349 | ||
|
|
ab950135ff | ||
|
|
b4e0e89b05 | ||
|
|
12a33960ff | ||
|
|
65ed0c0c05 | ||
|
|
6eb7693294 | ||
|
|
7ec25b4f62 | ||
|
|
b3bc78d23c | ||
|
|
bd4786f128 | ||
|
|
81241170e5 | ||
|
|
e0df371a8d | ||
|
|
cfea51d9ee | ||
|
|
037f5ae4c8 | ||
|
|
d6b7587bbe | ||
|
|
0dffd86287 | ||
|
|
c75a44c620 | ||
|
|
cbf3041dde | ||
|
|
d0b43f3802 | ||
|
|
1ee672c020 | ||
|
|
ad8924ed13 | ||
|
|
cff0e1cf1e | ||
|
|
02f83e4b4a | ||
|
|
6bc5ceac3e | ||
|
|
3a20862d0c | ||
|
|
0e331dd177 | ||
|
|
ab4f6adb19 | ||
|
|
50834be4db | ||
|
|
260d21afd0 | ||
|
|
3b98073ad4 | ||
|
|
6bd2c1ba74 | ||
|
|
968cc0eb82 | ||
|
|
3ce385ef23 | ||
|
|
c6581782d0 | ||
|
|
61977ebe86 | ||
|
|
56b71d0f02 | ||
|
|
f6ab060545 | ||
|
|
554c4332c4 | ||
|
|
9d689693b4 | ||
|
|
26bc94fc46 | ||
|
|
6837c41090 | ||
|
|
8fe0e60208 | ||
|
|
00b111fbe3 | ||
|
|
0bebd3e338 | ||
|
|
d5e0a26f55 | ||
|
|
48ebe91713 | ||
|
|
5bc3c074f8 | ||
|
|
f5b5a9a657 | ||
|
|
ac835c80e9 | ||
|
|
2cf0bb4fa5 | ||
|
|
0f44246795 | ||
|
|
64307f323f | ||
|
|
616b8e0a45 | ||
|
|
2c0690a8ee | ||
|
|
2f361de693 | ||
|
|
457380c065 | ||
|
|
96e3d00e74 | ||
|
|
d224e08145 | ||
|
|
13ced00a35 | ||
|
|
5c60a862e5 | ||
|
|
78c9330666 | ||
|
|
01fc7a7fd4 | ||
|
|
0200fb3a21 | ||
|
|
e977963763 | ||
|
|
824d9aaf85 | ||
|
|
4db3e5e542 | ||
|
|
a8b293a510 | ||
|
|
4a4f48cec8 | ||
|
|
7e5cf65ea3 | ||
|
|
bb7417ffbd | ||
|
|
085cf34a49 | ||
|
|
be27a92fc9 | ||
|
|
253137a6b8 | ||
|
|
fce7ab7d24 | ||
|
|
71f6b355c4 | ||
|
|
110b545454 | ||
|
|
5b0e3d375a | ||
|
|
9e05cb48fe | ||
|
|
6d67ca72a0 | ||
|
|
0626081eee | ||
|
|
199d52b39f | ||
|
|
204cad8448 | ||
|
|
8c6096d60e | ||
|
|
9de9fb5863 | ||
|
|
64d854ffa2 | ||
|
|
6b073280a4 | ||
|
|
79e6699b37 | ||
|
|
d563778479 | ||
|
|
255b3dd3b0 | ||
|
|
00e97fa401 | ||
|
|
9755ba6b47 | ||
|
|
f3fdd2dd6c | ||
|
|
d4248fe933 | ||
|
|
a8d70206ab | ||
|
|
7b344f7a75 | ||
|
|
f0669a6dc1 | ||
|
|
4a7d972c85 | ||
|
|
51c1f88593 | ||
|
|
c1b9049176 | ||
|
|
46559014f7 | ||
|
|
0c1a500142 | ||
|
|
26d6a869c6 | ||
|
|
f99da73098 | ||
|
|
4a1c48b72b | ||
|
|
1163c16506 | ||
|
|
bb558dde8e | ||
|
|
e89c000252 | ||
|
|
4e8e7745c1 | ||
|
|
512fcda33d | ||
|
|
6f43b085b0 | ||
|
|
54038b8ddf | ||
|
|
00c9ef50de | ||
|
|
52750e5248 | ||
|
|
cbce1b1847 | ||
|
|
abaf6126e5 | ||
|
|
739b1bf387 | ||
|
|
cbf150ef7b | ||
|
|
c28f367f46 | ||
|
|
1e679a0d64 | ||
|
|
6f5f361a7e | ||
|
|
d65d75ef69 | ||
|
|
722a38491e | ||
|
|
361efd3b52 | ||
|
|
7b46f86f7f | ||
|
|
5b39dc36d6 | ||
|
|
5fe7948be9 | ||
|
|
a47a90b0f3 | ||
|
|
be6bca3717 | ||
|
|
92717774a2 | ||
|
|
e7fabca38e | ||
|
|
525dea343c | ||
|
|
7d960b79dd | ||
|
|
bdd7778e58 | ||
|
|
105216de3e | ||
|
|
3072b7eb01 | ||
|
|
fd9a502012 | ||
|
|
cf6dc827cc | ||
|
|
6530873994 | ||
|
|
c9c0bd38be | ||
|
|
9ac22fcb10 | ||
|
|
86ff865842 | ||
|
|
e792c48f6d | ||
|
|
8ee92516ca | ||
|
|
79c05d8fa8 | ||
|
|
019bc8c1df | ||
|
|
d688399b91 | ||
|
|
cfc239e3c9 | ||
|
|
3572baa5eb | ||
|
|
ff26c5f69c | ||
|
|
9230f2442f | ||
|
|
7fed80b145 | ||
|
|
a268bb910c | ||
|
|
fbbe0bef86 | ||
|
|
bcd6ac47f7 | ||
|
|
ec27916fa5 | ||
|
|
263ac9fa5a | ||
|
|
e3b2882811 | ||
|
|
63efb2b25a | ||
|
|
788a38d39c | ||
|
|
bff39daef0 | ||
|
|
f2521b4c49 | ||
|
|
16b846006a | ||
|
|
0ea8e8e6b8 | ||
|
|
da0ea7eb49 | ||
|
|
ca41d7011e | ||
|
|
f15e8f2fed | ||
|
|
2e0fdbb498 | ||
|
|
585d6e2a21 | ||
|
|
d93cc767a6 | ||
|
|
a363b98657 | ||
|
|
2031a014a7 | ||
|
|
43d5ee9651 | ||
|
|
f8bb42a13c | ||
|
|
1be4731710 | ||
|
|
90b8959045 | ||
|
|
f487c1956b | ||
|
|
6b2f03d43f | ||
|
|
581bd07b35 | ||
|
|
c5cba68b53 | ||
|
|
a6b6abf1a7 | ||
|
|
7526888886 | ||
|
|
ce8fdd509b | ||
|
|
2baa6028b5 | ||
|
|
8e653f9500 | ||
|
|
cb1a823f91 | ||
|
|
c0b0920901 | ||
|
|
77b4e71543 | ||
|
|
9d44ce3ee2 | ||
|
|
1d014ab4f7 | ||
|
|
418ab67d50 | ||
|
|
7efe907757 | ||
|
|
1d1154aa8c | ||
|
|
a16fca6376 | ||
|
|
9c1ea0cde9 | ||
|
|
ec500831ef | ||
|
|
fcbf82c2f3 | ||
|
|
a805eb7533 | ||
|
|
a8edc4fd95 | ||
|
|
c66c8c2823 | ||
|
|
c7b59d4405 | ||
|
|
f56b5cb971 | ||
|
|
29b1344557 | ||
|
|
55664872bd | ||
|
|
221861230a | ||
|
|
8b1a781f58 | ||
|
|
b557ca5519 | ||
|
|
e557ff273f | ||
|
|
3c284fc9ee | ||
|
|
bcebe050b1 | ||
|
|
9360c61dca | ||
|
|
fb1dbdc05e | ||
|
|
6170b2c5dc | ||
|
|
9826ab04b3 | ||
|
|
fd9566d471 | ||
|
|
3a1e8d523a | ||
|
|
6dd34a7f29 | ||
|
|
170e5e1686 | ||
|
|
16502feaad | ||
|
|
09d579311e | ||
|
|
8072fede85 | ||
|
|
112783d618 | ||
|
|
4644b1c200 | ||
|
|
bb09c84679 | ||
|
|
fc5f0fbf9e | ||
|
|
d6f0559adc | ||
|
|
0d7f7df76c | ||
|
|
7104d8e0f5 | ||
|
|
a20693fa9f | ||
|
|
0b991331d7 | ||
|
|
aad44a1037 | ||
|
|
3e29161fea | ||
|
|
b616dca52d | ||
|
|
be519666a3 | ||
|
|
a48edac13b | ||
|
|
0a77c7ab85 | ||
|
|
9fb32acf6d | ||
|
|
b2d6d75eef | ||
|
|
07d126c669 | ||
|
|
50d584cc89 | ||
|
|
1b6b3c2fdf | ||
|
|
1f0fdfd403 | ||
|
|
ae3b604cdc | ||
|
|
381f497b95 | ||
|
|
8045c4e5ae | ||
|
|
7451e885c3 | ||
|
|
01df53074c | ||
|
|
b6a79ab22c | ||
|
|
dae817640b | ||
|
|
16839eb7d3 | ||
|
|
780a863943 | ||
|
|
5e0b6366cc | ||
|
|
8eb2b9e3d0 | ||
|
|
97ed163002 | ||
|
|
e18bb7d5bc | ||
|
|
1e4cf2513c | ||
|
|
988ede7776 | ||
|
|
d1acad8ee4 | ||
|
|
f5b1d4146f | ||
|
|
feaac39e2a | ||
|
|
fc4cdea539 | ||
|
|
399d49b3c0 | ||
|
|
ec8a74d385 | ||
|
|
7c87310fa6 | ||
|
|
349c4020f5 | ||
|
|
92e2f1c467 | ||
|
|
e3a89be86b | ||
|
|
40090aaf12 | ||
|
|
4009ac83fe | ||
|
|
e7f9c3981b | ||
|
|
fe75f6347b | ||
|
|
bc72b5fcea | ||
|
|
a54cf38e21 | ||
|
|
94d99ee0a4 | ||
|
|
c109636889 | ||
|
|
d9950d9223 | ||
|
|
a578f9509a | ||
|
|
b1e4ee1d26 | ||
|
|
31b07cc02c | ||
|
|
d42bf50ddb | ||
|
|
93a11b2031 | ||
|
|
af71474bec | ||
|
|
bc942d218b | ||
|
|
f2e7f09a32 | ||
|
|
7e87df2d69 | ||
|
|
c0226ab584 | ||
|
|
84f2885533 | ||
|
|
e58ecff19b | ||
|
|
f4ecfb510a | ||
|
|
c4536f9069 | ||
|
|
2a55f3d680 | ||
|
|
5d6eea3045 | ||
|
|
12029a6d90 | ||
|
|
4083970289 | ||
|
|
b3c0681a85 | ||
|
|
36aced6d1a | ||
|
|
bad69abcc2 | ||
|
|
d091d90d66 | ||
|
|
29bfdb8909 | ||
|
|
31b5635339 | ||
|
|
73fc262f04 | ||
|
|
dc23368f6e | ||
|
|
75526c6de5 | ||
|
|
5b419cb668 | ||
|
|
d8a8430a5b | ||
|
|
dc7a55e871 | ||
|
|
9333fdcd0b | ||
|
|
58ccbdbec4 | ||
|
|
12819113c1 | ||
|
|
37f61ebe60 | ||
|
|
f2f89eb38b | ||
|
|
a99d7f09a1 | ||
|
|
2ae75e6196 | ||
|
|
f86fc03fd6 | ||
|
|
5a9f626da5 | ||
|
|
758013d7cd | ||
|
|
ddc3cc4911 | ||
|
|
6b2f857a12 | ||
|
|
30b0d42604 | ||
|
|
88aabb2060 | ||
|
|
f939d41acd | ||
|
|
d165f727ac | ||
|
|
e4ef137c72 | ||
|
|
dda01678e8 | ||
|
|
3e65543b5f | ||
|
|
050b866173 | ||
|
|
0906886e9a | ||
|
|
8371670512 | ||
|
|
123f2e7d52 | ||
|
|
0ab09c1c67 | ||
|
|
9f5039dbf3 | ||
|
|
5e349d8294 | ||
|
|
b5654c8bfa | ||
|
|
71e487dc0c | ||
|
|
2d60805b28 | ||
|
|
7603e0ebe0 | ||
|
|
1e8a8d19ea | ||
|
|
092d164d55 | ||
|
|
0400d5378b | ||
|
|
626da7533e | ||
|
|
bff7142a61 | ||
|
|
ed3017d247 | ||
|
|
ec3eba612c | ||
|
|
b958a06ba0 | ||
|
|
64f0ff05f9 | ||
|
|
f94a5f4481 | ||
|
|
27869f03bd | ||
|
|
9c21449239 | ||
|
|
991e39aad3 | ||
|
|
eddb607c9c | ||
|
|
3341cb7396 | ||
|
|
4ca1e34378 | ||
|
|
658a9cc11b | ||
|
|
4ef973ceb6 | ||
|
|
bbfaad15c2 | ||
|
|
45ead71359 | ||
|
|
79aef73767 | ||
|
|
fc49833c9f | ||
|
|
b34eafcab1 | ||
|
|
ed4ba1aa24 | ||
|
|
f427bac993 | ||
|
|
7de3cec477 | ||
|
|
856c04220f | ||
|
|
6a8096b8d7 | ||
|
|
9bad663c4f | ||
|
|
720a735338 | ||
|
|
1ad7ba0afd | ||
|
|
176d01544e | ||
|
|
c55be0e392 | ||
|
|
2c2775c766 | ||
|
|
f90ae99018 | ||
|
|
e12cf3e494 | ||
|
|
f12abfbe01 | ||
|
|
7faab85b4d | ||
|
|
5e0c068cb9 | ||
|
|
7a18bddce3 | ||
|
|
0c11b12744 | ||
|
|
ba05991222 | ||
|
|
1f17095e11 | ||
|
|
ab42700245 | ||
|
|
3f912edc98 | ||
|
|
63b503a9fb | ||
|
|
90f7ba191b | ||
|
|
53a78211ef | ||
|
|
838860da40 | ||
|
|
6b2427f1c2 | ||
|
|
e3d08a4275 | ||
|
|
814431e3a8 | ||
|
|
6e20fbb174 | ||
|
|
53dee57e17 | ||
|
|
5c5ee2cc70 | ||
|
|
e0b83bda62 | ||
|
|
f7fe64a8df | ||
|
|
377dbd8aec | ||
|
|
f8d3fa0fdb | ||
|
|
5b858f2963 | ||
|
|
3620cdb5d2 | ||
|
|
546d98ca9c | ||
|
|
cb155a1172 | ||
|
|
ad62106cad | ||
|
|
2d6c5f43a1 | ||
|
|
9a433891f2 | ||
|
|
3c63d66591 | ||
|
|
5b69559762 | ||
|
|
d7a5c6d65b | ||
|
|
1588d3a199 | ||
|
|
d5df9a1f7f | ||
|
|
2be3d35952 | ||
|
|
7fa50070ce | ||
|
|
2494b64ccd | ||
|
|
ca3283fcad | ||
|
|
a912731cc7 | ||
|
|
1a855582a7 | ||
|
|
f3c00e1a57 | ||
|
|
0d3cbb1db2 | ||
|
|
2c96512a8a | ||
|
|
a84a70df14 | ||
|
|
dcea79cef3 | ||
|
|
b12365ba07 | ||
|
|
718eb7b381 | ||
|
|
503417719c | ||
|
|
e7a5eb7b22 | ||
|
|
b14f800fee | ||
|
|
9e91375632 | ||
|
|
d7d4000240 | ||
|
|
e12aef136a | ||
|
|
0e04b779a9 | ||
|
|
587034f573 | ||
|
|
321cba2af5 | ||
|
|
abed60bdfa | ||
|
|
a306fb64cb | ||
|
|
0ad5d67140 | ||
|
|
11863040bb | ||
|
|
a67a3837c8 | ||
|
|
81b10d126a | ||
|
|
9f751688cc | ||
|
|
3d0fbd0065 | ||
|
|
05ea814c61 | ||
|
|
92ba46b2f5 | ||
|
|
4bbe1ea614 | ||
|
|
e3a251ef29 | ||
|
|
a4e0d9c7df | ||
|
|
4076cd9847 | ||
|
|
e3f4fc2967 | ||
|
|
bccefc6a10 | ||
|
|
821471f4ab | ||
|
|
1e242b6d06 | ||
|
|
4ca5176836 | ||
|
|
7f397d529b | ||
|
|
656f354fdc | ||
|
|
4cc3ce224c | ||
|
|
a4a285c074 | ||
|
|
a8f8580606 | ||
|
|
e24918044e | ||
|
|
28d346eafb | ||
|
|
cbd2f4c643 | ||
|
|
fcedc9e445 | ||
|
|
d2d3c4bb36 | ||
|
|
dc4acc0730 | ||
|
|
043e5ca880 | ||
|
|
5c437dd8f9 | ||
|
|
31b898b2c6 | ||
|
|
e186474414 | ||
|
|
8bfb0b5088 | ||
|
|
045a31ac92 | ||
|
|
c9654a6b52 | ||
|
|
30e0924bfb | ||
|
|
ccada08db5 | ||
|
|
6654dd2672 | ||
|
|
4b0a7cc4d3 | ||
|
|
04acc49154 | ||
|
|
3db8a25eb9 | ||
|
|
8324d010ae | ||
|
|
f0022cd13f | ||
|
|
655c92cfef | ||
|
|
735ab8e118 | ||
|
|
b0861f4fe0 | ||
|
|
61b6779a31 | ||
|
|
416a058eab | ||
|
|
4227faa6b5 | ||
|
|
9d3c4598ac | ||
|
|
6aba701cca | ||
|
|
cf1b0c2f24 | ||
|
|
231c2fd281 | ||
|
|
e3f17b5420 | ||
|
|
56f1f71461 | ||
|
|
72f4578152 | ||
|
|
9c8125ffc1 | ||
|
|
85d7752350 | ||
|
|
63ba9fb5e0 | ||
|
|
14a59a26b2 | ||
|
|
178c154263 | ||
|
|
122488c2c1 | ||
|
|
ad3fbd7599 | ||
|
|
1ad1ca5385 | ||
|
|
2fc82ffa59 | ||
|
|
ed809474d6 | ||
|
|
a8c4a91001 | ||
|
|
1ffb1b4a5d | ||
|
|
9a00998930 | ||
|
|
c60f612e0e | ||
|
|
7839134532 | ||
|
|
714a2ef4fd | ||
|
|
7f98a65022 | ||
|
|
616e3af18f | ||
|
|
17ae197bc3 | ||
|
|
27cda7a437 | ||
|
|
54a2309d8f | ||
|
|
748c78e9c3 | ||
|
|
7ae0326f61 | ||
|
|
c15240abab | ||
|
|
2fe9e53766 | ||
|
|
7209ac0007 | ||
|
|
96adc7f61c | ||
|
|
7b4fd55aeb | ||
|
|
86c34bd87d | ||
|
|
41f7a7993d | ||
|
|
dfd94f67bd | ||
|
|
052c32ce78 | ||
|
|
004f10e73b | ||
|
|
f17608fa10 | ||
|
|
8de8a8a86a | ||
|
|
0486721b35 | ||
|
|
14bbc609d8 | ||
|
|
f450d71a25 | ||
|
|
921fca5e67 | ||
|
|
1f7e70fa16 | ||
|
|
3ca048bc76 | ||
|
|
37e36626ab | ||
|
|
07808a8664 | ||
|
|
16d490fbe3 | ||
|
|
b1cee71621 | ||
|
|
e55a4da2bc | ||
|
|
0fa0e64697 | ||
|
|
c54fffb51d | ||
|
|
294b6966bf | ||
|
|
9aa72f847c | ||
|
|
32a55f3c4f | ||
|
|
8e6a7f13a1 | ||
|
|
89e8fb715c | ||
|
|
21de4bbd1b | ||
|
|
d4cc7c88c3 | ||
|
|
0e9e29e650 | ||
|
|
176725a4b4 | ||
|
|
88560e7c43 | ||
|
|
2538899544 | ||
|
|
ae0fc32fe1 | ||
|
|
859875667a | ||
|
|
674dee5a1b | ||
|
|
0318e53fdc | ||
|
|
b768840d36 | ||
|
|
ccc8be009c | ||
|
|
59549c36de | ||
|
|
aeee8b4cb2 | ||
|
|
03acc33888 | ||
|
|
b2a943769b | ||
|
|
1501ed0c5d | ||
|
|
218eb5379e | ||
|
|
e596dd77bd | ||
|
|
1138c6e41a | ||
|
|
15f328eb9e | ||
|
|
01312ec286 | ||
|
|
0574350e6e | ||
|
|
7e297dcb75 | ||
|
|
d184486978 | ||
|
|
337e33eb8a | ||
|
|
988dd1bcf0 | ||
|
|
25fc7b83ec | ||
|
|
893fcfa6ee | ||
|
|
23f94538c8 | ||
|
|
7586b50c5a | ||
|
|
0bee0a6d90 | ||
|
|
f89f3c0b14 | ||
|
|
598e71eb8e | ||
|
|
4d14416a08 | ||
|
|
2657276a80 | ||
|
|
48538e6b96 | ||
|
|
9337ff4b41 | ||
|
|
02cd069bb2 | ||
|
|
6a71d311b3 | ||
|
|
0b8367c817 | ||
|
|
b20fc39e08 | ||
|
|
1dd7bdb100 | ||
|
|
591ea96285 | ||
|
|
ee6b290a0c | ||
|
|
a37476a09b | ||
|
|
d41805a3b0 | ||
|
|
5d6bb18679 | ||
|
|
a393ea4d68 | ||
|
|
26b95f1b9f | ||
|
|
34be2953d3 | ||
|
|
e04d5fa7e8 | ||
|
|
2df5a9d72d | ||
|
|
b6e111b835 | ||
|
|
6a4aa7e5f5 | ||
|
|
962fc5e9ff | ||
|
|
4d5ee861ec | ||
|
|
2aef4578b0 | ||
|
|
f8bfd1abc4 | ||
|
|
a75f4f02d6 | ||
|
|
5ae4a59060 | ||
|
|
896836b57d | ||
|
|
06bedc92dc | ||
|
|
8a05b32a30 | ||
|
|
865409d725 | ||
|
|
7554bce11c |
9
.github/CODEOWNERS
vendored
@@ -1,13 +1,10 @@
|
|||||||
# CODEOWNERS info: https://help.github.com/en/articles/about-code-owners
|
# CODEOWNERS info: https://help.github.com/en/articles/about-code-owners
|
||||||
# Owners are automatically requested for review for PRs that changes code
|
# Owners are automatically requested for review for PRs that changes code
|
||||||
# that they own.
|
# that they own.
|
||||||
* @ankitnayan
|
|
||||||
|
|
||||||
/frontend/ @palashgdev
|
/frontend/ @YounixM
|
||||||
|
/frontend/src/container/MetricsApplication @srikanthccv
|
||||||
|
/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
|
||||||
/deploy/ @prashant-shahi
|
/deploy/ @prashant-shahi
|
||||||
/sample-apps/ @prashant-shahi
|
/sample-apps/ @prashant-shahi
|
||||||
**/query-service/ @srikanthccv
|
|
||||||
Makefile @srikanthccv
|
|
||||||
go.* @srikanthccv
|
|
||||||
.git* @srikanthccv
|
|
||||||
.github @prashant-shahi
|
.github @prashant-shahi
|
||||||
|
|||||||
17
.github/pull_request_template.md
vendored
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
### Summary
|
||||||
|
|
||||||
|
<!-- ✍️ A clear and concise description...-->
|
||||||
|
|
||||||
|
#### Related Issues / PR's
|
||||||
|
|
||||||
|
<!-- ✍️ Add the issues being resolved here and related PR's where applicable -->
|
||||||
|
|
||||||
|
#### Screenshots
|
||||||
|
|
||||||
|
NA
|
||||||
|
|
||||||
|
<!-- ✍️ Add screenshots of before and after changes where applicable-->
|
||||||
|
|
||||||
|
#### Affected Areas and Manually Tested Areas
|
||||||
|
|
||||||
|
<!-- ✍️ Add details of blast radius and dev testing areas where applicable-->
|
||||||
38
.github/workflows/build.yaml
vendored
@@ -12,7 +12,31 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Install dependencies
|
||||||
|
run: cd frontend && yarn install
|
||||||
|
- name: Run ESLint
|
||||||
|
run: cd frontend && npm run lint
|
||||||
|
- name: Run Jest
|
||||||
|
run: cd frontend && npm run jest
|
||||||
|
- name: TSC
|
||||||
|
run: yarn tsc
|
||||||
|
working-directory: ./frontend
|
||||||
|
- name: Build frontend docker image
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
make build-frontend-amd64
|
||||||
|
|
||||||
|
build-frontend-ee:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Create .env file
|
||||||
|
run: |
|
||||||
|
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
|
||||||
|
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
|
||||||
|
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: cd frontend && yarn install
|
run: cd frontend && yarn install
|
||||||
- name: Run ESLint
|
- name: Run ESLint
|
||||||
@@ -31,7 +55,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup golang
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
@@ -45,7 +73,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup golang
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
- name: Build EE query-service image
|
- name: Build EE query-service image
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
2
.github/workflows/codeql.yaml
vendored
@@ -39,7 +39,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
# Initializes the CodeQL tools for scanning.
|
# Initializes the CodeQL tools for scanning.
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
|
|||||||
2
.github/workflows/commitlint.yml
vendored
@@ -7,7 +7,7 @@ jobs:
|
|||||||
lint-commits:
|
lint-commits:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- uses: wagoid/commitlint-github-action@v5
|
- uses: wagoid/commitlint-github-action@v5
|
||||||
|
|||||||
@@ -12,11 +12,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout Codebase
|
- name: Checkout Codebase
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: signoz/gh-bot
|
repository: signoz/gh-bot
|
||||||
- name: Use Node v16
|
- name: Use Node v16
|
||||||
uses: actions/setup-node@v3
|
uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: 16
|
node-version: 16
|
||||||
- name: Setup Cache & Install Dependencies
|
- name: Setup Cache & Install Dependencies
|
||||||
|
|||||||
4
.github/workflows/dependency-review.yml
vendored
@@ -15,8 +15,8 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: 'Checkout Repository'
|
- name: 'Checkout Repository'
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: 'Dependency Review'
|
- name: 'Dependency Review'
|
||||||
with:
|
with:
|
||||||
fail-on-severity: high
|
fail-on-severity: high
|
||||||
uses: actions/dependency-review-action@v2
|
uses: actions/dependency-review-action@v3
|
||||||
|
|||||||
13
.github/workflows/e2e-k3s.yaml
vendored
@@ -13,7 +13,12 @@ jobs:
|
|||||||
DOCKER_TAG: pull-${{ github.event.number }}
|
DOCKER_TAG: pull-${{ github.event.number }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Setup golang
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
- name: Build query-service image
|
- name: Build query-service image
|
||||||
env:
|
env:
|
||||||
@@ -65,9 +70,9 @@ jobs:
|
|||||||
- name: Kick off a sample-app workload
|
- name: Kick off a sample-app workload
|
||||||
run: |
|
run: |
|
||||||
# start the locust swarm
|
# start the locust swarm
|
||||||
kubectl -n sample-application run strzal --image=djbingham/curl \
|
kubectl --namespace sample-application run strzal --image=djbingham/curl \
|
||||||
--restart='OnFailure' -i --rm --command -- curl -X POST -F \
|
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
|
||||||
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
|
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
|
||||||
|
|
||||||
- name: Get short commit SHA, display tunnel URL and IP Address of the worker node
|
- name: Get short commit SHA, display tunnel URL and IP Address of the worker node
|
||||||
id: get-subdomain
|
id: get-subdomain
|
||||||
|
|||||||
4
.github/workflows/playwright.yaml
vendored
@@ -9,8 +9,8 @@ jobs:
|
|||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-node@v3
|
- uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: "16.x"
|
node-version: "16.x"
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
|
|||||||
101
.github/workflows/push.yaml
vendored
@@ -14,15 +14,19 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup golang
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: latest
|
||||||
- name: Login to DockerHub
|
- name: Login to DockerHub
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
@@ -30,7 +34,7 @@ jobs:
|
|||||||
id: short-sha
|
id: short-sha
|
||||||
- name: Get branch name
|
- name: Get branch name
|
||||||
id: branch-name
|
id: branch-name
|
||||||
uses: tj-actions/branch-names@v5.1
|
uses: tj-actions/branch-names@v7.0.7
|
||||||
- name: Set docker tag environment
|
- name: Set docker tag environment
|
||||||
run: |
|
run: |
|
||||||
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
||||||
@@ -42,6 +46,11 @@ jobs:
|
|||||||
else
|
else
|
||||||
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
|
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
|
||||||
fi
|
fi
|
||||||
|
- name: Install cross-compilation tools
|
||||||
|
run: |
|
||||||
|
set -ex
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
|
||||||
- name: Build and push docker image
|
- name: Build and push docker image
|
||||||
run: make build-push-query-service
|
run: make build-push-query-service
|
||||||
|
|
||||||
@@ -49,15 +58,19 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup golang
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: latest
|
||||||
- name: Login to DockerHub
|
- name: Login to DockerHub
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
@@ -65,7 +78,7 @@ jobs:
|
|||||||
id: short-sha
|
id: short-sha
|
||||||
- name: Get branch name
|
- name: Get branch name
|
||||||
id: branch-name
|
id: branch-name
|
||||||
uses: tj-actions/branch-names@v5.1
|
uses: tj-actions/branch-names@v7.0.7
|
||||||
- name: Set docker tag environment
|
- name: Set docker tag environment
|
||||||
run: |
|
run: |
|
||||||
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
||||||
@@ -77,6 +90,11 @@ jobs:
|
|||||||
else
|
else
|
||||||
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
|
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
|
||||||
fi
|
fi
|
||||||
|
- name: Install cross-compilation tools
|
||||||
|
run: |
|
||||||
|
set -ex
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
|
||||||
- name: Build and push docker image
|
- name: Build and push docker image
|
||||||
run: make build-push-ee-query-service
|
run: make build-push-ee-query-service
|
||||||
|
|
||||||
@@ -84,7 +102,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
working-directory: frontend
|
working-directory: frontend
|
||||||
run: yarn install
|
run: yarn install
|
||||||
@@ -97,11 +115,11 @@ jobs:
|
|||||||
run: npm run lint
|
run: npm run lint
|
||||||
continue-on-error: true
|
continue-on-error: true
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: latest
|
||||||
- name: Login to DockerHub
|
- name: Login to DockerHub
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
@@ -109,7 +127,7 @@ jobs:
|
|||||||
id: short-sha
|
id: short-sha
|
||||||
- name: Get branch name
|
- name: Get branch name
|
||||||
id: branch-name
|
id: branch-name
|
||||||
uses: tj-actions/branch-names@v5.1
|
uses: tj-actions/branch-names@v7.0.7
|
||||||
- name: Set docker tag environment
|
- name: Set docker tag environment
|
||||||
run: |
|
run: |
|
||||||
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
||||||
@@ -123,3 +141,58 @@ jobs:
|
|||||||
fi
|
fi
|
||||||
- name: Build and push docker image
|
- name: Build and push docker image
|
||||||
run: make build-push-frontend
|
run: make build-push-frontend
|
||||||
|
|
||||||
|
image-build-and-push-frontend-ee:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Create .env file
|
||||||
|
run: |
|
||||||
|
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
|
||||||
|
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
|
||||||
|
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
|
||||||
|
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
|
||||||
|
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
|
||||||
|
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
|
||||||
|
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
|
||||||
|
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
|
||||||
|
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
|
||||||
|
- name: Install dependencies
|
||||||
|
working-directory: frontend
|
||||||
|
run: yarn install
|
||||||
|
- name: Run Prettier
|
||||||
|
working-directory: frontend
|
||||||
|
run: npm run prettify
|
||||||
|
continue-on-error: true
|
||||||
|
- name: Run ESLint
|
||||||
|
working-directory: frontend
|
||||||
|
run: npm run lint
|
||||||
|
continue-on-error: true
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
version: latest
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
- uses: benjlevesque/short-sha@v2.2
|
||||||
|
id: short-sha
|
||||||
|
- name: Get branch name
|
||||||
|
id: branch-name
|
||||||
|
uses: tj-actions/branch-names@v7.0.7
|
||||||
|
- name: Set docker tag environment
|
||||||
|
run: |
|
||||||
|
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
||||||
|
tag="${{ steps.branch-name.outputs.tag }}"
|
||||||
|
tag="${tag:1}"
|
||||||
|
echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
|
||||||
|
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
|
||||||
|
echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
|
||||||
|
else
|
||||||
|
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
- name: Build and push docker image
|
||||||
|
run: make build-push-frontend
|
||||||
|
|||||||
2
.github/workflows/sonar.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Sonar analysis
|
- name: Sonar analysis
|
||||||
|
|||||||
4
.github/workflows/staging-deployment.yaml
vendored
@@ -11,7 +11,7 @@ jobs:
|
|||||||
environment: staging
|
environment: staging
|
||||||
steps:
|
steps:
|
||||||
- name: Executing remote ssh commands using ssh key
|
- name: Executing remote ssh commands using ssh key
|
||||||
uses: appleboy/ssh-action@v0.1.8
|
uses: appleboy/ssh-action@v1.0.3
|
||||||
env:
|
env:
|
||||||
GITHUB_BRANCH: develop
|
GITHUB_BRANCH: develop
|
||||||
GITHUB_SHA: ${{ github.sha }}
|
GITHUB_SHA: ${{ github.sha }}
|
||||||
@@ -26,8 +26,10 @@ jobs:
|
|||||||
echo "GITHUB_SHA: ${GITHUB_SHA}"
|
echo "GITHUB_SHA: ${GITHUB_SHA}"
|
||||||
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
|
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
|
||||||
export OTELCOL_TAG="main"
|
export OTELCOL_TAG="main"
|
||||||
|
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
|
||||||
docker system prune --force
|
docker system prune --force
|
||||||
docker pull signoz/signoz-otel-collector:main
|
docker pull signoz/signoz-otel-collector:main
|
||||||
|
docker pull signoz/signoz-schema-migrator:main
|
||||||
cd ~/signoz
|
cd ~/signoz
|
||||||
git status
|
git status
|
||||||
git add .
|
git add .
|
||||||
|
|||||||
8
.github/workflows/testing-deployment.yaml
vendored
@@ -11,7 +11,7 @@ jobs:
|
|||||||
if: ${{ github.event.label.name == 'testing-deploy' }}
|
if: ${{ github.event.label.name == 'testing-deploy' }}
|
||||||
steps:
|
steps:
|
||||||
- name: Executing remote ssh commands using ssh key
|
- name: Executing remote ssh commands using ssh key
|
||||||
uses: appleboy/ssh-action@v0.1.8
|
uses: appleboy/ssh-action@v1.0.3
|
||||||
env:
|
env:
|
||||||
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
|
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
|
||||||
GITHUB_SHA: ${{ github.sha }}
|
GITHUB_SHA: ${{ github.sha }}
|
||||||
@@ -26,14 +26,18 @@ jobs:
|
|||||||
echo "GITHUB_SHA: ${GITHUB_SHA}"
|
echo "GITHUB_SHA: ${GITHUB_SHA}"
|
||||||
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
|
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
|
||||||
export DEV_BUILD="1"
|
export DEV_BUILD="1"
|
||||||
|
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
|
||||||
docker system prune --force
|
docker system prune --force
|
||||||
cd ~/signoz
|
cd ~/signoz
|
||||||
git status
|
git status
|
||||||
git add .
|
git add .
|
||||||
git stash push -m "stashed on $(date --iso-8601=seconds)"
|
git stash push -m "stashed on $(date --iso-8601=seconds)"
|
||||||
git fetch origin
|
git fetch origin
|
||||||
git checkout ${GITHUB_BRANCH}
|
git checkout develop
|
||||||
git pull
|
git pull
|
||||||
|
# This is added to include the scenerio when new commit in PR is force-pushed
|
||||||
|
git branch -D ${GITHUB_BRANCH}
|
||||||
|
git checkout --track origin/${GITHUB_BRANCH}
|
||||||
make build-ee-query-service-amd64
|
make build-ee-query-service-amd64
|
||||||
make build-frontend-amd64
|
make build-frontend-amd64
|
||||||
make run-signoz
|
make run-signoz
|
||||||
11
.gitignore
vendored
@@ -37,7 +37,7 @@ frontend/src/constants/env.ts
|
|||||||
**/locust-scripts/__pycache__/
|
**/locust-scripts/__pycache__/
|
||||||
**/__debug_bin
|
**/__debug_bin
|
||||||
|
|
||||||
frontend/*.env
|
.env
|
||||||
pkg/query-service/signoz.db
|
pkg/query-service/signoz.db
|
||||||
|
|
||||||
pkg/query-service/tests/test-deploy/data/
|
pkg/query-service/tests/test-deploy/data/
|
||||||
@@ -53,3 +53,12 @@ ee/query-service/tests/test-deploy/data/
|
|||||||
bin/
|
bin/
|
||||||
|
|
||||||
*/query-service/queries.active
|
*/query-service/queries.active
|
||||||
|
|
||||||
|
# e2e
|
||||||
|
|
||||||
|
e2e/node_modules/
|
||||||
|
e2e/test-results/
|
||||||
|
e2e/playwright-report/
|
||||||
|
e2e/blob-report/
|
||||||
|
e2e/playwright/.cache/
|
||||||
|
e2e/.auth
|
||||||
86
Makefile
@@ -8,6 +8,7 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
|
|||||||
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||||
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||||
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
|
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
|
||||||
|
DEV_BUILD ?= "" # set to any non-empty value to enable dev build
|
||||||
|
|
||||||
# Internal variables or constants.
|
# Internal variables or constants.
|
||||||
FRONTEND_DIRECTORY ?= frontend
|
FRONTEND_DIRECTORY ?= frontend
|
||||||
@@ -15,15 +16,15 @@ QUERY_SERVICE_DIRECTORY ?= pkg/query-service
|
|||||||
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
|
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
|
||||||
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
|
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
|
||||||
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
|
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
|
||||||
LOCAL_GOOS ?= $(shell go env GOOS)
|
|
||||||
LOCAL_GOARCH ?= $(shell go env GOARCH)
|
GOOS ?= $(shell go env GOOS)
|
||||||
|
GOARCH ?= $(shell go env GOARCH)
|
||||||
|
GOPATH ?= $(shell go env GOPATH)
|
||||||
|
|
||||||
REPONAME ?= signoz
|
REPONAME ?= signoz
|
||||||
DOCKER_TAG ?= latest
|
DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
|
||||||
|
|
||||||
FRONTEND_DOCKER_IMAGE ?= frontend
|
FRONTEND_DOCKER_IMAGE ?= frontend
|
||||||
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
|
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
|
||||||
DEV_BUILD ?= ""
|
|
||||||
|
|
||||||
# Build-time Go variables
|
# Build-time Go variables
|
||||||
PACKAGE?=go.signoz.io/signoz
|
PACKAGE?=go.signoz.io/signoz
|
||||||
@@ -37,10 +38,22 @@ LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildV
|
|||||||
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
|
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
|
||||||
|
|
||||||
all: build-push-frontend build-push-query-service
|
all: build-push-frontend build-push-query-service
|
||||||
|
|
||||||
|
# Steps to build static files of frontend
|
||||||
|
build-frontend-static:
|
||||||
|
@echo "------------------"
|
||||||
|
@echo "--> Building frontend static files"
|
||||||
|
@echo "------------------"
|
||||||
|
@cd $(FRONTEND_DIRECTORY) && \
|
||||||
|
rm -rf build && \
|
||||||
|
CI=1 yarn install && \
|
||||||
|
yarn build && \
|
||||||
|
ls -l build
|
||||||
|
|
||||||
# Steps to build and push docker image of frontend
|
# Steps to build and push docker image of frontend
|
||||||
.PHONY: build-frontend-amd64 build-push-frontend
|
.PHONY: build-frontend-amd64 build-push-frontend
|
||||||
# Step to build docker image of frontend in amd64 (used in build pipeline)
|
# Step to build docker image of frontend in amd64 (used in build pipeline)
|
||||||
build-frontend-amd64:
|
build-frontend-amd64: build-frontend-static
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building frontend docker image for amd64"
|
@echo "--> Building frontend docker image for amd64"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@@ -49,7 +62,7 @@ build-frontend-amd64:
|
|||||||
--build-arg TARGETPLATFORM="linux/amd64" .
|
--build-arg TARGETPLATFORM="linux/amd64" .
|
||||||
|
|
||||||
# Step to build and push docker image of frontend(used in push pipeline)
|
# Step to build and push docker image of frontend(used in push pipeline)
|
||||||
build-push-frontend:
|
build-push-frontend: build-frontend-static
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building and pushing frontend docker image"
|
@echo "--> Building and pushing frontend docker image"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@@ -57,24 +70,52 @@ build-push-frontend:
|
|||||||
docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
|
docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
|
||||||
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
|
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||||
|
|
||||||
|
# Steps to build static binary of query service
|
||||||
|
.PHONY: build-query-service-static
|
||||||
|
build-query-service-static:
|
||||||
|
@echo "------------------"
|
||||||
|
@echo "--> Building query-service static binary"
|
||||||
|
@echo "------------------"
|
||||||
|
@if [ $(DEV_BUILD) != "" ]; then \
|
||||||
|
cd $(QUERY_SERVICE_DIRECTORY) && \
|
||||||
|
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
|
||||||
|
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
|
||||||
|
else \
|
||||||
|
cd $(QUERY_SERVICE_DIRECTORY) && \
|
||||||
|
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
|
||||||
|
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: build-query-service-static-amd64
|
||||||
|
build-query-service-static-amd64:
|
||||||
|
make GOARCH=amd64 build-query-service-static
|
||||||
|
|
||||||
|
.PHONY: build-query-service-static-arm64
|
||||||
|
build-query-service-static-arm64:
|
||||||
|
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
|
||||||
|
|
||||||
|
# Steps to build static binary of query service for all platforms
|
||||||
|
.PHONY: build-query-service-static-all
|
||||||
|
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
|
||||||
|
|
||||||
# Steps to build and push docker image of query service
|
# Steps to build and push docker image of query service
|
||||||
.PHONY: build-query-service-amd64 build-push-query-service
|
.PHONY: build-query-service-amd64 build-push-query-service
|
||||||
# Step to build docker image of query service in amd64 (used in build pipeline)
|
# Step to build docker image of query service in amd64 (used in build pipeline)
|
||||||
build-query-service-amd64:
|
build-query-service-amd64: build-query-service-static-amd64
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building query-service docker image for amd64"
|
@echo "--> Building query-service docker image for amd64"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||||
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
|
--build-arg TARGETPLATFORM="linux/amd64" .
|
||||||
|
|
||||||
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
|
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
|
||||||
build-push-query-service:
|
build-push-query-service: build-query-service-static-all
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building and pushing query-service docker image"
|
@echo "--> Building and pushing query-service docker image"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
|
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
|
||||||
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
|
--push --platform linux/arm64,linux/amd64 \
|
||||||
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||||
|
|
||||||
# Step to build EE docker image of query service in amd64 (used in build pipeline)
|
# Step to build EE docker image of query service in amd64 (used in build pipeline)
|
||||||
@@ -82,24 +123,14 @@ build-ee-query-service-amd64:
|
|||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building query-service docker image for amd64"
|
@echo "--> Building query-service docker image for amd64"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@if [ $(DEV_BUILD) != "" ]; then \
|
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64
|
||||||
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
|
||||||
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
|
||||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
|
|
||||||
else \
|
|
||||||
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
|
||||||
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
|
||||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
|
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
|
||||||
build-push-ee-query-service:
|
build-push-ee-query-service:
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building and pushing query-service docker image"
|
@echo "--> Building and pushing query-service docker image"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service
|
||||||
--progress plain --push --platform linux/arm64,linux/amd64 \
|
|
||||||
--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
|
||||||
|
|
||||||
dev-setup:
|
dev-setup:
|
||||||
mkdir -p /var/lib/signoz
|
mkdir -p /var/lib/signoz
|
||||||
@@ -110,7 +141,7 @@ dev-setup:
|
|||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
|
|
||||||
run-local:
|
run-local:
|
||||||
@LOCAL_GOOS=$(LOCAL_GOOS) LOCAL_GOARCH=$(LOCAL_GOARCH) docker-compose -f \
|
@docker-compose -f \
|
||||||
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
|
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
|
||||||
up --build -d
|
up --build -d
|
||||||
|
|
||||||
@@ -151,3 +182,6 @@ test:
|
|||||||
go test ./pkg/query-service/app/querier/...
|
go test ./pkg/query-service/app/querier/...
|
||||||
go test ./pkg/query-service/converter/...
|
go test ./pkg/query-service/converter/...
|
||||||
go test ./pkg/query-service/formatter/...
|
go test ./pkg/query-service/formatter/...
|
||||||
|
go test ./pkg/query-service/tests/integration/...
|
||||||
|
go test ./pkg/query-service/rules/...
|
||||||
|
go test ./pkg/query-service/collectorsimulator/...
|
||||||
|
|||||||
@@ -11,7 +11,6 @@
|
|||||||
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
|
||||||
<h3 align="center">
|
<h3 align="center">
|
||||||
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
|
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
|
||||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> •
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> •
|
||||||
@@ -40,12 +39,13 @@ SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren berei
|
|||||||
👉 Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
|
👉 Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
|
||||||
|
|
||||||
##
|
##
|
||||||
|
|
||||||
### Anwendung Metriken
|
### Anwendung Metriken
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
### Verteiltes Tracing
|
### Verteiltes Tracing
|
||||||
|
|
||||||
<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
|
<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
|
||||||
|
|
||||||
<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
|
<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
|
||||||
@@ -62,22 +62,18 @@ SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren berei
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
### Alarme
|
### Alarme
|
||||||
|
|
||||||
<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
|
<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
|
||||||
|
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
|
|
||||||
## Werde Teil unserer Slack Community
|
## Werde Teil unserer Slack Community
|
||||||
|
|
||||||
Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋
|
Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
|
|
||||||
## Funktionen:
|
## Funktionen:
|
||||||
|
|
||||||
- Einheitliche Benutzeroberfläche für Metriken, Traces und Logs. Keine Notwendigkeit, zwischen Prometheus und Jaeger zu wechseln, um Probleme zu debuggen oder ein separates Log-Tool wie Elastic neben Ihrer Metriken- und Traces-Stack zu verwenden.
|
- Einheitliche Benutzeroberfläche für Metriken, Traces und Logs. Keine Notwendigkeit, zwischen Prometheus und Jaeger zu wechseln, um Probleme zu debuggen oder ein separates Log-Tool wie Elastic neben Ihrer Metriken- und Traces-Stack zu verwenden.
|
||||||
@@ -93,7 +89,6 @@ Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋
|
|||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
|
|
||||||
## Wieso SigNoz?
|
## Wieso SigNoz?
|
||||||
|
|
||||||
Als Entwickler fanden wir es anstrengend, uns für jede kleine Funktion, die wir haben wollten, auf Closed Source SaaS Anbieter verlassen zu müssen. Closed Source Anbieter überraschen ihre Kunden zum Monatsende oft mit hohen Rechnungen, die keine Transparenz bzgl. der Kostenaufteilung bieten.
|
Als Entwickler fanden wir es anstrengend, uns für jede kleine Funktion, die wir haben wollten, auf Closed Source SaaS Anbieter verlassen zu müssen. Closed Source Anbieter überraschen ihre Kunden zum Monatsende oft mit hohen Rechnungen, die keine Transparenz bzgl. der Kostenaufteilung bieten.
|
||||||
@@ -116,12 +111,10 @@ Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als Bibliothek, mit
|
|||||||
- Elixir
|
- Elixir
|
||||||
- Rust
|
- Rust
|
||||||
|
|
||||||
|
|
||||||
Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/
|
Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
|
|
||||||
## Erste Schritte mit SigNoz
|
## Erste Schritte mit SigNoz
|
||||||
|
|
||||||
### Bereitstellung mit Docker
|
### Bereitstellung mit Docker
|
||||||
@@ -138,7 +131,6 @@ Bitte folge den [hier](https://signoz.io/docs/deployment/helm_chart) aufgelistet
|
|||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
|
|
||||||
## Vergleiche mit bekannten Tools
|
## Vergleiche mit bekannten Tools
|
||||||
|
|
||||||
### SigNoz vs Prometheus
|
### SigNoz vs Prometheus
|
||||||
@@ -179,7 +171,6 @@ Wir haben Benchmarks veröffentlicht, die Loki mit SigNoz vergleichen. Schauen S
|
|||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
|
|
||||||
## Zum Projekt beitragen
|
## Zum Projekt beitragen
|
||||||
|
|
||||||
Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md), durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
|
Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md), durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
|
||||||
@@ -197,6 +188,8 @@ Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem #c
|
|||||||
#### Frontend
|
#### Frontend
|
||||||
|
|
||||||
- [Palash Gupta](https://github.com/palashgdev)
|
- [Palash Gupta](https://github.com/palashgdev)
|
||||||
|
- [Yunus M](https://github.com/YounixM)
|
||||||
|
- [Rajat Dabade](https://github.com/Rajat-Dabade)
|
||||||
|
|
||||||
#### DevOps
|
#### DevOps
|
||||||
|
|
||||||
@@ -204,16 +197,12 @@ Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem #c
|
|||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
|
|
||||||
## Dokumentation
|
## Dokumentation
|
||||||
|
|
||||||
Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.
|
Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
|
|
||||||
## Gemeinschaft
|
## Gemeinschaft
|
||||||
|
|
||||||
Werde Teil der [slack community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
|
Werde Teil der [slack community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
|
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
|
||||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||||
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
||||||
@@ -108,7 +108,7 @@ We support [OpenTelemetry](https://opentelemetry.io) as the library which you ca
|
|||||||
|
|
||||||
- Java
|
- Java
|
||||||
- Python
|
- Python
|
||||||
- NodeJS
|
- Node.js
|
||||||
- Go
|
- Go
|
||||||
- PHP
|
- PHP
|
||||||
- .NET
|
- .NET
|
||||||
@@ -199,10 +199,13 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
|
|||||||
#### Frontend
|
#### Frontend
|
||||||
|
|
||||||
- [Palash Gupta](https://github.com/palashgdev)
|
- [Palash Gupta](https://github.com/palashgdev)
|
||||||
|
- [Yunus M](https://github.com/YounixM)
|
||||||
|
- [Rajat Dabade](https://github.com/Rajat-Dabade)
|
||||||
|
|
||||||
#### DevOps
|
#### DevOps
|
||||||
|
|
||||||
- [Prashant Shahi](https://github.com/prashant-shahi)
|
- [Prashant Shahi](https://github.com/prashant-shahi)
|
||||||
|
- [Dhawal Sanghvi](https://github.com/dhawal1248)
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
|
|||||||
203
README.zh-cn.md
@@ -1,170 +1,227 @@
|
|||||||
<p align="center">
|
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
|
||||||
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
|
|
||||||
|
|
||||||
<p align="center">监视你的应用,并可排查已部署应用中的问题,这是一个开源的可替代DataDog、NewRelic的方案</p>
|
<p align="center">监控你的应用,并且可排查已部署应用的问题,这是一个可替代 DataDog、NewRelic 的开源方案</p>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
|
||||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||||
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
<h3 align="center">
|
||||||
|
<a href="https://signoz.io/docs"><b>文档</b></a> •
|
||||||
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>中文ReadMe</b></a> •
|
||||||
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>德文ReadMe</b></a> •
|
||||||
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>葡萄牙语ReadMe</b></a> •
|
||||||
|
<a href="https://signoz.io/slack"><b>Slack 社区</b></a> •
|
||||||
|
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
|
||||||
|
</h3>
|
||||||
|
|
||||||
##
|
##
|
||||||
|
|
||||||
SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNoz使用分布式追踪来增加软件技术栈的可见性。
|
SigNoz 帮助开发人员监控应用并排查已部署应用的问题。你可以使用 SigNoz 实现如下能力:
|
||||||
|
|
||||||
👉 你能看到一些性能指标,服务、外部api调用、每个终端(endpoint)的p99延迟和错误率。
|
👉 在同一块面板上,可视化 Metrics, Traces 和 Logs 内容。
|
||||||
|
|
||||||
👉 通过准确的追踪来确定是什么引起了问题,并且可以看到每个独立请求的帧图(framegraph),这样你就能找到根本原因。
|
👉 你可以关注服务的 p99 延迟和错误率, 包括外部 API 调用和个别的端点。
|
||||||
|
|
||||||
👉 聚合trace数据来获得业务相关指标。
|
👉 你可以找到问题的根因,通过提取相关问题的 traces 日志、单独查看请求 traces 的火焰图详情。
|
||||||
|
|
||||||

|
👉 执行 trace 数据聚合,以获取业务相关的 metrics
|
||||||
<br />
|
|
||||||

|
👉 对日志过滤和查询,通过日志的属性建立看板和告警
|
||||||
<br />
|
|
||||||

|
👉 通过 Python,java,Ruby 和 Javascript 自动记录异常
|
||||||
|
|
||||||
|
👉 轻松的自定义查询和设置告警
|
||||||
|
|
||||||
|
### 应用 Metrics 展示
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### 分布式追踪
|
||||||
|
|
||||||
|
<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
|
||||||
|
|
||||||
|
<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
|
||||||
|
|
||||||
|
### 日志管理
|
||||||
|
|
||||||
|
<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
|
||||||
|
|
||||||
|
### 基础设施监控
|
||||||
|
|
||||||
|
<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
|
||||||
|
|
||||||
|
### 异常监控
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### 告警
|
||||||
|
|
||||||
|
<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
|
## 加入我们 Slack 社区
|
||||||
|
|
||||||
## 加入我们的Slack社区
|
来 [Slack](https://signoz.io/slack) 和我们打招呼吧 👋
|
||||||
|
|
||||||
来[Slack](https://signoz.io/slack) 跟我们打声招呼👋
|
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
|
## 特性:
|
||||||
|
|
||||||
## 功能:
|
- 为 metrics, traces and logs 制定统一的 UI。 无需切换 Prometheus 到 Jaeger 去查找问题,也无需使用想 Elastic 这样的日志工具分开你的 metrics 和 traces
|
||||||
|
|
||||||
- 应用概览指标(metrics),如RPS, p50/p90/p99延迟率分位值,错误率等。
|
- 默认统计应用的 metrics 数据,像 RPS (每秒请求数), 50th/90th/99th 的分位数延迟数据,还有相关的错误率
|
||||||
- 应用中最慢的终端(endpoint)
|
|
||||||
- 查看特定请求的trace数据来分析下游服务问题、慢数据库查询问题 及调用第三方服务如支付网关的问题
|
- 找到应用中最慢的端点
|
||||||
- 通过服务名称、操作、延迟、错误、标签来过滤traces。
|
|
||||||
- 聚合trace数据(events/spans)来得到业务相关指标。比如,你可以通过过滤条件`customer_type: gold` or `deployment_version: v2` or `external_call: paypal` 来获取指定业务的错误率和p99延迟
|
- 查看准确的请求跟踪数据,找到下游服务的问题了,比如 DB 慢查询,或者调用第三方的支付网关等
|
||||||
- 为metrics和trace提供统一的UI。排查问题不需要在Prometheus和Jaeger之间切换。
|
|
||||||
|
- 通过 服务名、操作方式、延迟、错误、标签/注释 过滤 traces 数据
|
||||||
|
|
||||||
|
- 通过聚合 trace 数据而获得业务相关的 metrics。 比如你可以通过 `customer_type: gold` 或者 `deployment_version: v2` 或者 `external_call: paypal` 获取错误率和 P99 延迟数据
|
||||||
|
|
||||||
|
- 原生支持 OpenTelemetry 日志,高级日志查询,自动收集 k8s 相关日志
|
||||||
|
|
||||||
|
- 快如闪电的日志分析 ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
|
||||||
|
|
||||||
|
- 可视化点到点的基础设施性能,提取有所有类型机器的 metrics 数据
|
||||||
|
|
||||||
|
- 轻易自定义告警查询
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
|
## 为什么使用 SigNoz?
|
||||||
|
|
||||||
## 为何选择SigNoz?
|
作为开发者, 我们发现 SaaS 厂商对一些大家想要的小功能都是闭源的,这种行为真的让人有点恼火。 闭源厂商还会在月底给你一张没有明细的巨额账单。
|
||||||
|
|
||||||
作为开发人员,我们发现依赖闭源的SaaS厂商提供的每个小功能有些麻烦,闭源厂商通常会给你一份巨额月付账单,但不提供足够的透明度,你不知道你为哪些功能付费。
|
我们想做一个自托管并且可开源的工具,像 DataDog 和 NewRelic 那样, 为那些担心数据隐私和安全的公司提供第三方服务。
|
||||||
|
|
||||||
我们想做一个自服务的开源版本的工具,类似于DataDog和NewRelic,用于那些对客户数据流入第三方有隐私和安全担忧的厂商。
|
作为开源的项目,你完全可以自己掌控你的配置、样本和更新。你同样可以基于 SigNoz 拓展特定的业务模块。
|
||||||
|
|
||||||
开源也让你对配置、采样和正常运行时间有完整的控制,你可以在SigNoz基础上构建模块来满足特定的商业需求。
|
### 支持的编程语言:
|
||||||
|
|
||||||
### 语言支持
|
我们支持 [OpenTelemetry](https://opentelemetry.io)。作为一个观测你应用的库文件。所以任何 OpenTelemetry 支持的框架和语言,对于 SigNoz 也同样支持。 一些主要支持的语言如下:
|
||||||
|
|
||||||
我们支持[OpenTelemetry](https://opentelemetry.io)库,你可以使用它来装备应用。也就是说SigNoz支持任何支持OpenTelemetry库的框架和语言。 主要支持语言包括:
|
|
||||||
|
|
||||||
- Java
|
- Java
|
||||||
- Python
|
- Python
|
||||||
- NodeJS
|
- NodeJS
|
||||||
- Go
|
- Go
|
||||||
|
- PHP
|
||||||
|
- .NET
|
||||||
|
- Ruby
|
||||||
|
- Elixir
|
||||||
|
- Rust
|
||||||
|
|
||||||
你可以在这个文档里找到完整的语言列表 - https://opentelemetry.io/docs/
|
你可以在这里找到全部支持的语言列表 - https://opentelemetry.io/docs/
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
|
## 让我们开始吧
|
||||||
|
|
||||||
## 入门
|
### 使用 Docker 部署
|
||||||
|
|
||||||
|
请一步步跟随 [这里](https://signoz.io/docs/install/docker/) 通过 docker 来安装。
|
||||||
|
|
||||||
### 使用Docker部署
|
这个 [排障说明书](https://signoz.io/docs/install/troubleshooting/) 可以帮助你解决碰到的问题。
|
||||||
|
|
||||||
请按照[这里](https://signoz.io/docs/install/docker/)列出的步骤使用Docker来安装
|
|
||||||
|
|
||||||
如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/install/troubleshooting/)会对你有帮助。
|
|
||||||
|
|
||||||
<p>  </p>
|
<p>  </p>
|
||||||
|
|
||||||
|
### 使用 Helm 在 Kubernetes 部署
|
||||||
|
|
||||||
### 使用Helm在Kubernetes上部署
|
请一步步跟随 [这里](https://signoz.io/docs/deployment/helm_chart) 通过 helm 来安装
|
||||||
|
|
||||||
请跟着[这里](https://signoz.io/docs/deployment/helm_chart)的步骤使用helm charts安装
|
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
|
## 比较相似的工具
|
||||||
|
|
||||||
## 与其他方案的比较
|
|
||||||
|
|
||||||
### SigNoz vs Prometheus
|
### SigNoz vs Prometheus
|
||||||
|
|
||||||
如果你只是需要监控指标(metrics),那Prometheus是不错的,但如果你要无缝的在metrics和traces之间切换,那目前把Prometheus & Jaeger串起来的体验并不好。
|
Prometheus 是一个针对 metrics 监控的强大工具。但是如果你想无缝的切换 metrics 和 traces 查询,你当前大概率需要在 Prometheus 和 Jaeger 之间切换。
|
||||||
|
|
||||||
我们的目标是为metrics和traces提供统一的UI - 类似于Datadog这样的Saas厂提供的方案。并且能够对trace进行过滤和聚合,这是目前Jaeger缺失的功能。
|
我们的目标是提供一个客户观测 metrics 和 traces 整合的 UI。就像 SaaS 供应商 DataDog,它提供很多 jaeger 缺失的功能,比如针对 traces 过滤功能和聚合功能。
|
||||||
|
|
||||||
<p>  </p>
|
<p>  </p>
|
||||||
|
|
||||||
### SigNoz vs Jaeger
|
### SigNoz vs Jaeger
|
||||||
|
|
||||||
Jaeger只做分布式追踪(distributed tracing),SigNoz则支持metrics,traces,logs ,即可视化的三大支柱。
|
Jaeger 仅仅是一个分布式追踪系统。 但是 SigNoz 可以提供 metrics, traces 和 logs 所有的观测。
|
||||||
|
|
||||||
并且SigNoz有一些Jaeger没有的高级功能:
|
而且, SigNoz 相较于 Jaeger 拥有更多的高级功能:
|
||||||
|
|
||||||
- Jaeger UI无法在traces或过滤的traces上展示metrics。
|
- Jaeger UI 不能提供任何基于 traces 的 metrics 查询和过滤。
|
||||||
- Jaeger不能对过滤的traces做聚合操作。例如,拥有tag为customer_type='premium'的所有请求的p99延迟。而这个功能在SigNoz这儿是很容易实现。
|
|
||||||
|
- Jaeger 不能针对过滤的 traces 做聚合。 比如,计算带有 customer_type='premium' 标签的所有请求的 p99 延迟。 而这些在 SigNoz 可以轻松做到。
|
||||||
|
|
||||||
|
<p>  </p>
|
||||||
|
|
||||||
|
### SigNoz vs Elastic
|
||||||
|
|
||||||
|
- SigNoz 的日志管理是基于 ClickHouse 实现的,可以使日志的聚合更加高效,因为它是基于 OLAP 的数据仓储。
|
||||||
|
|
||||||
|
- 与 Elastic 相比,可以节省 50% 的资源成本
|
||||||
|
|
||||||
|
我们已经公布了 Elastic 和 SigNoz 的性能对比。 请点击 [这里](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
|
||||||
|
|
||||||
|
<p>  </p>
|
||||||
|
|
||||||
|
### SigNoz vs Loki
|
||||||
|
|
||||||
|
- SigNoz 支持大容量高基数的聚合,但是 Loki 是不支持的。
|
||||||
|
|
||||||
|
- SigNoz 支持索引的高基数查询,并且对索引没有数量限制,而 Loki 会在添加部分索引后到达最大上限。
|
||||||
|
|
||||||
|
- 相较于 SigNoz,Loki 在搜索大量数据下既困难又缓慢。
|
||||||
|
|
||||||
|
我们已经发布了基准测试对比 Loki 和 SigNoz 性能。请点击 [这里](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
|
|
||||||
|
|
||||||
## 贡献
|
## 贡献
|
||||||
|
|
||||||
|
我们 ❤️ 你的贡献,无论大小。 请先阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 再开始给 SigNoz 做贡献。
|
||||||
|
|
||||||
我们 ❤️ 任何贡献无论大小。 请阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 然后开始给Signoz做贡献。
|
如果你不知道如何开始? 只需要在 [slack 社区](https://signoz.io/slack) 通过 `#contributing` 频道联系我们。
|
||||||
|
|
||||||
还不清楚怎么开始? 只需在[slack社区](https://signoz.io/slack)的`#contributing`频道里ping我们。
|
### 项目维护人员
|
||||||
|
|
||||||
### Project maintainers
|
#### 后端
|
||||||
|
|
||||||
#### Backend
|
|
||||||
|
|
||||||
- [Ankit Nayan](https://github.com/ankitnayan)
|
- [Ankit Nayan](https://github.com/ankitnayan)
|
||||||
- [Nityananda Gohain](https://github.com/nityanandagohain)
|
- [Nityananda Gohain](https://github.com/nityanandagohain)
|
||||||
- [Srikanth Chekuri](https://github.com/srikanthccv)
|
- [Srikanth Chekuri](https://github.com/srikanthccv)
|
||||||
- [Vishal Sharma](https://github.com/makeavish)
|
- [Vishal Sharma](https://github.com/makeavish)
|
||||||
|
|
||||||
#### Frontend
|
#### 前端
|
||||||
|
|
||||||
- [Palash Gupta](https://github.com/palashgdev)
|
- [Palash Gupta](https://github.com/palashgdev)
|
||||||
|
- [Yunus M](https://github.com/YounixM)
|
||||||
|
- [Rajat Dabade](https://github.com/Rajat-Dabade)
|
||||||
|
|
||||||
#### DevOps
|
#### 运维开发
|
||||||
|
|
||||||
- [Prashant Shahi](https://github.com/prashant-shahi)
|
- [Prashant Shahi](https://github.com/prashant-shahi)
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
|
|
||||||
|
|
||||||
## 文档
|
## 文档
|
||||||
|
|
||||||
文档在这里:https://signoz.io/docs/. 如果你觉得有任何不清楚或者有文档缺失,请在Github里发一个问题,并使用标签 `documentation` 或者在社区slack频道里告诉我们。
|
你可以通过 https://signoz.io/docs/ 找到相关文档。如果你发现任何不清楚或者缺失的内容,请提交带有 `documentation` 标签的 Github issue,或者通过 slack 社区频道告诉我们。
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
|
|
||||||
|
|
||||||
## 社区
|
## 社区
|
||||||
|
|
||||||
加入[slack community](https://signoz.io/slack),了解更多关于分布式跟踪、可观察性(observability),以及SigNoz。同时与其他用户和贡献者一起交流。
|
加入 [slack 社区](https://signoz.io/slack) 去了解更多关于分布式追踪、可观测性系统 。或者与 SigNoz 其他用户和贡献者交流。
|
||||||
|
|
||||||
如果你有任何想法、问题或者反馈,请在[Github Discussions](https://github.com/SigNoz/signoz/discussions)分享给我们。
|
如果你有任何想法、问题、或者任何反馈, 请通过 [Github Discussions](https://github.com/SigNoz/signoz/discussions) 分享。
|
||||||
|
|
||||||
最后,感谢我们这些优秀的贡献者们。
|
不管怎么样,感谢这个项目的所有贡献者!
|
||||||
|
|
||||||
<a href="https://github.com/signoz/signoz/graphs/contributors">
|
<a href="https://github.com/signoz/signoz/graphs/contributors">
|
||||||
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
|
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
|
||||||
</a>
|
</a>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,7 @@
|
|||||||
version: "3.9"
|
version: "3.9"
|
||||||
|
|
||||||
x-clickhouse-defaults:
|
x-clickhouse-defaults: &clickhouse-defaults
|
||||||
&clickhouse-defaults
|
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
|
||||||
tty: true
|
tty: true
|
||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
@@ -34,13 +33,14 @@ x-clickhouse-defaults:
|
|||||||
soft: 262144
|
soft: 262144
|
||||||
hard: 262144
|
hard: 262144
|
||||||
|
|
||||||
x-clickhouse-depend:
|
x-db-depend: &db-depend
|
||||||
&clickhouse-depend
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- clickhouse
|
- clickhouse
|
||||||
|
- otel-collector-migrator
|
||||||
# - clickhouse-2
|
# - clickhouse-2
|
||||||
# - clickhouse-3
|
# - clickhouse-3
|
||||||
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
zookeeper-1:
|
zookeeper-1:
|
||||||
image: bitnami/zookeeper:3.7.1
|
image: bitnami/zookeeper:3.7.1
|
||||||
@@ -133,7 +133,7 @@ services:
|
|||||||
# - ./data/clickhouse-3/:/var/lib/clickhouse/
|
# - ./data/clickhouse-3/:/var/lib/clickhouse/
|
||||||
|
|
||||||
alertmanager:
|
alertmanager:
|
||||||
image: signoz/alertmanager:0.23.2
|
image: signoz/alertmanager:0.23.5
|
||||||
volumes:
|
volumes:
|
||||||
- ./data/alertmanager:/data
|
- ./data/alertmanager:/data
|
||||||
command:
|
command:
|
||||||
@@ -146,8 +146,12 @@ services:
|
|||||||
condition: on-failure
|
condition: on-failure
|
||||||
|
|
||||||
query-service:
|
query-service:
|
||||||
image: signoz/query-service:0.26.1
|
image: signoz/query-service:0.39.0
|
||||||
command: [ "-config=/root/config/prometheus.yml" ]
|
command:
|
||||||
|
[
|
||||||
|
"-config=/root/config/prometheus.yml",
|
||||||
|
# "--prefer-delta=true"
|
||||||
|
]
|
||||||
# ports:
|
# ports:
|
||||||
# - "6060:6060" # pprof port
|
# - "6060:6060" # pprof port
|
||||||
# - "8080:8080" # query-service port
|
# - "8080:8080" # query-service port
|
||||||
@@ -156,7 +160,7 @@ services:
|
|||||||
- ../dashboards:/root/config/dashboards
|
- ../dashboards:/root/config/dashboards
|
||||||
- ./data/signoz/:/var/lib/signoz/
|
- ./data/signoz/:/var/lib/signoz/
|
||||||
environment:
|
environment:
|
||||||
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
|
- ClickHouseUrl=tcp://clickhouse:9000
|
||||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||||
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
||||||
- DASHBOARDS_PATH=/root/config/dashboards
|
- DASHBOARDS_PATH=/root/config/dashboards
|
||||||
@@ -179,10 +183,10 @@ services:
|
|||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
<<: *clickhouse-depend
|
<<: *db-depend
|
||||||
|
|
||||||
frontend:
|
frontend:
|
||||||
image: signoz/frontend:0.26.1
|
image: signoz/frontend:0.39.0
|
||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
@@ -195,15 +199,17 @@ services:
|
|||||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||||
|
|
||||||
otel-collector:
|
otel-collector:
|
||||||
image: signoz/signoz-otel-collector:0.79.5
|
image: signoz/signoz-otel-collector:0.88.12
|
||||||
command:
|
command:
|
||||||
[
|
[
|
||||||
"--config=/etc/otel-collector-config.yaml",
|
"--config=/etc/otel-collector-config.yaml",
|
||||||
|
"--manager-config=/etc/manager-config.yaml",
|
||||||
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||||
]
|
]
|
||||||
user: root # required for reading docker container logs
|
user: root # required for reading docker container logs
|
||||||
volumes:
|
volumes:
|
||||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||||
|
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||||
environment:
|
environment:
|
||||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
|
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
|
||||||
@@ -225,26 +231,23 @@ services:
|
|||||||
mode: global
|
mode: global
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
<<: *clickhouse-depend
|
depends_on:
|
||||||
|
- clickhouse
|
||||||
|
- otel-collector-migrator
|
||||||
|
- query-service
|
||||||
|
|
||||||
otel-collector-metrics:
|
otel-collector-migrator:
|
||||||
image: signoz/signoz-otel-collector:0.79.5
|
image: signoz/signoz-schema-migrator:0.88.12
|
||||||
command:
|
deploy:
|
||||||
[
|
restart_policy:
|
||||||
"--config=/etc/otel-collector-metrics-config.yaml",
|
condition: on-failure
|
||||||
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
delay: 5s
|
||||||
]
|
command:
|
||||||
volumes:
|
- "--dsn=tcp://clickhouse:9000"
|
||||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
depends_on:
|
||||||
# ports:
|
- clickhouse
|
||||||
# - "1777:1777" # pprof extension
|
# - clickhouse-2
|
||||||
# - "8888:8888" # OtelCollector internal metrics
|
# - clickhouse-3
|
||||||
# - "13133:13133" # Health check extension
|
|
||||||
# - "55679:55679" # zPages extension
|
|
||||||
deploy:
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
<<: *clickhouse-depend
|
|
||||||
|
|
||||||
logspout:
|
logspout:
|
||||||
image: "gliderlabs/logspout:v3.2.14"
|
image: "gliderlabs/logspout:v3.2.14"
|
||||||
|
|||||||
@@ -15,13 +15,9 @@ receivers:
|
|||||||
# please remove names from below if you want to collect logs from them
|
# please remove names from below if you want to collect logs from them
|
||||||
- type: filter
|
- type: filter
|
||||||
id: signoz_logs_filter
|
id: signoz_logs_filter
|
||||||
expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
|
expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
|
||||||
opencensus:
|
opencensus:
|
||||||
endpoint: 0.0.0.0:55678
|
endpoint: 0.0.0.0:55678
|
||||||
otlp/spanmetrics:
|
|
||||||
protocols:
|
|
||||||
grpc:
|
|
||||||
endpoint: localhost:12345
|
|
||||||
otlp:
|
otlp:
|
||||||
protocols:
|
protocols:
|
||||||
grpc:
|
grpc:
|
||||||
@@ -61,40 +57,6 @@ receivers:
|
|||||||
job_name: otel-collector
|
job_name: otel-collector
|
||||||
|
|
||||||
processors:
|
processors:
|
||||||
logstransform/internal:
|
|
||||||
operators:
|
|
||||||
- type: trace_parser
|
|
||||||
if: '"trace_id" in attributes or "span_id" in attributes'
|
|
||||||
trace_id:
|
|
||||||
parse_from: attributes.trace_id
|
|
||||||
span_id:
|
|
||||||
parse_from: attributes.span_id
|
|
||||||
output: remove_trace_id
|
|
||||||
- type: trace_parser
|
|
||||||
if: '"traceId" in attributes or "spanId" in attributes'
|
|
||||||
trace_id:
|
|
||||||
parse_from: attributes.traceId
|
|
||||||
span_id:
|
|
||||||
parse_from: attributes.spanId
|
|
||||||
output: remove_traceId
|
|
||||||
- id: remove_traceId
|
|
||||||
type: remove
|
|
||||||
if: '"traceId" in attributes'
|
|
||||||
field: attributes.traceId
|
|
||||||
output: remove_spanId
|
|
||||||
- id: remove_spanId
|
|
||||||
type: remove
|
|
||||||
if: '"spanId" in attributes'
|
|
||||||
field: attributes.spanId
|
|
||||||
- id: remove_trace_id
|
|
||||||
type: remove
|
|
||||||
if: '"trace_id" in attributes'
|
|
||||||
field: attributes.trace_id
|
|
||||||
output: remove_span_id
|
|
||||||
- id: remove_span_id
|
|
||||||
type: remove
|
|
||||||
if: '"span_id" in attributes'
|
|
||||||
field: attributes.span_id
|
|
||||||
batch:
|
batch:
|
||||||
send_batch_size: 10000
|
send_batch_size: 10000
|
||||||
send_batch_max_size: 11000
|
send_batch_max_size: 11000
|
||||||
@@ -103,8 +65,8 @@ processors:
|
|||||||
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
||||||
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
|
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
|
||||||
timeout: 2s
|
timeout: 2s
|
||||||
signozspanmetrics/prometheus:
|
signozspanmetrics/cumulative:
|
||||||
metrics_exporter: prometheus
|
metrics_exporter: clickhousemetricswrite
|
||||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||||
dimensions_cache_size: 100000
|
dimensions_cache_size: 100000
|
||||||
dimensions:
|
dimensions:
|
||||||
@@ -131,6 +93,21 @@ processors:
|
|||||||
# num_workers: 4
|
# num_workers: 4
|
||||||
# queue_size: 100
|
# queue_size: 100
|
||||||
# retry_on_failure: true
|
# retry_on_failure: true
|
||||||
|
signozspanmetrics/delta:
|
||||||
|
metrics_exporter: clickhousemetricswrite
|
||||||
|
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||||
|
dimensions_cache_size: 100000
|
||||||
|
aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
|
||||||
|
enable_exp_histogram: true
|
||||||
|
dimensions:
|
||||||
|
- name: service.namespace
|
||||||
|
default: default
|
||||||
|
- name: deployment.environment
|
||||||
|
default: default
|
||||||
|
# This is added to ensure the uniqueness of the timeseries
|
||||||
|
# Otherwise, identical timeseries produced by multiple replicas of
|
||||||
|
# collectors result in incorrect APM metrics
|
||||||
|
- name: signoz.collector.id
|
||||||
|
|
||||||
exporters:
|
exporters:
|
||||||
clickhousetraces:
|
clickhousetraces:
|
||||||
@@ -143,21 +120,11 @@ exporters:
|
|||||||
enabled: true
|
enabled: true
|
||||||
clickhousemetricswrite/prometheus:
|
clickhousemetricswrite/prometheus:
|
||||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||||
prometheus:
|
|
||||||
endpoint: 0.0.0.0:8889
|
|
||||||
# logging: {}
|
# logging: {}
|
||||||
clickhouselogsexporter:
|
clickhouselogsexporter:
|
||||||
dsn: tcp://clickhouse:9000/
|
dsn: tcp://clickhouse:9000/
|
||||||
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
||||||
timeout: 5s
|
timeout: 10s
|
||||||
sending_queue:
|
|
||||||
queue_size: 100
|
|
||||||
retry_on_failure:
|
|
||||||
enabled: true
|
|
||||||
initial_interval: 5s
|
|
||||||
max_interval: 30s
|
|
||||||
max_elapsed_time: 300s
|
|
||||||
|
|
||||||
extensions:
|
extensions:
|
||||||
health_check:
|
health_check:
|
||||||
endpoint: 0.0.0.0:13133
|
endpoint: 0.0.0.0:13133
|
||||||
@@ -174,7 +141,7 @@ service:
|
|||||||
pipelines:
|
pipelines:
|
||||||
traces:
|
traces:
|
||||||
receivers: [jaeger, otlp]
|
receivers: [jaeger, otlp]
|
||||||
processors: [signozspanmetrics/prometheus, batch]
|
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
|
||||||
exporters: [clickhousetraces]
|
exporters: [clickhousetraces]
|
||||||
metrics:
|
metrics:
|
||||||
receivers: [otlp]
|
receivers: [otlp]
|
||||||
@@ -188,10 +155,7 @@ service:
|
|||||||
receivers: [prometheus]
|
receivers: [prometheus]
|
||||||
processors: [batch]
|
processors: [batch]
|
||||||
exporters: [clickhousemetricswrite/prometheus]
|
exporters: [clickhousemetricswrite/prometheus]
|
||||||
metrics/spanmetrics:
|
|
||||||
receivers: [otlp/spanmetrics]
|
|
||||||
exporters: [prometheus]
|
|
||||||
logs:
|
logs:
|
||||||
receivers: [otlp, tcplog/docker]
|
receivers: [otlp, tcplog/docker]
|
||||||
processors: [logstransform/internal, batch]
|
processors: [batch]
|
||||||
exporters: [clickhouselogsexporter]
|
exporters: [clickhouselogsexporter]
|
||||||
|
|||||||
@@ -1,64 +0,0 @@
|
|||||||
receivers:
|
|
||||||
prometheus:
|
|
||||||
config:
|
|
||||||
scrape_configs:
|
|
||||||
# otel-collector-metrics internal metrics
|
|
||||||
- job_name: otel-collector-metrics
|
|
||||||
scrape_interval: 60s
|
|
||||||
static_configs:
|
|
||||||
- targets:
|
|
||||||
- localhost:8888
|
|
||||||
labels:
|
|
||||||
job_name: otel-collector-metrics
|
|
||||||
# SigNoz span metrics
|
|
||||||
- job_name: signozspanmetrics-collector
|
|
||||||
scrape_interval: 60s
|
|
||||||
dns_sd_configs:
|
|
||||||
- names:
|
|
||||||
- tasks.otel-collector
|
|
||||||
type: A
|
|
||||||
port: 8889
|
|
||||||
|
|
||||||
processors:
|
|
||||||
batch:
|
|
||||||
send_batch_size: 10000
|
|
||||||
send_batch_max_size: 11000
|
|
||||||
timeout: 10s
|
|
||||||
# memory_limiter:
|
|
||||||
# # 80% of maximum memory up to 2G
|
|
||||||
# limit_mib: 1500
|
|
||||||
# # 25% of limit up to 2G
|
|
||||||
# spike_limit_mib: 512
|
|
||||||
# check_interval: 5s
|
|
||||||
#
|
|
||||||
# # 50% of the maximum memory
|
|
||||||
# limit_percentage: 50
|
|
||||||
# # 20% of max memory usage spike expected
|
|
||||||
# spike_limit_percentage: 20
|
|
||||||
# queued_retry:
|
|
||||||
# num_workers: 4
|
|
||||||
# queue_size: 100
|
|
||||||
# retry_on_failure: true
|
|
||||||
|
|
||||||
exporters:
|
|
||||||
clickhousemetricswrite:
|
|
||||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
|
||||||
|
|
||||||
extensions:
|
|
||||||
health_check:
|
|
||||||
endpoint: 0.0.0.0:13133
|
|
||||||
zpages:
|
|
||||||
endpoint: 0.0.0.0:55679
|
|
||||||
pprof:
|
|
||||||
endpoint: 0.0.0.0:1777
|
|
||||||
|
|
||||||
service:
|
|
||||||
telemetry:
|
|
||||||
metrics:
|
|
||||||
address: 0.0.0.0:8888
|
|
||||||
extensions: [health_check, zpages, pprof]
|
|
||||||
pipelines:
|
|
||||||
metrics:
|
|
||||||
receivers: [prometheus]
|
|
||||||
processors: [batch]
|
|
||||||
exporters: [clickhousemetricswrite]
|
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
server_endpoint: ws://query-service:4320/v1/opamp
|
||||||
@@ -24,8 +24,16 @@ server {
|
|||||||
try_files $uri $uri/ /index.html;
|
try_files $uri $uri/ /index.html;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /api/alertmanager {
|
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
|
||||||
proxy_pass http://alertmanager:9093/api/v2;
|
proxy_pass http://query-service:8080;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
|
||||||
|
# connection will be closed if no data is read for 600s between successive read operations
|
||||||
|
proxy_read_timeout 600s;
|
||||||
|
|
||||||
|
# dont buffer the data send it directly to client.
|
||||||
|
proxy_buffering off;
|
||||||
|
proxy_cache off;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /api {
|
location /api {
|
||||||
|
|||||||
@@ -1,8 +1,25 @@
|
|||||||
version: "2.4"
|
version: "2.4"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
|
zookeeper-1:
|
||||||
|
image: bitnami/zookeeper:3.7.1
|
||||||
|
container_name: signoz-zookeeper-1
|
||||||
|
hostname: zookeeper-1
|
||||||
|
user: root
|
||||||
|
ports:
|
||||||
|
- "2181:2181"
|
||||||
|
- "2888:2888"
|
||||||
|
- "3888:3888"
|
||||||
|
volumes:
|
||||||
|
- ./data/zookeeper-1:/bitnami/zookeeper
|
||||||
|
environment:
|
||||||
|
- ZOO_SERVER_ID=1
|
||||||
|
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
|
||||||
|
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||||
|
- ZOO_AUTOPURGE_INTERVAL=1
|
||||||
|
|
||||||
clickhouse:
|
clickhouse:
|
||||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||||
container_name: signoz-clickhouse
|
container_name: signoz-clickhouse
|
||||||
# ports:
|
# ports:
|
||||||
# - "9000:9000"
|
# - "9000:9000"
|
||||||
@@ -11,8 +28,11 @@ services:
|
|||||||
volumes:
|
volumes:
|
||||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||||
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||||
|
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||||
|
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||||
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||||
|
- ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
logging:
|
logging:
|
||||||
options:
|
options:
|
||||||
@@ -34,7 +54,7 @@ services:
|
|||||||
|
|
||||||
alertmanager:
|
alertmanager:
|
||||||
container_name: signoz-alertmanager
|
container_name: signoz-alertmanager
|
||||||
image: signoz/alertmanager:0.23.2
|
image: signoz/alertmanager:0.23.5
|
||||||
volumes:
|
volumes:
|
||||||
- ./data/alertmanager:/data
|
- ./data/alertmanager:/data
|
||||||
depends_on:
|
depends_on:
|
||||||
@@ -45,18 +65,34 @@ services:
|
|||||||
- --queryService.url=http://query-service:8085
|
- --queryService.url=http://query-service:8085
|
||||||
- --storage.path=/data
|
- --storage.path=/data
|
||||||
|
|
||||||
|
otel-collector-migrator:
|
||||||
|
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.12}
|
||||||
|
container_name: otel-migrator
|
||||||
|
command:
|
||||||
|
- "--dsn=tcp://clickhouse:9000"
|
||||||
|
depends_on:
|
||||||
|
clickhouse:
|
||||||
|
condition: service_healthy
|
||||||
|
# clickhouse-2:
|
||||||
|
# condition: service_healthy
|
||||||
|
# clickhouse-3:
|
||||||
|
# condition: service_healthy
|
||||||
|
|
||||||
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
||||||
otel-collector:
|
otel-collector:
|
||||||
container_name: signoz-otel-collector
|
container_name: signoz-otel-collector
|
||||||
image: signoz/signoz-otel-collector:0.79.5
|
image: signoz/signoz-otel-collector:0.88.12
|
||||||
command:
|
command:
|
||||||
[
|
[
|
||||||
"--config=/etc/otel-collector-config.yaml",
|
"--config=/etc/otel-collector-config.yaml",
|
||||||
|
"--manager-config=/etc/manager-config.yaml",
|
||||||
|
"--copy-path=/var/tmp/collector-config.yaml",
|
||||||
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||||
]
|
]
|
||||||
# user: root # required for reading docker container logs
|
# user: root # required for reading docker container logs
|
||||||
volumes:
|
volumes:
|
||||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||||
|
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||||
environment:
|
environment:
|
||||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||||
ports:
|
ports:
|
||||||
@@ -75,25 +111,9 @@ services:
|
|||||||
depends_on:
|
depends_on:
|
||||||
clickhouse:
|
clickhouse:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
otel-collector-migrator:
|
||||||
otel-collector-metrics:
|
condition: service_completed_successfully
|
||||||
container_name: signoz-otel-collector-metrics
|
query-service:
|
||||||
image: signoz/signoz-otel-collector:0.79.5
|
|
||||||
command:
|
|
||||||
[
|
|
||||||
"--config=/etc/otel-collector-metrics-config.yaml",
|
|
||||||
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
|
||||||
]
|
|
||||||
volumes:
|
|
||||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
|
||||||
# ports:
|
|
||||||
# - "1777:1777" # pprof extension
|
|
||||||
# - "8888:8888" # OtelCollector internal metrics
|
|
||||||
# - "13133:13133" # Health check extension
|
|
||||||
# - "55679:55679" # zPages extension
|
|
||||||
restart: on-failure
|
|
||||||
depends_on:
|
|
||||||
clickhouse:
|
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
|
||||||
logspout:
|
logspout:
|
||||||
|
|||||||
@@ -4,11 +4,11 @@ services:
|
|||||||
query-service:
|
query-service:
|
||||||
hostname: query-service
|
hostname: query-service
|
||||||
build:
|
build:
|
||||||
context: "../../../pkg/query-service"
|
context: "../../../"
|
||||||
dockerfile: "./Dockerfile"
|
dockerfile: "./pkg/query-service/Dockerfile"
|
||||||
args:
|
args:
|
||||||
LDFLAGS: ""
|
LDFLAGS: ""
|
||||||
TARGETPLATFORM: "${LOCAL_GOOS}/${LOCAL_GOARCH}"
|
TARGETPLATFORM: "${GOOS}/${GOARCH}"
|
||||||
container_name: signoz-query-service
|
container_name: signoz-query-service
|
||||||
environment:
|
environment:
|
||||||
- ClickHouseUrl=tcp://clickhouse:9000
|
- ClickHouseUrl=tcp://clickhouse:9000
|
||||||
@@ -22,7 +22,11 @@ services:
|
|||||||
- ./prometheus.yml:/root/config/prometheus.yml
|
- ./prometheus.yml:/root/config/prometheus.yml
|
||||||
- ../dashboards:/root/config/dashboards
|
- ../dashboards:/root/config/dashboards
|
||||||
- ./data/signoz/:/var/lib/signoz/
|
- ./data/signoz/:/var/lib/signoz/
|
||||||
command: [ "-config=/root/config/prometheus.yml" ]
|
command:
|
||||||
|
[
|
||||||
|
"-config=/root/config/prometheus.yml",
|
||||||
|
# "--prefer-delta=true"
|
||||||
|
]
|
||||||
ports:
|
ports:
|
||||||
- "6060:6060"
|
- "6060:6060"
|
||||||
- "8080:8080"
|
- "8080:8080"
|
||||||
@@ -48,8 +52,8 @@ services:
|
|||||||
context: "../../../frontend"
|
context: "../../../frontend"
|
||||||
dockerfile: "./Dockerfile"
|
dockerfile: "./Dockerfile"
|
||||||
args:
|
args:
|
||||||
TARGETOS: "${LOCAL_GOOS}"
|
TARGETOS: "${GOOS}"
|
||||||
TARGETPLATFORM: "${LOCAL_GOARCH}"
|
TARGETPLATFORM: "${GOARCH}"
|
||||||
container_name: signoz-frontend
|
container_name: signoz-frontend
|
||||||
environment:
|
environment:
|
||||||
- FRONTEND_API_ENDPOINT=http://query-service:8080
|
- FRONTEND_API_ENDPOINT=http://query-service:8080
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
version: "2.4"
|
version: "2.4"
|
||||||
|
|
||||||
x-clickhouse-defaults:
|
x-clickhouse-defaults: &clickhouse-defaults
|
||||||
&clickhouse-defaults
|
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
# adding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
|
||||||
|
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||||
tty: true
|
tty: true
|
||||||
depends_on:
|
depends_on:
|
||||||
- zookeeper-1
|
- zookeeper-1
|
||||||
@@ -32,11 +32,12 @@ x-clickhouse-defaults:
|
|||||||
soft: 262144
|
soft: 262144
|
||||||
hard: 262144
|
hard: 262144
|
||||||
|
|
||||||
x-clickhouse-depend:
|
x-db-depend: &db-depend
|
||||||
&clickhouse-depend
|
|
||||||
depends_on:
|
depends_on:
|
||||||
clickhouse:
|
clickhouse:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
otel-collector-migrator:
|
||||||
|
condition: service_completed_successfully
|
||||||
# clickhouse-2:
|
# clickhouse-2:
|
||||||
# condition: service_healthy
|
# condition: service_healthy
|
||||||
# clickhouse-3:
|
# clickhouse-3:
|
||||||
@@ -148,7 +149,7 @@ services:
|
|||||||
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||||
|
|
||||||
alertmanager:
|
alertmanager:
|
||||||
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.2}
|
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
|
||||||
container_name: signoz-alertmanager
|
container_name: signoz-alertmanager
|
||||||
volumes:
|
volumes:
|
||||||
- ./data/alertmanager:/data
|
- ./data/alertmanager:/data
|
||||||
@@ -163,9 +164,13 @@ services:
|
|||||||
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
||||||
|
|
||||||
query-service:
|
query-service:
|
||||||
image: signoz/query-service:${DOCKER_TAG:-0.26.1}
|
image: signoz/query-service:${DOCKER_TAG:-0.39.0}
|
||||||
container_name: signoz-query-service
|
container_name: signoz-query-service
|
||||||
command: [ "-config=/root/config/prometheus.yml" ]
|
command:
|
||||||
|
[
|
||||||
|
"-config=/root/config/prometheus.yml",
|
||||||
|
# "--prefer-delta=true"
|
||||||
|
]
|
||||||
# ports:
|
# ports:
|
||||||
# - "6060:6060" # pprof port
|
# - "6060:6060" # pprof port
|
||||||
# - "8080:8080" # query-service port
|
# - "8080:8080" # query-service port
|
||||||
@@ -174,7 +179,7 @@ services:
|
|||||||
- ../dashboards:/root/config/dashboards
|
- ../dashboards:/root/config/dashboards
|
||||||
- ./data/signoz/:/var/lib/signoz/
|
- ./data/signoz/:/var/lib/signoz/
|
||||||
environment:
|
environment:
|
||||||
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
|
- ClickHouseUrl=tcp://clickhouse:9000
|
||||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||||
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
||||||
- DASHBOARDS_PATH=/root/config/dashboards
|
- DASHBOARDS_PATH=/root/config/dashboards
|
||||||
@@ -195,10 +200,10 @@ services:
|
|||||||
interval: 30s
|
interval: 30s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 3
|
retries: 3
|
||||||
<<: *clickhouse-depend
|
<<: *db-depend
|
||||||
|
|
||||||
frontend:
|
frontend:
|
||||||
image: signoz/frontend:${DOCKER_TAG:-0.26.1}
|
image: signoz/frontend:${DOCKER_TAG:-0.39.0}
|
||||||
container_name: signoz-frontend
|
container_name: signoz-frontend
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
depends_on:
|
depends_on:
|
||||||
@@ -209,17 +214,34 @@ services:
|
|||||||
volumes:
|
volumes:
|
||||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||||
|
|
||||||
|
otel-collector-migrator:
|
||||||
|
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.12}
|
||||||
|
container_name: otel-migrator
|
||||||
|
command:
|
||||||
|
- "--dsn=tcp://clickhouse:9000"
|
||||||
|
depends_on:
|
||||||
|
clickhouse:
|
||||||
|
condition: service_healthy
|
||||||
|
# clickhouse-2:
|
||||||
|
# condition: service_healthy
|
||||||
|
# clickhouse-3:
|
||||||
|
# condition: service_healthy
|
||||||
|
|
||||||
|
|
||||||
otel-collector:
|
otel-collector:
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.5}
|
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.12}
|
||||||
container_name: signoz-otel-collector
|
container_name: signoz-otel-collector
|
||||||
command:
|
command:
|
||||||
[
|
[
|
||||||
"--config=/etc/otel-collector-config.yaml",
|
"--config=/etc/otel-collector-config.yaml",
|
||||||
|
"--manager-config=/etc/manager-config.yaml",
|
||||||
|
"--copy-path=/var/tmp/collector-config.yaml",
|
||||||
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||||
]
|
]
|
||||||
user: root # required for reading docker container logs
|
user: root # required for reading docker container logs
|
||||||
volumes:
|
volumes:
|
||||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||||
|
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||||
environment:
|
environment:
|
||||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||||
@@ -238,25 +260,13 @@ services:
|
|||||||
# - "55678:55678" # OpenCensus receiver
|
# - "55678:55678" # OpenCensus receiver
|
||||||
# - "55679:55679" # zPages extension
|
# - "55679:55679" # zPages extension
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
<<: *clickhouse-depend
|
depends_on:
|
||||||
|
clickhouse:
|
||||||
otel-collector-metrics:
|
condition: service_healthy
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.5}
|
otel-collector-migrator:
|
||||||
container_name: signoz-otel-collector-metrics
|
condition: service_completed_successfully
|
||||||
command:
|
query-service:
|
||||||
[
|
condition: service_healthy
|
||||||
"--config=/etc/otel-collector-metrics-config.yaml",
|
|
||||||
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
|
||||||
]
|
|
||||||
volumes:
|
|
||||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
|
||||||
# ports:
|
|
||||||
# - "1777:1777" # pprof extension
|
|
||||||
# - "8888:8888" # OtelCollector internal metrics
|
|
||||||
# - "13133:13133" # Health check extension
|
|
||||||
# - "55679:55679" # zPages extension
|
|
||||||
restart: on-failure
|
|
||||||
<<: *clickhouse-depend
|
|
||||||
|
|
||||||
logspout:
|
logspout:
|
||||||
image: "gliderlabs/logspout:v3.2.14"
|
image: "gliderlabs/logspout:v3.2.14"
|
||||||
|
|||||||
@@ -15,13 +15,9 @@ receivers:
|
|||||||
# please remove names from below if you want to collect logs from them
|
# please remove names from below if you want to collect logs from them
|
||||||
- type: filter
|
- type: filter
|
||||||
id: signoz_logs_filter
|
id: signoz_logs_filter
|
||||||
expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
|
expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
|
||||||
opencensus:
|
opencensus:
|
||||||
endpoint: 0.0.0.0:55678
|
endpoint: 0.0.0.0:55678
|
||||||
otlp/spanmetrics:
|
|
||||||
protocols:
|
|
||||||
grpc:
|
|
||||||
endpoint: localhost:12345
|
|
||||||
otlp:
|
otlp:
|
||||||
protocols:
|
protocols:
|
||||||
grpc:
|
grpc:
|
||||||
@@ -62,46 +58,13 @@ receivers:
|
|||||||
|
|
||||||
|
|
||||||
processors:
|
processors:
|
||||||
logstransform/internal:
|
|
||||||
operators:
|
|
||||||
- type: trace_parser
|
|
||||||
if: '"trace_id" in attributes or "span_id" in attributes'
|
|
||||||
trace_id:
|
|
||||||
parse_from: attributes.trace_id
|
|
||||||
span_id:
|
|
||||||
parse_from: attributes.span_id
|
|
||||||
output: remove_trace_id
|
|
||||||
- type: trace_parser
|
|
||||||
if: '"traceId" in attributes or "spanId" in attributes'
|
|
||||||
trace_id:
|
|
||||||
parse_from: attributes.traceId
|
|
||||||
span_id:
|
|
||||||
parse_from: attributes.spanId
|
|
||||||
output: remove_traceId
|
|
||||||
- id: remove_traceId
|
|
||||||
type: remove
|
|
||||||
if: '"traceId" in attributes'
|
|
||||||
field: attributes.traceId
|
|
||||||
output: remove_spanId
|
|
||||||
- id: remove_spanId
|
|
||||||
type: remove
|
|
||||||
if: '"spanId" in attributes'
|
|
||||||
field: attributes.spanId
|
|
||||||
- id: remove_trace_id
|
|
||||||
type: remove
|
|
||||||
if: '"trace_id" in attributes'
|
|
||||||
field: attributes.trace_id
|
|
||||||
output: remove_span_id
|
|
||||||
- id: remove_span_id
|
|
||||||
type: remove
|
|
||||||
if: '"span_id" in attributes'
|
|
||||||
field: attributes.span_id
|
|
||||||
batch:
|
batch:
|
||||||
send_batch_size: 10000
|
send_batch_size: 10000
|
||||||
send_batch_max_size: 11000
|
send_batch_max_size: 11000
|
||||||
timeout: 10s
|
timeout: 10s
|
||||||
signozspanmetrics/prometheus:
|
signozspanmetrics/cumulative:
|
||||||
metrics_exporter: prometheus
|
metrics_exporter: clickhousemetricswrite
|
||||||
|
metrics_flush_interval: 60s
|
||||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||||
dimensions_cache_size: 100000
|
dimensions_cache_size: 100000
|
||||||
dimensions:
|
dimensions:
|
||||||
@@ -132,6 +95,22 @@ processors:
|
|||||||
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
||||||
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
|
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
|
||||||
timeout: 2s
|
timeout: 2s
|
||||||
|
signozspanmetrics/delta:
|
||||||
|
metrics_exporter: clickhousemetricswrite
|
||||||
|
metrics_flush_interval: 60s
|
||||||
|
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||||
|
dimensions_cache_size: 100000
|
||||||
|
aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
|
||||||
|
enable_exp_histogram: true
|
||||||
|
dimensions:
|
||||||
|
- name: service.namespace
|
||||||
|
default: default
|
||||||
|
- name: deployment.environment
|
||||||
|
default: default
|
||||||
|
# This is added to ensure the uniqueness of the timeseries
|
||||||
|
# Otherwise, identical timeseries produced by multiple replicas of
|
||||||
|
# collectors result in incorrect APM metrics
|
||||||
|
- name: signoz.collector.id
|
||||||
|
|
||||||
extensions:
|
extensions:
|
||||||
health_check:
|
health_check:
|
||||||
@@ -152,21 +131,12 @@ exporters:
|
|||||||
enabled: true
|
enabled: true
|
||||||
clickhousemetricswrite/prometheus:
|
clickhousemetricswrite/prometheus:
|
||||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||||
prometheus:
|
|
||||||
endpoint: 0.0.0.0:8889
|
|
||||||
# logging: {}
|
# logging: {}
|
||||||
|
|
||||||
clickhouselogsexporter:
|
clickhouselogsexporter:
|
||||||
dsn: tcp://clickhouse:9000/
|
dsn: tcp://clickhouse:9000/
|
||||||
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
||||||
timeout: 5s
|
timeout: 10s
|
||||||
sending_queue:
|
|
||||||
queue_size: 100
|
|
||||||
retry_on_failure:
|
|
||||||
enabled: true
|
|
||||||
initial_interval: 5s
|
|
||||||
max_interval: 30s
|
|
||||||
max_elapsed_time: 300s
|
|
||||||
|
|
||||||
service:
|
service:
|
||||||
telemetry:
|
telemetry:
|
||||||
@@ -179,7 +149,7 @@ service:
|
|||||||
pipelines:
|
pipelines:
|
||||||
traces:
|
traces:
|
||||||
receivers: [jaeger, otlp]
|
receivers: [jaeger, otlp]
|
||||||
processors: [signozspanmetrics/prometheus, batch]
|
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
|
||||||
exporters: [clickhousetraces]
|
exporters: [clickhousetraces]
|
||||||
metrics:
|
metrics:
|
||||||
receivers: [otlp]
|
receivers: [otlp]
|
||||||
@@ -193,10 +163,7 @@ service:
|
|||||||
receivers: [prometheus]
|
receivers: [prometheus]
|
||||||
processors: [batch]
|
processors: [batch]
|
||||||
exporters: [clickhousemetricswrite/prometheus]
|
exporters: [clickhousemetricswrite/prometheus]
|
||||||
metrics/spanmetrics:
|
|
||||||
receivers: [otlp/spanmetrics]
|
|
||||||
exporters: [prometheus]
|
|
||||||
logs:
|
logs:
|
||||||
receivers: [otlp, tcplog/docker]
|
receivers: [otlp, tcplog/docker]
|
||||||
processors: [logstransform/internal, batch]
|
processors: [batch]
|
||||||
exporters: [clickhouselogsexporter]
|
exporters: [clickhouselogsexporter]
|
||||||
@@ -1,69 +0,0 @@
|
|||||||
receivers:
|
|
||||||
otlp:
|
|
||||||
protocols:
|
|
||||||
grpc:
|
|
||||||
http:
|
|
||||||
prometheus:
|
|
||||||
config:
|
|
||||||
scrape_configs:
|
|
||||||
# otel-collector-metrics internal metrics
|
|
||||||
- job_name: otel-collector-metrics
|
|
||||||
scrape_interval: 60s
|
|
||||||
static_configs:
|
|
||||||
- targets:
|
|
||||||
- localhost:8888
|
|
||||||
labels:
|
|
||||||
job_name: otel-collector-metrics
|
|
||||||
# SigNoz span metrics
|
|
||||||
- job_name: signozspanmetrics-collector
|
|
||||||
scrape_interval: 60s
|
|
||||||
static_configs:
|
|
||||||
- targets:
|
|
||||||
- otel-collector:8889
|
|
||||||
|
|
||||||
processors:
|
|
||||||
batch:
|
|
||||||
send_batch_size: 10000
|
|
||||||
send_batch_max_size: 11000
|
|
||||||
timeout: 10s
|
|
||||||
# memory_limiter:
|
|
||||||
# # 80% of maximum memory up to 2G
|
|
||||||
# limit_mib: 1500
|
|
||||||
# # 25% of limit up to 2G
|
|
||||||
# spike_limit_mib: 512
|
|
||||||
# check_interval: 5s
|
|
||||||
#
|
|
||||||
# # 50% of the maximum memory
|
|
||||||
# limit_percentage: 50
|
|
||||||
# # 20% of max memory usage spike expected
|
|
||||||
# spike_limit_percentage: 20
|
|
||||||
# queued_retry:
|
|
||||||
# num_workers: 4
|
|
||||||
# queue_size: 100
|
|
||||||
# retry_on_failure: true
|
|
||||||
|
|
||||||
extensions:
|
|
||||||
health_check:
|
|
||||||
endpoint: 0.0.0.0:13133
|
|
||||||
zpages:
|
|
||||||
endpoint: 0.0.0.0:55679
|
|
||||||
pprof:
|
|
||||||
endpoint: 0.0.0.0:1777
|
|
||||||
|
|
||||||
exporters:
|
|
||||||
clickhousemetricswrite:
|
|
||||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
|
||||||
|
|
||||||
service:
|
|
||||||
telemetry:
|
|
||||||
metrics:
|
|
||||||
address: 0.0.0.0:8888
|
|
||||||
extensions:
|
|
||||||
- health_check
|
|
||||||
- zpages
|
|
||||||
- pprof
|
|
||||||
pipelines:
|
|
||||||
metrics:
|
|
||||||
receivers: [prometheus]
|
|
||||||
processors: [batch]
|
|
||||||
exporters: [clickhousemetricswrite]
|
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
server_endpoint: ws://query-service:4320/v1/opamp
|
||||||
@@ -24,8 +24,16 @@ server {
|
|||||||
try_files $uri $uri/ /index.html;
|
try_files $uri $uri/ /index.html;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /api/alertmanager {
|
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
|
||||||
proxy_pass http://alertmanager:9093/api/v2;
|
proxy_pass http://query-service:8080;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
|
||||||
|
# connection will be closed if no data is read for 600s between successive read operations
|
||||||
|
proxy_read_timeout 600s;
|
||||||
|
|
||||||
|
# dont buffer the data send it directly to client.
|
||||||
|
proxy_buffering off;
|
||||||
|
proxy_cache off;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /api {
|
location /api {
|
||||||
|
|||||||
@@ -534,7 +534,7 @@ else
|
|||||||
echo ""
|
echo ""
|
||||||
echo -e "🟢 Your frontend is running on http://localhost:3301"
|
echo -e "🟢 Your frontend is running on http://localhost:3301"
|
||||||
echo ""
|
echo ""
|
||||||
echo "ℹ️ By default, retention period is set to 7 days for logs and traces, and 30 days for metrics."
|
echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
|
||||||
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
|
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
|
||||||
|
|
||||||
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
||||||
|
|||||||
14
e2e/package.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"name": "e2e",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"main": "index.js",
|
||||||
|
"license": "MIT",
|
||||||
|
"devDependencies": {
|
||||||
|
"@playwright/test": "^1.22.0",
|
||||||
|
"@types/node": "^20.9.2"
|
||||||
|
},
|
||||||
|
"scripts": {},
|
||||||
|
"dependencies": {
|
||||||
|
"dotenv": "8.2.0"
|
||||||
|
}
|
||||||
|
}
|
||||||
46
e2e/playwright.config.ts
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
import { defineConfig, devices } from "@playwright/test";
|
||||||
|
import dotenv from "dotenv";
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
export default defineConfig({
|
||||||
|
testDir: "./tests",
|
||||||
|
|
||||||
|
fullyParallel: true,
|
||||||
|
|
||||||
|
forbidOnly: !!process.env.CI,
|
||||||
|
|
||||||
|
name: "Signoz E2E",
|
||||||
|
|
||||||
|
retries: process.env.CI ? 2 : 0,
|
||||||
|
|
||||||
|
reporter: process.env.CI ? "github" : "list",
|
||||||
|
|
||||||
|
preserveOutput: "always",
|
||||||
|
|
||||||
|
updateSnapshots: "all",
|
||||||
|
|
||||||
|
quiet: false,
|
||||||
|
|
||||||
|
testMatch: ["**/*.spec.ts"],
|
||||||
|
|
||||||
|
use: {
|
||||||
|
trace: "on-first-retry",
|
||||||
|
|
||||||
|
baseURL:
|
||||||
|
process.env.PLAYWRIGHT_TEST_BASE_URL || "https://stagingapp.signoz.io/",
|
||||||
|
},
|
||||||
|
|
||||||
|
projects: [
|
||||||
|
{ name: "setup", testMatch: /.*\.setup\.ts/ },
|
||||||
|
{
|
||||||
|
name: "chromium",
|
||||||
|
use: {
|
||||||
|
...devices["Desktop Chrome"],
|
||||||
|
// Use prepared auth state.
|
||||||
|
storageState: ".auth/user.json",
|
||||||
|
},
|
||||||
|
dependencies: ["setup"],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
||||||
37
e2e/tests/auth.setup.ts
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
import { test, expect } from "@playwright/test";
|
||||||
|
import ROUTES from "../../frontend/src/constants/routes";
|
||||||
|
import dotenv from "dotenv";
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
const authFile = ".auth/user.json";
|
||||||
|
|
||||||
|
test("E2E Login Test", async ({ page }) => {
|
||||||
|
await Promise.all([page.goto("/"), page.waitForRequest("**/version")]);
|
||||||
|
|
||||||
|
const signup = "Monitor your applications. Find what is causing issues.";
|
||||||
|
|
||||||
|
const el = await page.locator(`text=${signup}`);
|
||||||
|
|
||||||
|
expect(el).toBeVisible();
|
||||||
|
|
||||||
|
await page
|
||||||
|
.locator("id=loginEmail")
|
||||||
|
.type(
|
||||||
|
process.env.PLAYWRIGHT_USERNAME ? process.env.PLAYWRIGHT_USERNAME : ""
|
||||||
|
);
|
||||||
|
|
||||||
|
await page.getByText("Next").click();
|
||||||
|
|
||||||
|
await page
|
||||||
|
.locator('input[id="currentPassword"]')
|
||||||
|
.fill(
|
||||||
|
process.env.PLAYWRIGHT_PASSWORD ? process.env.PLAYWRIGHT_PASSWORD : ""
|
||||||
|
);
|
||||||
|
|
||||||
|
await page.locator('button[data-attr="signup"]').click();
|
||||||
|
|
||||||
|
await expect(page).toHaveURL(ROUTES.APPLICATION);
|
||||||
|
|
||||||
|
await page.context().storageState({ path: authFile });
|
||||||
|
});
|
||||||
10
e2e/tests/contants.ts
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
export const SERVICE_TABLE_HEADERS = {
|
||||||
|
APPLICATION: "Applicaton",
|
||||||
|
P99LATENCY: "P99 latency (in ms)",
|
||||||
|
ERROR_RATE: "Error Rate (% of total)",
|
||||||
|
OPS_PER_SECOND: "Operations Per Second",
|
||||||
|
};
|
||||||
|
|
||||||
|
export const DATA_TEST_IDS = {
|
||||||
|
NEW_DASHBOARD_BTN: "create-new-dashboard",
|
||||||
|
};
|
||||||
40
e2e/tests/navigation.spec.ts
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
import { test, expect } from "@playwright/test";
|
||||||
|
import ROUTES from "../../frontend/src/constants/routes";
|
||||||
|
import { DATA_TEST_IDS, SERVICE_TABLE_HEADERS } from "./contants";
|
||||||
|
|
||||||
|
test("Basic Navigation Check across different resources", async ({ page }) => {
|
||||||
|
// route to services page and check if the page renders fine with BE contract
|
||||||
|
await Promise.all([
|
||||||
|
page.goto(ROUTES.APPLICATION),
|
||||||
|
page.waitForRequest("**/v1/services"),
|
||||||
|
]);
|
||||||
|
|
||||||
|
const p99Latency = page.locator(
|
||||||
|
`th:has-text("${SERVICE_TABLE_HEADERS.P99LATENCY}")`
|
||||||
|
);
|
||||||
|
|
||||||
|
await expect(p99Latency).toBeVisible();
|
||||||
|
|
||||||
|
// route to the new trace explorer page and check if the page renders fine
|
||||||
|
await page.goto(ROUTES.TRACES_EXPLORER);
|
||||||
|
|
||||||
|
await page.waitForLoadState("networkidle");
|
||||||
|
|
||||||
|
const listViewTable = await page
|
||||||
|
.locator('div[role="presentation"]')
|
||||||
|
.isVisible();
|
||||||
|
|
||||||
|
expect(listViewTable).toBeTruthy();
|
||||||
|
|
||||||
|
// route to the dashboards page and check if the page renders fine
|
||||||
|
await Promise.all([
|
||||||
|
page.goto(ROUTES.ALL_DASHBOARD),
|
||||||
|
page.waitForRequest("**/v1/dashboards"),
|
||||||
|
]);
|
||||||
|
|
||||||
|
const newDashboardBtn = await page
|
||||||
|
.locator(`data-testid=${DATA_TEST_IDS.NEW_DASHBOARD_BTN}`)
|
||||||
|
.isVisible();
|
||||||
|
|
||||||
|
expect(newDashboardBtn).toBeTruthy();
|
||||||
|
});
|
||||||
46
e2e/yarn.lock
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
|
||||||
|
# yarn lockfile v1
|
||||||
|
|
||||||
|
|
||||||
|
"@playwright/test@^1.22.0":
|
||||||
|
version "1.40.0"
|
||||||
|
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.40.0.tgz#d06c506977dd7863aa16e07f2136351ecc1be6ed"
|
||||||
|
integrity sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==
|
||||||
|
dependencies:
|
||||||
|
playwright "1.40.0"
|
||||||
|
|
||||||
|
"@types/node@^20.9.2":
|
||||||
|
version "20.9.2"
|
||||||
|
resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.2.tgz#002815c8e87fe0c9369121c78b52e800fadc0ac6"
|
||||||
|
integrity sha512-WHZXKFCEyIUJzAwh3NyyTHYSR35SevJ6mZ1nWwJafKtiQbqRTIKSRcw3Ma3acqgsent3RRDqeVwpHntMk+9irg==
|
||||||
|
dependencies:
|
||||||
|
undici-types "~5.26.4"
|
||||||
|
|
||||||
|
dotenv@8.2.0:
|
||||||
|
version "8.2.0"
|
||||||
|
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a"
|
||||||
|
integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw==
|
||||||
|
|
||||||
|
fsevents@2.3.2:
|
||||||
|
version "2.3.2"
|
||||||
|
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
|
||||||
|
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
|
||||||
|
|
||||||
|
playwright-core@1.40.0:
|
||||||
|
version "1.40.0"
|
||||||
|
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.40.0.tgz#82f61e5504cb3097803b6f8bbd98190dd34bdf14"
|
||||||
|
integrity sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==
|
||||||
|
|
||||||
|
playwright@1.40.0:
|
||||||
|
version "1.40.0"
|
||||||
|
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.40.0.tgz#2a1824b9fe5c4fe52ed53db9ea68003543a99df0"
|
||||||
|
integrity sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==
|
||||||
|
dependencies:
|
||||||
|
playwright-core "1.40.0"
|
||||||
|
optionalDependencies:
|
||||||
|
fsevents "2.3.2"
|
||||||
|
|
||||||
|
undici-types@~5.26.4:
|
||||||
|
version "5.26.5"
|
||||||
|
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
|
||||||
|
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
|
||||||
@@ -1,43 +1,24 @@
|
|||||||
FROM golang:1.18-buster AS builder
|
|
||||||
|
|
||||||
# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
|
|
||||||
ARG LD_FLAGS
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
|
|
||||||
ENV CGO_ENABLED=1
|
|
||||||
ENV GOPATH=/go
|
|
||||||
|
|
||||||
RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
|
|
||||||
export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2)
|
|
||||||
|
|
||||||
# Prepare and enter src directory
|
|
||||||
WORKDIR /go/src/github.com/signoz/signoz
|
|
||||||
|
|
||||||
# Add the sources and proceed with build
|
|
||||||
ADD . .
|
|
||||||
RUN cd ee/query-service \
|
|
||||||
&& go build -tags timetzdata -a -o ./bin/query-service \
|
|
||||||
-ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" \
|
|
||||||
&& chmod +x ./bin/query-service
|
|
||||||
|
|
||||||
|
|
||||||
# use a minimal alpine image
|
# use a minimal alpine image
|
||||||
FROM alpine:3.7
|
FROM alpine:3.18.6
|
||||||
|
|
||||||
# Add Maintainer Info
|
# Add Maintainer Info
|
||||||
LABEL maintainer="signoz"
|
LABEL maintainer="signoz"
|
||||||
|
|
||||||
|
# define arguments that can be passed during build time
|
||||||
|
ARG TARGETOS TARGETARCH
|
||||||
|
|
||||||
# add ca-certificates in case you need them
|
# add ca-certificates in case you need them
|
||||||
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
|
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
|
||||||
|
|
||||||
# set working directory
|
# set working directory
|
||||||
WORKDIR /root
|
WORKDIR /root
|
||||||
|
|
||||||
# copy the binary from builder
|
# copy the query-service binary
|
||||||
COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-service .
|
COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service
|
||||||
|
|
||||||
# copy prometheus YAML config
|
# copy prometheus YAML config
|
||||||
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
|
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
|
||||||
|
COPY pkg/query-service/templates /root/templates
|
||||||
|
|
||||||
# Make query-service executable for non-root users
|
# Make query-service executable for non-root users
|
||||||
RUN chmod 755 /root /root/query-service
|
RUN chmod 755 /root /root/query-service
|
||||||
@@ -45,7 +26,6 @@ RUN chmod 755 /root /root/query-service
|
|||||||
# run the binary
|
# run the binary
|
||||||
ENTRYPOINT ["./query-service"]
|
ENTRYPOINT ["./query-service"]
|
||||||
|
|
||||||
CMD ["-config", "../config/prometheus.yml"]
|
CMD ["-config", "/root/config/prometheus.yml"]
|
||||||
# CMD ["./query-service -config /root/config/prometheus.yml"]
|
|
||||||
|
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
|
|||||||
@@ -8,8 +8,11 @@ import (
|
|||||||
"go.signoz.io/signoz/ee/query-service/dao"
|
"go.signoz.io/signoz/ee/query-service/dao"
|
||||||
"go.signoz.io/signoz/ee/query-service/interfaces"
|
"go.signoz.io/signoz/ee/query-service/interfaces"
|
||||||
"go.signoz.io/signoz/ee/query-service/license"
|
"go.signoz.io/signoz/ee/query-service/license"
|
||||||
|
"go.signoz.io/signoz/ee/query-service/usage"
|
||||||
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/app/integrations"
|
||||||
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
|
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/cache"
|
||||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
rules "go.signoz.io/signoz/pkg/query-service/rules"
|
rules "go.signoz.io/signoz/pkg/query-service/rules"
|
||||||
@@ -26,9 +29,14 @@ type APIHandlerOptions struct {
|
|||||||
DialTimeout time.Duration
|
DialTimeout time.Duration
|
||||||
AppDao dao.ModelDao
|
AppDao dao.ModelDao
|
||||||
RulesManager *rules.Manager
|
RulesManager *rules.Manager
|
||||||
|
UsageManager *usage.Manager
|
||||||
FeatureFlags baseint.FeatureLookup
|
FeatureFlags baseint.FeatureLookup
|
||||||
LicenseManager *license.Manager
|
LicenseManager *license.Manager
|
||||||
|
IntegrationsController *integrations.Controller
|
||||||
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||||
|
Cache cache.Cache
|
||||||
|
// Querier Influx Interval
|
||||||
|
FluxInterval time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
type APIHandler struct {
|
type APIHandler struct {
|
||||||
@@ -50,7 +58,10 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
|
|||||||
AppDao: opts.AppDao,
|
AppDao: opts.AppDao,
|
||||||
RuleManager: opts.RulesManager,
|
RuleManager: opts.RulesManager,
|
||||||
FeatureFlags: opts.FeatureFlags,
|
FeatureFlags: opts.FeatureFlags,
|
||||||
|
IntegrationsController: opts.IntegrationsController,
|
||||||
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
||||||
|
Cache: opts.Cache,
|
||||||
|
FluxInterval: opts.FluxInterval,
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -76,6 +87,10 @@ func (ah *APIHandler) LM() *license.Manager {
|
|||||||
return ah.opts.LicenseManager
|
return ah.opts.LicenseManager
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) UM() *usage.Manager {
|
||||||
|
return ah.opts.UsageManager
|
||||||
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) AppDao() dao.ModelDao {
|
func (ah *APIHandler) AppDao() dao.ModelDao {
|
||||||
return ah.opts.AppDao
|
return ah.opts.AppDao
|
||||||
}
|
}
|
||||||
@@ -140,9 +155,21 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
|
|||||||
router.HandleFunc("/api/v2/metrics/query_range", am.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)
|
router.HandleFunc("/api/v2/metrics/query_range", am.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)
|
||||||
|
|
||||||
// PAT APIs
|
// PAT APIs
|
||||||
router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.createPAT)).Methods(http.MethodPost)
|
router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.createPAT)).Methods(http.MethodPost)
|
||||||
router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.getPATs)).Methods(http.MethodGet)
|
router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.getPATs)).Methods(http.MethodGet)
|
||||||
router.HandleFunc("/api/v1/pat/{id}", am.OpenAccess(ah.deletePAT)).Methods(http.MethodDelete)
|
router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.updatePAT)).Methods(http.MethodPut)
|
||||||
|
router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.revokePAT)).Methods(http.MethodDelete)
|
||||||
|
|
||||||
|
router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.checkout)).Methods(http.MethodPost)
|
||||||
|
router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
|
||||||
|
router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.portalSession)).Methods(http.MethodPost)
|
||||||
|
|
||||||
|
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
|
||||||
|
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
|
||||||
|
|
||||||
|
router.HandleFunc("/api/v2/licenses",
|
||||||
|
am.ViewAccess(ah.listLicensesV2)).
|
||||||
|
Methods(http.MethodGet)
|
||||||
|
|
||||||
ah.APIHandler.RegisterRoutes(router, am)
|
ah.APIHandler.RegisterRoutes(router, am)
|
||||||
|
|
||||||
|
|||||||
@@ -5,22 +5,23 @@ import (
|
|||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
|
||||||
"go.signoz.io/signoz/ee/query-service/constants"
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func parseRequest(r *http.Request, req interface{}) error {
|
func parseRequest(r *http.Request, req interface{}) error {
|
||||||
defer r.Body.Close()
|
defer r.Body.Close()
|
||||||
requestBody, err := ioutil.ReadAll(r.Body)
|
requestBody, err := io.ReadAll(r.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -71,9 +72,9 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
|
|||||||
var req *baseauth.RegisterRequest
|
var req *baseauth.RegisterRequest
|
||||||
|
|
||||||
defer r.Body.Close()
|
defer r.Body.Close()
|
||||||
requestBody, err := ioutil.ReadAll(r.Body)
|
requestBody, err := io.ReadAll(r.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("received no input in api\n", err)
|
zap.L().Error("received no input in api", zap.Error(err))
|
||||||
RespondError(w, model.BadRequest(err), nil)
|
RespondError(w, model.BadRequest(err), nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -81,7 +82,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
|
|||||||
err = json.Unmarshal(requestBody, &req)
|
err = json.Unmarshal(requestBody, &req)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("received invalid user registration request", zap.Error(err))
|
zap.L().Error("received invalid user registration request", zap.Error(err))
|
||||||
RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
|
RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -89,13 +90,13 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
|
|||||||
// get invite object
|
// get invite object
|
||||||
invite, err := baseauth.ValidateInvite(ctx, req)
|
invite, err := baseauth.ValidateInvite(ctx, req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to validate invite token", err)
|
zap.L().Error("failed to validate invite token", zap.Error(err))
|
||||||
RespondError(w, model.BadRequest(err), nil)
|
RespondError(w, model.BadRequest(err), nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if invite == nil {
|
if invite == nil {
|
||||||
zap.S().Errorf("failed to validate invite token: it is either empty or invalid", err)
|
zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
|
||||||
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
|
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -103,17 +104,17 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
|
|||||||
// get auth domain from email domain
|
// get auth domain from email domain
|
||||||
domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
|
domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
|
||||||
if apierr != nil {
|
if apierr != nil {
|
||||||
zap.S().Errorf("failed to get domain from email", apierr)
|
zap.L().Error("failed to get domain from email", zap.Error(apierr))
|
||||||
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
|
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
precheckResp := &model.PrecheckResponse{
|
precheckResp := &basemodel.PrecheckResponse{
|
||||||
SSO: false,
|
SSO: false,
|
||||||
IsUser: false,
|
IsUser: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
if domain != nil && domain.SsoEnabled {
|
if domain != nil && domain.SsoEnabled {
|
||||||
// so is enabled, create user and respond precheck data
|
// sso is enabled, create user and respond precheck data
|
||||||
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
|
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
|
||||||
if apierr != nil {
|
if apierr != nil {
|
||||||
RespondError(w, apierr, nil)
|
RespondError(w, apierr, nil)
|
||||||
@@ -204,24 +205,24 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
|
|||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
if !ah.CheckFeature(model.SSO) {
|
if !ah.CheckFeature(model.SSO) {
|
||||||
zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
|
zap.L().Error("[receiveGoogleAuth] sso requested but feature unavailable in org domain")
|
||||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
|
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
q := r.URL.Query()
|
q := r.URL.Query()
|
||||||
if errType := q.Get("error"); errType != "" {
|
if errType := q.Get("error"); errType != "" {
|
||||||
zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
|
zap.L().Error("[receiveGoogleAuth] failed to login with google auth", zap.String("error", errType), zap.String("error_description", q.Get("error_description")))
|
||||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
|
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
relayState := q.Get("state")
|
relayState := q.Get("state")
|
||||||
zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
|
zap.L().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
|
||||||
|
|
||||||
parsedState, err := url.Parse(relayState)
|
parsedState, err := url.Parse(relayState)
|
||||||
if err != nil || relayState == "" {
|
if err != nil || relayState == "" {
|
||||||
zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
|
zap.L().Error("[receiveGoogleAuth] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -243,14 +244,14 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
|
|||||||
|
|
||||||
identity, err := callbackHandler.HandleCallback(r)
|
identity, err := callbackHandler.HandleCallback(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
|
zap.L().Error("[receiveGoogleAuth] failed to process HandleCallback ", zap.String("domain", domain.String()), zap.Error(err))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
|
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
|
zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -265,14 +266,14 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
|
|||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
if !ah.CheckFeature(model.SSO) {
|
if !ah.CheckFeature(model.SSO) {
|
||||||
zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
|
zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
|
||||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
|
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
err := r.ParseForm()
|
err := r.ParseForm()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
|
zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -280,11 +281,11 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
|
|||||||
// the relay state is sent when a login request is submitted to
|
// the relay state is sent when a login request is submitted to
|
||||||
// Idp.
|
// Idp.
|
||||||
relayState := r.FormValue("RelayState")
|
relayState := r.FormValue("RelayState")
|
||||||
zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
|
zap.L().Debug("[receiveML] relay state", zap.String("relayState", relayState))
|
||||||
|
|
||||||
parsedState, err := url.Parse(relayState)
|
parsedState, err := url.Parse(relayState)
|
||||||
if err != nil || relayState == "" {
|
if err != nil || relayState == "" {
|
||||||
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
|
zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -301,34 +302,34 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
sp, err := domain.PrepareSamlRequest(parsedState)
|
sp, err := domain.PrepareSamlRequest(parsedState)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
|
zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
|
assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
|
zap.L().Error("[receiveSAML] failed to retrieve assertion info from saml response", zap.String("domain", domain.String()), zap.Error(err))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if assertionInfo.WarningInfo.InvalidTime {
|
if assertionInfo.WarningInfo.InvalidTime {
|
||||||
zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
|
zap.L().Error("[receiveSAML] expired saml response", zap.String("domain", domain.String()), zap.Error(err))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
email := assertionInfo.NameID
|
email := assertionInfo.NameID
|
||||||
if email == "" {
|
if email == "" {
|
||||||
zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
|
zap.L().Error("[receiveSAML] invalid email in the SSO response", zap.String("domain", domain.String()))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
|
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
|
zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
|
||||||
handleSsoError(w, r, redirectUri)
|
handleSsoError(w, r, redirectUri)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
51
ee/query-service/app/api/dashboard.go
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/common"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/model"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ah.lockUnlockDashboard(w, r, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) unlockDashboard(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ah.lockUnlockDashboard(w, r, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request, lock bool) {
|
||||||
|
// Locking can only be done by the owner of the dashboard
|
||||||
|
// or an admin
|
||||||
|
|
||||||
|
// - Fetch the dashboard
|
||||||
|
// - Check if the user is the owner or an admin
|
||||||
|
// - If yes, lock/unlock the dashboard
|
||||||
|
// - If no, return 403
|
||||||
|
|
||||||
|
// Get the dashboard UUID from the request
|
||||||
|
uuid := mux.Vars(r)["uuid"]
|
||||||
|
dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
user := common.GetUserFromContext(r.Context())
|
||||||
|
if !auth.IsAdmin(user) && (dashboard.CreateBy != nil && *dashboard.CreateBy != user.Email) {
|
||||||
|
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: err}, "You are not authorized to lock/unlock this dashboard")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lock/Unlock the dashboard
|
||||||
|
err = dashboards.LockUnlockDashboard(r.Context(), uuid, lock)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ah.Respond(w, "Dashboard updated successfully")
|
||||||
|
}
|
||||||
@@ -4,10 +4,61 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type DayWiseBreakdown struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Breakdown []DayWiseData `json:"breakdown"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DayWiseData struct {
|
||||||
|
Timestamp int64 `json:"timestamp"`
|
||||||
|
Count float64 `json:"count"`
|
||||||
|
Size float64 `json:"size"`
|
||||||
|
UnitPrice float64 `json:"unitPrice"`
|
||||||
|
Quantity float64 `json:"quantity"`
|
||||||
|
Total float64 `json:"total"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type tierBreakdown struct {
|
||||||
|
UnitPrice float64 `json:"unitPrice"`
|
||||||
|
Quantity float64 `json:"quantity"`
|
||||||
|
TierStart int64 `json:"tierStart"`
|
||||||
|
TierEnd int64 `json:"tierEnd"`
|
||||||
|
TierCost float64 `json:"tierCost"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type usageResponse struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Unit string `json:"unit"`
|
||||||
|
Tiers []tierBreakdown `json:"tiers"`
|
||||||
|
DayWiseBreakdown DayWiseBreakdown `json:"dayWiseBreakdown"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type details struct {
|
||||||
|
Total float64 `json:"total"`
|
||||||
|
Breakdown []usageResponse `json:"breakdown"`
|
||||||
|
BaseFee float64 `json:"baseFee"`
|
||||||
|
BillTotal float64 `json:"billTotal"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type billingDetails struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data struct {
|
||||||
|
BillingPeriodStart int64 `json:"billingPeriodStart"`
|
||||||
|
BillingPeriodEnd int64 `json:"billingPeriodEnd"`
|
||||||
|
Details details `json:"details"`
|
||||||
|
Discount float64 `json:"discount"`
|
||||||
|
SubscriptionStatus string `json:"subscriptionStatus"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
|
||||||
licenses, apiError := ah.LM().GetLicenses(context.Background())
|
licenses, apiError := ah.LM().GetLicenses(context.Background())
|
||||||
if apiError != nil {
|
if apiError != nil {
|
||||||
@@ -17,7 +68,6 @@ func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.Background()
|
|
||||||
var l model.License
|
var l model.License
|
||||||
|
|
||||||
if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
|
||||||
@@ -29,8 +79,7 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
|||||||
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
|
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
license, apiError := ah.LM().Activate(r.Context(), l.Key)
|
||||||
license, apiError := ah.LM().Activate(ctx, l.Key)
|
|
||||||
if apiError != nil {
|
if apiError != nil {
|
||||||
RespondError(w, apiError, nil)
|
RespondError(w, apiError, nil)
|
||||||
return
|
return
|
||||||
@@ -38,3 +87,186 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
ah.Respond(w, license)
|
ah.Respond(w, license)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
|
type checkoutResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data struct {
|
||||||
|
RedirectURL string `json:"redirectURL"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
hClient := &http.Client{}
|
||||||
|
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
|
||||||
|
licenseResp, err := hClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode response body
|
||||||
|
var resp checkoutResponse
|
||||||
|
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ah.Respond(w, resp.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
|
||||||
|
licenseKey := r.URL.Query().Get("licenseKey")
|
||||||
|
|
||||||
|
if licenseKey == "" {
|
||||||
|
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
billingURL := fmt.Sprintf("%s/usage?licenseKey=%s", constants.LicenseSignozIo, licenseKey)
|
||||||
|
|
||||||
|
hClient := &http.Client{}
|
||||||
|
req, err := http.NewRequest("GET", billingURL, nil)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
|
||||||
|
billingResp, err := hClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode response body
|
||||||
|
var billingResponse billingDetails
|
||||||
|
if err := json.NewDecoder(billingResp.Body).Decode(&billingResponse); err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(srikanthccv):Fetch the current day usage and add it to the response
|
||||||
|
ah.Respond(w, billingResponse.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
|
licenses, apiError := ah.LM().GetLicenses(context.Background())
|
||||||
|
if apiError != nil {
|
||||||
|
RespondError(w, apiError, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp := model.Licenses{
|
||||||
|
TrialStart: -1,
|
||||||
|
TrialEnd: -1,
|
||||||
|
OnTrial: false,
|
||||||
|
WorkSpaceBlock: false,
|
||||||
|
TrialConvertedToSubscription: false,
|
||||||
|
GracePeriodEnd: -1,
|
||||||
|
Licenses: licenses,
|
||||||
|
}
|
||||||
|
|
||||||
|
var currentActiveLicenseKey string
|
||||||
|
|
||||||
|
for _, license := range licenses {
|
||||||
|
if license.IsCurrent {
|
||||||
|
currentActiveLicenseKey = license.Key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// For the case when no license is applied i.e community edition
|
||||||
|
// There will be no trial details or license details
|
||||||
|
if currentActiveLicenseKey == "" {
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch trial details
|
||||||
|
hClient := &http.Client{}
|
||||||
|
url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
|
||||||
|
req, err := http.NewRequest("GET", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Error while creating request for trial details", zap.Error(err))
|
||||||
|
// If there is an error in fetching trial details, we will still return the license details
|
||||||
|
// to avoid blocking the UI
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
|
||||||
|
trialResp, err := hClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Error while fetching trial details", zap.Error(err))
|
||||||
|
// If there is an error in fetching trial details, we will still return the license details
|
||||||
|
// to avoid incorrectly blocking the UI
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer trialResp.Body.Close()
|
||||||
|
|
||||||
|
trialRespBody, err := io.ReadAll(trialResp.Body)
|
||||||
|
|
||||||
|
if err != nil || trialResp.StatusCode != http.StatusOK {
|
||||||
|
zap.L().Error("Error while fetching trial details", zap.Error(err))
|
||||||
|
// If there is an error in fetching trial details, we will still return the license details
|
||||||
|
// to avoid incorrectly blocking the UI
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode response body
|
||||||
|
var trialRespData model.SubscriptionServerResp
|
||||||
|
|
||||||
|
if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
|
||||||
|
zap.L().Error("Error while decoding trial details", zap.Error(err))
|
||||||
|
// If there is an error in fetching trial details, we will still return the license details
|
||||||
|
// to avoid incorrectly blocking the UI
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp.TrialStart = trialRespData.Data.TrialStart
|
||||||
|
resp.TrialEnd = trialRespData.Data.TrialEnd
|
||||||
|
resp.OnTrial = trialRespData.Data.OnTrial
|
||||||
|
resp.WorkSpaceBlock = trialRespData.Data.WorkSpaceBlock
|
||||||
|
resp.TrialConvertedToSubscription = trialRespData.Data.TrialConvertedToSubscription
|
||||||
|
resp.GracePeriodEnd = trialRespData.Data.GracePeriodEnd
|
||||||
|
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
|
type checkoutResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data struct {
|
||||||
|
RedirectURL string `json:"redirectURL"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
hClient := &http.Client{}
|
||||||
|
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
|
||||||
|
licenseResp, err := hClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode response body
|
||||||
|
var resp checkoutResponse
|
||||||
|
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ah.Respond(w, resp.Data)
|
||||||
|
}
|
||||||
|
|||||||
@@ -18,14 +18,14 @@ import (
|
|||||||
|
|
||||||
func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) {
|
||||||
if !ah.CheckFeature(basemodel.CustomMetricsFunction) {
|
if !ah.CheckFeature(basemodel.CustomMetricsFunction) {
|
||||||
zap.S().Info("CustomMetricsFunction feature is not enabled in this plan")
|
zap.L().Info("CustomMetricsFunction feature is not enabled in this plan")
|
||||||
ah.APIHandler.QueryRangeMetricsV2(w, r)
|
ah.APIHandler.QueryRangeMetricsV2(w, r)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
|
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
|
||||||
|
|
||||||
if apiErrorObj != nil {
|
if apiErrorObj != nil {
|
||||||
zap.S().Errorf(apiErrorObj.Err.Error())
|
zap.L().Error("Error in parsing metric query params", zap.Error(apiErrorObj.Err))
|
||||||
RespondError(w, apiErrorObj, nil)
|
RespondError(w, apiErrorObj, nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -137,8 +137,8 @@ func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request
|
|||||||
var s basemodel.Series
|
var s basemodel.Series
|
||||||
s.QueryName = name
|
s.QueryName = name
|
||||||
s.Labels = v.Metric.Copy().Map()
|
s.Labels = v.Metric.Copy().Map()
|
||||||
for _, p := range v.Points {
|
for _, p := range v.Floats {
|
||||||
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.V})
|
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.F})
|
||||||
}
|
}
|
||||||
seriesList = append(seriesList, &s)
|
seriesList = append(seriesList, &s)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,6 +12,8 @@ import (
|
|||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
|
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
|
||||||
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -27,7 +29,7 @@ func generatePATToken() string {
|
|||||||
func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
req := model.PAT{}
|
req := model.CreatePATRequestBody{}
|
||||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
RespondError(w, model.BadRequest(err), nil)
|
RespondError(w, model.BadRequest(err), nil)
|
||||||
return
|
return
|
||||||
@@ -40,20 +42,87 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
|
|||||||
}, nil)
|
}, nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
pat := model.PAT{
|
||||||
|
Name: req.Name,
|
||||||
|
Role: req.Role,
|
||||||
|
ExpiresAt: req.ExpiresInDays,
|
||||||
|
}
|
||||||
|
err = validatePATRequest(pat)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.BadRequest(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// All the PATs are associated with the user creating the PAT. Hence, the permissions
|
// All the PATs are associated with the user creating the PAT.
|
||||||
// associated with the PAT is also equivalent to that of the user.
|
pat.UserID = user.Id
|
||||||
req.UserID = user.Id
|
pat.CreatedAt = time.Now().Unix()
|
||||||
req.CreatedAt = time.Now().Unix()
|
pat.UpdatedAt = time.Now().Unix()
|
||||||
req.Token = generatePATToken()
|
pat.LastUsed = 0
|
||||||
|
pat.Token = generatePATToken()
|
||||||
|
|
||||||
zap.S().Debugf("Got PAT request: %+v", req)
|
if pat.ExpiresAt != 0 {
|
||||||
if apierr := ah.AppDao().CreatePAT(ctx, &req); apierr != nil {
|
// convert expiresAt to unix timestamp from days
|
||||||
|
pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60)
|
||||||
|
}
|
||||||
|
|
||||||
|
zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
|
||||||
|
var apierr basemodel.BaseApiError
|
||||||
|
if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
|
||||||
RespondError(w, apierr, nil)
|
RespondError(w, apierr, nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
ah.Respond(w, &req)
|
ah.Respond(w, &pat)
|
||||||
|
}
|
||||||
|
|
||||||
|
func validatePATRequest(req model.PAT) error {
|
||||||
|
if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
|
||||||
|
return fmt.Errorf("valid role is required")
|
||||||
|
}
|
||||||
|
if req.ExpiresAt < 0 {
|
||||||
|
return fmt.Errorf("valid expiresAt is required")
|
||||||
|
}
|
||||||
|
if req.Name == "" {
|
||||||
|
return fmt.Errorf("valid name is required")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
req := model.PAT{}
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
|
RespondError(w, model.BadRequest(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
user, err := auth.GetUserFromRequest(r)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, &model.ApiError{
|
||||||
|
Typ: model.ErrorUnauthorized,
|
||||||
|
Err: err,
|
||||||
|
}, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = validatePATRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.BadRequest(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
req.UpdatedByUserID = user.Id
|
||||||
|
id := mux.Vars(r)["id"]
|
||||||
|
req.UpdatedAt = time.Now().Unix()
|
||||||
|
zap.L().Info("Got Update PAT request", zap.Any("pat", req))
|
||||||
|
var apierr basemodel.BaseApiError
|
||||||
|
if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
|
||||||
|
RespondError(w, apierr, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ah.Respond(w, map[string]string{"data": "pat updated successfully"})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
|
||||||
@@ -66,8 +135,8 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
|
|||||||
}, nil)
|
}, nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
zap.S().Infof("Get PATs for user: %+v", user.Id)
|
zap.L().Info("Get PATs for user", zap.String("user_id", user.Id))
|
||||||
pats, apierr := ah.AppDao().ListPATs(ctx, user.Id)
|
pats, apierr := ah.AppDao().ListPATs(ctx)
|
||||||
if apierr != nil {
|
if apierr != nil {
|
||||||
RespondError(w, apierr, nil)
|
RespondError(w, apierr, nil)
|
||||||
return
|
return
|
||||||
@@ -75,7 +144,7 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
|
|||||||
ah.Respond(w, pats)
|
ah.Respond(w, pats)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) deletePAT(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
id := mux.Vars(r)["id"]
|
id := mux.Vars(r)["id"]
|
||||||
user, err := auth.GetUserFromRequest(r)
|
user, err := auth.GetUserFromRequest(r)
|
||||||
@@ -86,22 +155,11 @@ func (ah *APIHandler) deletePAT(w http.ResponseWriter, r *http.Request) {
|
|||||||
}, nil)
|
}, nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
pat, apierr := ah.AppDao().GetPATByID(ctx, id)
|
|
||||||
if apierr != nil {
|
zap.L().Info("Revoke PAT with id", zap.String("id", id))
|
||||||
|
if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil {
|
||||||
RespondError(w, apierr, nil)
|
RespondError(w, apierr, nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if pat.UserID != user.Id {
|
ah.Respond(w, map[string]string{"data": "pat revoked successfully"})
|
||||||
RespondError(w, &model.ApiError{
|
|
||||||
Typ: model.ErrorUnauthorized,
|
|
||||||
Err: fmt.Errorf("unauthorized PAT delete request"),
|
|
||||||
}, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
zap.S().Debugf("Delete PAT with id: %+v", id)
|
|
||||||
if apierr := ah.AppDao().DeletePAT(ctx, id); apierr != nil {
|
|
||||||
RespondError(w, apierr, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ah.Respond(w, map[string]string{"data": "pat deleted successfully"})
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
if !ah.CheckFeature(basemodel.SmartTraceDetail) {
|
if !ah.CheckFeature(basemodel.SmartTraceDetail) {
|
||||||
zap.S().Info("SmartTraceDetail feature is not enabled in this plan")
|
zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
|
||||||
ah.APIHandler.SearchTraces(w, r)
|
ah.APIHandler.SearchTraces(w, r)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -26,7 +26,7 @@ func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
spanLimit, err := strconv.Atoi(constants.SpanLimitStr)
|
spanLimit, err := strconv.Atoi(constants.SpanLimitStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable: ", err)
|
zap.L().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm)
|
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm)
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ import (
|
|||||||
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
|
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
|
||||||
|
|
||||||
defer utils.Elapsed("GetMetricResult")()
|
defer utils.Elapsed("GetMetricResult")()
|
||||||
zap.S().Infof("Executing metric result query: %s", query)
|
zap.L().Info("Executing metric result query: ", zap.String("query", query))
|
||||||
|
|
||||||
var hash string
|
var hash string
|
||||||
// If getSubTreeSpans function is used in the clickhouse query
|
// If getSubTreeSpans function is used in the clickhouse query
|
||||||
@@ -38,9 +38,8 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
rows, err := r.conn.Query(ctx, query)
|
rows, err := r.conn.Query(ctx, query)
|
||||||
zap.S().Debug(query)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Debug("Error in processing query: ", err)
|
zap.L().Error("Error in processing query", zap.Error(err))
|
||||||
return nil, "", fmt.Errorf("error in processing query")
|
return nil, "", fmt.Errorf("error in processing query")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -117,7 +116,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
|
|||||||
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
|
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
zap.S().Errorf("invalid var found in metric builder query result", v, colName)
|
zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
sort.Strings(groupBy)
|
sort.Strings(groupBy)
|
||||||
@@ -140,7 +139,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
|
|||||||
}
|
}
|
||||||
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
|
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
|
||||||
// if err != nil {
|
// if err != nil {
|
||||||
// zap.S().Error("Error in dropping temporary table: ", err)
|
// zap.L().Error("Error in dropping temporary table: ", err)
|
||||||
// return nil, err
|
// return nil, err
|
||||||
// }
|
// }
|
||||||
if hash == "" {
|
if hash == "" {
|
||||||
@@ -152,7 +151,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
|
|||||||
|
|
||||||
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
|
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
|
||||||
|
|
||||||
zap.S().Debugf("Executing getSubTreeSpans function")
|
zap.L().Debug("Executing getSubTreeSpans function")
|
||||||
|
|
||||||
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
|
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
|
||||||
|
|
||||||
@@ -162,28 +161,28 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
|
|||||||
|
|
||||||
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
|
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Error in dropping temporary table: ", err)
|
zap.L().Error("Error in dropping temporary table", zap.Error(err))
|
||||||
return query, hash, err
|
return query, hash, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create temporary table to store the getSubTreeSpans() results
|
// Create temporary table to store the getSubTreeSpans() results
|
||||||
zap.S().Debugf("Creating temporary table getSubTreeSpans%s", hash)
|
zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash))
|
||||||
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
|
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Error in creating temporary table: ", err)
|
zap.L().Error("Error in creating temporary table", zap.Error(err))
|
||||||
return query, hash, err
|
return query, hash, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
|
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
|
||||||
getSpansSubQuery := subtreeInput
|
getSpansSubQuery := subtreeInput
|
||||||
// Execute the subTree query
|
// Execute the subTree query
|
||||||
zap.S().Debugf("Executing subTree query: %s", getSpansSubQuery)
|
zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery))
|
||||||
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
|
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
|
||||||
|
|
||||||
// zap.S().Info(getSpansSubQuery)
|
// zap.L().Info(getSpansSubQuery)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Debug("Error in processing sql query: ", err)
|
zap.L().Error("Error in processing sql query", zap.Error(err))
|
||||||
return query, hash, fmt.Errorf("Error in processing sql query")
|
return query, hash, fmt.Errorf("Error in processing sql query")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -196,16 +195,16 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
|
|||||||
if len(getSpansSubQueryDBResponses) == 0 {
|
if len(getSpansSubQueryDBResponses) == 0 {
|
||||||
return query, hash, fmt.Errorf("No spans found for the given query")
|
return query, hash, fmt.Errorf("No spans found for the given query")
|
||||||
}
|
}
|
||||||
zap.S().Debugf("Executing query to fetch all the spans from the same TraceID: %s", modelQuery)
|
zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery))
|
||||||
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
|
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Debug("Error in processing sql query: ", err)
|
zap.L().Error("Error in processing sql query", zap.Error(err))
|
||||||
return query, hash, fmt.Errorf("Error in processing sql query")
|
return query, hash, fmt.Errorf("Error in processing sql query")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process model to fetch the spans
|
// Process model to fetch the spans
|
||||||
zap.S().Debugf("Processing model to fetch the spans")
|
zap.L().Debug("Processing model to fetch the spans")
|
||||||
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
|
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
|
||||||
for _, item := range searchScanResponses {
|
for _, item := range searchScanResponses {
|
||||||
var jsonItem basemodel.SearchSpanResponseItem
|
var jsonItem basemodel.SearchSpanResponseItem
|
||||||
@@ -218,17 +217,17 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
|
|||||||
}
|
}
|
||||||
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
|
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
|
||||||
// Use map to store pointer to the spans to avoid duplicates and save memory
|
// Use map to store pointer to the spans to avoid duplicates and save memory
|
||||||
zap.S().Debugf("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans%s", hash)
|
zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
|
||||||
|
|
||||||
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
|
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Error in getSubTreeAlgorithm function: ", err)
|
zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err))
|
||||||
return query, hash, err
|
return query, hash, err
|
||||||
}
|
}
|
||||||
zap.S().Debugf("Preparing batch to store subtree spans in temporary table getSubTreeSpans%s", hash)
|
zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
|
||||||
statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
|
statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Error in preparing batch statement: ", err)
|
zap.L().Error("Error in preparing batch statement", zap.Error(err))
|
||||||
return query, hash, err
|
return query, hash, err
|
||||||
}
|
}
|
||||||
for _, span := range treeSearchResponse {
|
for _, span := range treeSearchResponse {
|
||||||
@@ -251,14 +250,14 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
|
|||||||
span.Events,
|
span.Events,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Debug("Error in processing sql query: ", err)
|
zap.L().Error("Error in processing sql query", zap.Error(err))
|
||||||
return query, hash, err
|
return query, hash, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
zap.S().Debugf("Inserting the subtree spans in temporary table getSubTreeSpans%s", hash)
|
zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
|
||||||
err = statement.Send()
|
err = statement.Send()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Error in sending statement: ", err)
|
zap.L().Error("Error in sending statement", zap.Error(err))
|
||||||
return query, hash, err
|
return query, hash, err
|
||||||
}
|
}
|
||||||
return query, hash, nil
|
return query, hash, nil
|
||||||
@@ -323,7 +322,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
|
|||||||
spans = append(spans, span)
|
spans = append(spans, span)
|
||||||
}
|
}
|
||||||
|
|
||||||
zap.S().Debug("Building Tree")
|
zap.L().Debug("Building Tree")
|
||||||
roots, err := buildSpanTrees(&spans)
|
roots, err := buildSpanTrees(&spans)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -333,7 +332,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
|
|||||||
// For each root, get the subtree spans
|
// For each root, get the subtree spans
|
||||||
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
|
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
|
||||||
targetSpan := &model.SpanForTraceDetails{}
|
targetSpan := &model.SpanForTraceDetails{}
|
||||||
// zap.S().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
|
// zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
|
||||||
// Search target span object in the tree
|
// Search target span object in the tree
|
||||||
for _, root := range roots {
|
for _, root := range roots {
|
||||||
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
|
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
|
||||||
@@ -341,7 +340,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Error during BreadthFirstSearch(): ", err)
|
zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -24,8 +24,9 @@ func NewDataConnector(
|
|||||||
maxIdleConns int,
|
maxIdleConns int,
|
||||||
maxOpenConns int,
|
maxOpenConns int,
|
||||||
dialTimeout time.Duration,
|
dialTimeout time.Duration,
|
||||||
|
cluster string,
|
||||||
) *ClickhouseReader {
|
) *ClickhouseReader {
|
||||||
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout)
|
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
|
||||||
return &ClickhouseReader{
|
return &ClickhouseReader{
|
||||||
conn: ch.GetConn(),
|
conn: ch.GetConn(),
|
||||||
appdb: localDB,
|
appdb: localDB,
|
||||||
|
|||||||
@@ -49,7 +49,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Error during BreadthFirstSearch(): ", err)
|
zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -186,7 +186,7 @@ func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTra
|
|||||||
|
|
||||||
// If the parent span is not found, add current span to list of roots
|
// If the parent span is not found, add current span to list of roots
|
||||||
if parent == nil {
|
if parent == nil {
|
||||||
// zap.S().Debug("Parent Span not found parent_id: ", span.ParentID)
|
// zap.L().Debug("Parent Span not found parent_id: ", span.ParentID)
|
||||||
roots = append(roots, span)
|
roots = append(roots, span)
|
||||||
span.ParentID = ""
|
span.ParentID = ""
|
||||||
continue
|
continue
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
_ "net/http/pprof" // http profiler
|
_ "net/http/pprof" // http profiler
|
||||||
@@ -20,9 +20,13 @@ import (
|
|||||||
"github.com/soheilhy/cmux"
|
"github.com/soheilhy/cmux"
|
||||||
"go.signoz.io/signoz/ee/query-service/app/api"
|
"go.signoz.io/signoz/ee/query-service/app/api"
|
||||||
"go.signoz.io/signoz/ee/query-service/app/db"
|
"go.signoz.io/signoz/ee/query-service/app/db"
|
||||||
|
"go.signoz.io/signoz/ee/query-service/auth"
|
||||||
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
"go.signoz.io/signoz/ee/query-service/dao"
|
"go.signoz.io/signoz/ee/query-service/dao"
|
||||||
"go.signoz.io/signoz/ee/query-service/interfaces"
|
"go.signoz.io/signoz/ee/query-service/interfaces"
|
||||||
|
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
baseInterface "go.signoz.io/signoz/pkg/query-service/interfaces"
|
baseInterface "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||||
|
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
|
||||||
|
|
||||||
licensepkg "go.signoz.io/signoz/ee/query-service/license"
|
licensepkg "go.signoz.io/signoz/ee/query-service/license"
|
||||||
"go.signoz.io/signoz/ee/query-service/usage"
|
"go.signoz.io/signoz/ee/query-service/usage"
|
||||||
@@ -31,10 +35,11 @@ import (
|
|||||||
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
||||||
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
||||||
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
|
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/app/integrations"
|
||||||
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
|
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
|
||||||
"go.signoz.io/signoz/pkg/query-service/app/opamp"
|
"go.signoz.io/signoz/pkg/query-service/app/opamp"
|
||||||
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
|
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
|
||||||
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
"go.signoz.io/signoz/pkg/query-service/cache"
|
||||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||||
"go.signoz.io/signoz/pkg/query-service/healthcheck"
|
"go.signoz.io/signoz/pkg/query-service/healthcheck"
|
||||||
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
|
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
|
||||||
@@ -62,6 +67,9 @@ type ServerOptions struct {
|
|||||||
MaxIdleConns int
|
MaxIdleConns int
|
||||||
MaxOpenConns int
|
MaxOpenConns int
|
||||||
DialTimeout time.Duration
|
DialTimeout time.Duration
|
||||||
|
CacheConfigPath string
|
||||||
|
FluxInterval string
|
||||||
|
Cluster string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Server runs HTTP api service
|
// Server runs HTTP api service
|
||||||
@@ -85,6 +93,8 @@ type Server struct {
|
|||||||
// Usage manager
|
// Usage manager
|
||||||
usageManager *usage.Manager
|
usageManager *usage.Manager
|
||||||
|
|
||||||
|
opampServer *opamp.Server
|
||||||
|
|
||||||
unavailableChannel chan healthcheck.Status
|
unavailableChannel chan healthcheck.Status
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -124,7 +134,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
var reader interfaces.DataConnector
|
var reader interfaces.DataConnector
|
||||||
storage := os.Getenv("STORAGE")
|
storage := os.Getenv("STORAGE")
|
||||||
if storage == "clickhouse" {
|
if storage == "clickhouse" {
|
||||||
zap.S().Info("Using ClickHouse as datastore ...")
|
zap.L().Info("Using ClickHouse as datastore ...")
|
||||||
qb := db.NewDataConnector(
|
qb := db.NewDataConnector(
|
||||||
localDB,
|
localDB,
|
||||||
serverOptions.PromConfigPath,
|
serverOptions.PromConfigPath,
|
||||||
@@ -132,6 +142,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
serverOptions.MaxIdleConns,
|
serverOptions.MaxIdleConns,
|
||||||
serverOptions.MaxOpenConns,
|
serverOptions.MaxOpenConns,
|
||||||
serverOptions.DialTimeout,
|
serverOptions.DialTimeout,
|
||||||
|
serverOptions.Cluster,
|
||||||
)
|
)
|
||||||
go qb.Start(readerReady)
|
go qb.Start(readerReady)
|
||||||
reader = qb
|
reader = qb
|
||||||
@@ -161,24 +172,38 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// initiate opamp
|
// initiate opamp
|
||||||
_, err = opAmpModel.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
|
_, err = opAmpModel.InitDB(localDB)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
integrationsController, err := integrations.NewController(localDB)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"couldn't create integrations controller: %w", err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ingestion pipelines manager
|
||||||
|
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
|
||||||
|
localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// initiate agent config handler
|
// initiate agent config handler
|
||||||
if err := agentConf.Initiate(localDB, AppDbEngine); err != nil {
|
agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
|
||||||
return nil, err
|
DB: localDB,
|
||||||
}
|
DBEngine: AppDbEngine,
|
||||||
|
AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
|
||||||
// ingestion pipelines manager
|
})
|
||||||
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(localDB, "sqlite")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// start the usagemanager
|
// start the usagemanager
|
||||||
usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn())
|
usageManager, err := usage.New("sqlite", modelDao, lm.GetRepo(), reader.GetConn())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -188,6 +213,22 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
telemetry.GetInstance().SetReader(reader)
|
telemetry.GetInstance().SetReader(reader)
|
||||||
|
telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
|
||||||
|
|
||||||
|
var c cache.Cache
|
||||||
|
if serverOptions.CacheConfigPath != "" {
|
||||||
|
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
c = cache.NewCache(cacheOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
apiOpts := api.APIHandlerOptions{
|
apiOpts := api.APIHandlerOptions{
|
||||||
DataConnector: reader,
|
DataConnector: reader,
|
||||||
@@ -199,9 +240,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
DialTimeout: serverOptions.DialTimeout,
|
DialTimeout: serverOptions.DialTimeout,
|
||||||
AppDao: modelDao,
|
AppDao: modelDao,
|
||||||
RulesManager: rm,
|
RulesManager: rm,
|
||||||
|
UsageManager: usageManager,
|
||||||
FeatureFlags: lm,
|
FeatureFlags: lm,
|
||||||
LicenseManager: lm,
|
LicenseManager: lm,
|
||||||
|
IntegrationsController: integrationsController,
|
||||||
LogsParsingPipelineController: logParsingPipelineController,
|
LogsParsingPipelineController: logParsingPipelineController,
|
||||||
|
Cache: c,
|
||||||
|
FluxInterval: fluxInterval,
|
||||||
}
|
}
|
||||||
|
|
||||||
apiHandler, err := api.NewAPIHandler(apiOpts)
|
apiHandler, err := api.NewAPIHandler(apiOpts)
|
||||||
@@ -233,6 +278,10 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
|
|
||||||
s.privateHTTP = privateServer
|
s.privateHTTP = privateServer
|
||||||
|
|
||||||
|
s.opampServer = opamp.InitializeServer(
|
||||||
|
&opAmpModel.AllAgents, agentConfMgr,
|
||||||
|
)
|
||||||
|
|
||||||
return s, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -240,6 +289,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
|
|||||||
|
|
||||||
r := mux.NewRouter()
|
r := mux.NewRouter()
|
||||||
|
|
||||||
|
r.Use(baseapp.LogCommentEnricher)
|
||||||
r.Use(setTimeoutMiddleware)
|
r.Use(setTimeoutMiddleware)
|
||||||
r.Use(s.analyticsMiddleware)
|
r.Use(s.analyticsMiddleware)
|
||||||
r.Use(loggingMiddlewarePrivate)
|
r.Use(loggingMiddlewarePrivate)
|
||||||
@@ -266,25 +316,13 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
|
|||||||
|
|
||||||
r := mux.NewRouter()
|
r := mux.NewRouter()
|
||||||
|
|
||||||
|
// add auth middleware
|
||||||
getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
|
getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
|
||||||
patToken := r.Header.Get("SIGNOZ-API-KEY")
|
return auth.GetUserFromRequest(r, apiHandler)
|
||||||
if len(patToken) > 0 {
|
|
||||||
zap.S().Debugf("Received a non-zero length PAT token")
|
|
||||||
ctx := context.Background()
|
|
||||||
dao := apiHandler.AppDao()
|
|
||||||
|
|
||||||
user, err := dao.GetUserByPAT(ctx, patToken)
|
|
||||||
if err == nil && user != nil {
|
|
||||||
zap.S().Debugf("Found valid PAT user: %+v", user)
|
|
||||||
return user, nil
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Debugf("Error while getting user for PAT: %+v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return baseauth.GetUserFromRequest(r)
|
|
||||||
}
|
}
|
||||||
am := baseapp.NewAuthMiddleware(getUserFromRequest)
|
am := baseapp.NewAuthMiddleware(getUserFromRequest)
|
||||||
|
|
||||||
|
r.Use(baseapp.LogCommentEnricher)
|
||||||
r.Use(setTimeoutMiddleware)
|
r.Use(setTimeoutMiddleware)
|
||||||
r.Use(s.analyticsMiddleware)
|
r.Use(s.analyticsMiddleware)
|
||||||
r.Use(loggingMiddleware)
|
r.Use(loggingMiddleware)
|
||||||
@@ -292,7 +330,9 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
|
|||||||
apiHandler.RegisterRoutes(r, am)
|
apiHandler.RegisterRoutes(r, am)
|
||||||
apiHandler.RegisterMetricsRoutes(r, am)
|
apiHandler.RegisterMetricsRoutes(r, am)
|
||||||
apiHandler.RegisterLogsRoutes(r, am)
|
apiHandler.RegisterLogsRoutes(r, am)
|
||||||
|
apiHandler.RegisterIntegrationRoutes(r, am)
|
||||||
apiHandler.RegisterQueryRangeV3Routes(r, am)
|
apiHandler.RegisterQueryRangeV3Routes(r, am)
|
||||||
|
apiHandler.RegisterQueryRangeV4Routes(r, am)
|
||||||
|
|
||||||
c := cors.New(cors.Options{
|
c := cors.New(cors.Options{
|
||||||
AllowedOrigins: []string{"*"},
|
AllowedOrigins: []string{"*"},
|
||||||
@@ -353,20 +393,20 @@ func (lrw *loggingResponseWriter) Flush() {
|
|||||||
lrw.ResponseWriter.(http.Flusher).Flush()
|
lrw.ResponseWriter.(http.Flusher).Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
|
func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface{}, bool) {
|
||||||
pathToExtractBodyFrom := "/api/v2/metrics/query_range"
|
pathToExtractBodyFrom := "/api/v3/query_range"
|
||||||
|
|
||||||
data := map[string]interface{}{}
|
data := map[string]interface{}{}
|
||||||
var postData *basemodel.QueryRangeParamsV2
|
var postData *v3.QueryRangeParamsV3
|
||||||
|
|
||||||
if path == pathToExtractBodyFrom && (r.Method == "POST") {
|
if path == pathToExtractBodyFrom && (r.Method == "POST") {
|
||||||
if r.Body != nil {
|
if r.Body != nil {
|
||||||
bodyBytes, err := ioutil.ReadAll(r.Body)
|
bodyBytes, err := io.ReadAll(r.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
r.Body.Close() // must close
|
r.Body.Close() // must close
|
||||||
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
|
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
|
||||||
json.Unmarshal(bodyBytes, &postData)
|
json.Unmarshal(bodyBytes, &postData)
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
@@ -377,24 +417,37 @@ func extractDashboardMetaData(path string, r *http.Request) (map[string]interfac
|
|||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
|
|
||||||
signozMetricNotFound := false
|
signozMetricsUsed := false
|
||||||
|
signozLogsUsed := false
|
||||||
|
signozTracesUsed := false
|
||||||
if postData != nil {
|
if postData != nil {
|
||||||
signozMetricNotFound = telemetry.GetInstance().CheckSigNozMetricsV2(postData.CompositeMetricQuery)
|
|
||||||
|
|
||||||
if postData.CompositeMetricQuery != nil {
|
if postData.CompositeQuery != nil {
|
||||||
data["queryType"] = postData.CompositeMetricQuery.QueryType
|
data["queryType"] = postData.CompositeQuery.QueryType
|
||||||
data["panelType"] = postData.CompositeMetricQuery.PanelType
|
data["panelType"] = postData.CompositeQuery.PanelType
|
||||||
|
|
||||||
|
signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
|
||||||
}
|
}
|
||||||
|
|
||||||
data["datasource"] = postData.DataSource
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if signozMetricNotFound {
|
if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
|
||||||
telemetry.GetInstance().AddActiveMetricsUser()
|
if signozMetricsUsed {
|
||||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
|
telemetry.GetInstance().AddActiveMetricsUser()
|
||||||
|
}
|
||||||
|
if signozLogsUsed {
|
||||||
|
telemetry.GetInstance().AddActiveLogsUser()
|
||||||
|
}
|
||||||
|
if signozTracesUsed {
|
||||||
|
telemetry.GetInstance().AddActiveTracesUser()
|
||||||
|
}
|
||||||
|
data["metricsUsed"] = signozMetricsUsed
|
||||||
|
data["logsUsed"] = signozLogsUsed
|
||||||
|
data["tracesUsed"] = signozTracesUsed
|
||||||
|
userEmail, err := baseauth.GetEmailFromJwt(r.Context())
|
||||||
|
if err == nil {
|
||||||
|
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail, true, false)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return data, true
|
return data, true
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -414,10 +467,12 @@ func getActiveLogs(path string, r *http.Request) {
|
|||||||
|
|
||||||
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
|
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := baseauth.AttachJwtToContext(r.Context(), r)
|
||||||
|
r = r.WithContext(ctx)
|
||||||
route := mux.CurrentRoute(r)
|
route := mux.CurrentRoute(r)
|
||||||
path, _ := route.GetPathTemplate()
|
path, _ := route.GetPathTemplate()
|
||||||
|
|
||||||
dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
|
queryRangeV3data, metadataExists := extractQueryRangeV3Data(path, r)
|
||||||
getActiveLogs(path, r)
|
getActiveLogs(path, r)
|
||||||
|
|
||||||
lrw := NewLoggingResponseWriter(w)
|
lrw := NewLoggingResponseWriter(w)
|
||||||
@@ -425,13 +480,16 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
|
|||||||
|
|
||||||
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
|
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
|
||||||
if metadataExists {
|
if metadataExists {
|
||||||
for key, value := range dashboardMetadata {
|
for key, value := range queryRangeV3data {
|
||||||
data[key] = value
|
data[key] = value
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
|
if _, ok := telemetry.EnabledPaths()[path]; ok {
|
||||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
|
userEmail, err := baseauth.GetEmailFromJwt(r.Context())
|
||||||
|
if err == nil {
|
||||||
|
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail, true, false)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
})
|
})
|
||||||
@@ -467,7 +525,7 @@ func (s *Server) initListeners() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
|
zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
|
||||||
|
|
||||||
// listen on private port to support internal services
|
// listen on private port to support internal services
|
||||||
privateHostPort := s.serverOptions.PrivateHostPort
|
privateHostPort := s.serverOptions.PrivateHostPort
|
||||||
@@ -480,7 +538,7 @@ func (s *Server) initListeners() error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
|
zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -492,7 +550,7 @@ func (s *Server) Start() error {
|
|||||||
if !s.serverOptions.DisableRules {
|
if !s.serverOptions.DisableRules {
|
||||||
s.ruleManager.Start()
|
s.ruleManager.Start()
|
||||||
} else {
|
} else {
|
||||||
zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
|
zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
|
||||||
}
|
}
|
||||||
|
|
||||||
err := s.initListeners()
|
err := s.initListeners()
|
||||||
@@ -506,23 +564,23 @@ func (s *Server) Start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
|
zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
|
||||||
|
|
||||||
switch err := s.httpServer.Serve(s.httpConn); err {
|
switch err := s.httpServer.Serve(s.httpConn); err {
|
||||||
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
|
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
|
||||||
// normal exit, nothing to do
|
// normal exit, nothing to do
|
||||||
default:
|
default:
|
||||||
zap.S().Error("Could not start HTTP server", zap.Error(err))
|
zap.L().Error("Could not start HTTP server", zap.Error(err))
|
||||||
}
|
}
|
||||||
s.unavailableChannel <- healthcheck.Unavailable
|
s.unavailableChannel <- healthcheck.Unavailable
|
||||||
}()
|
}()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
zap.S().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
|
zap.L().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
|
||||||
|
|
||||||
err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
|
err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Could not start pprof server", zap.Error(err))
|
zap.L().Error("Could not start pprof server", zap.Error(err))
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@@ -532,14 +590,14 @@ func (s *Server) Start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
|
zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
|
||||||
|
|
||||||
switch err := s.privateHTTP.Serve(s.privateConn); err {
|
switch err := s.privateHTTP.Serve(s.privateConn); err {
|
||||||
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
|
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
|
||||||
// normal exit, nothing to do
|
// normal exit, nothing to do
|
||||||
zap.S().Info("private http server closed")
|
zap.L().Info("private http server closed")
|
||||||
default:
|
default:
|
||||||
zap.S().Error("Could not start private HTTP server", zap.Error(err))
|
zap.L().Error("Could not start private HTTP server", zap.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
s.unavailableChannel <- healthcheck.Unavailable
|
s.unavailableChannel <- healthcheck.Unavailable
|
||||||
@@ -547,10 +605,10 @@ func (s *Server) Start() error {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
|
zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
|
||||||
err := opamp.InitalizeServer(baseconst.OpAmpWsEndpoint, &opAmpModel.AllAgents)
|
err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Info("opamp ws server failed to start", err)
|
zap.L().Error("opamp ws server failed to start", zap.Error(err))
|
||||||
s.unavailableChannel <- healthcheck.Unavailable
|
s.unavailableChannel <- healthcheck.Unavailable
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@@ -571,7 +629,7 @@ func (s *Server) Stop() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
opamp.StopServer()
|
s.opampServer.Stop()
|
||||||
|
|
||||||
if s.ruleManager != nil {
|
if s.ruleManager != nil {
|
||||||
s.ruleManager.Stop()
|
s.ruleManager.Stop()
|
||||||
@@ -626,7 +684,7 @@ func makeRulesManager(
|
|||||||
return nil, fmt.Errorf("rule manager error: %v", err)
|
return nil, fmt.Errorf("rule manager error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
zap.S().Info("rules manager is ready")
|
zap.L().Info("rules manager is ready")
|
||||||
|
|
||||||
return manager, nil
|
return manager, nil
|
||||||
}
|
}
|
||||||
|
|||||||
56
ee/query-service/auth/auth.go
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
package auth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.signoz.io/signoz/ee/query-service/app/api"
|
||||||
|
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/telemetry"
|
||||||
|
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel.UserPayload, error) {
|
||||||
|
patToken := r.Header.Get("SIGNOZ-API-KEY")
|
||||||
|
if len(patToken) > 0 {
|
||||||
|
zap.L().Debug("Received a non-zero length PAT token")
|
||||||
|
ctx := context.Background()
|
||||||
|
dao := apiHandler.AppDao()
|
||||||
|
|
||||||
|
pat, err := dao.GetPAT(ctx, patToken)
|
||||||
|
if err == nil && pat != nil {
|
||||||
|
zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
|
||||||
|
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
|
||||||
|
zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
|
||||||
|
return nil, fmt.Errorf("PAT has expired")
|
||||||
|
}
|
||||||
|
group, apiErr := dao.GetGroupByName(ctx, pat.Role)
|
||||||
|
if apiErr != nil {
|
||||||
|
zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
|
||||||
|
return nil, apiErr
|
||||||
|
}
|
||||||
|
user, err := dao.GetUser(ctx, pat.UserID)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
telemetry.GetInstance().SetPatTokenUser()
|
||||||
|
dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
|
||||||
|
user.User.GroupId = group.Id
|
||||||
|
user.User.Id = pat.Id
|
||||||
|
return &basemodel.UserPayload{
|
||||||
|
User: user.User,
|
||||||
|
Role: pat.Role,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return baseauth.GetUserFromRequest(r)
|
||||||
|
}
|
||||||
@@ -9,7 +9,8 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var LicenseSignozIo = "https://license.signoz.io/api/v1"
|
var LicenseSignozIo = "https://license.signoz.io/api/v1"
|
||||||
|
var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
|
||||||
|
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
|
||||||
var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
|
var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
|
||||||
|
|
||||||
func GetOrDefaultEnv(key string, fallback string) string {
|
func GetOrDefaultEnv(key string, fallback string) string {
|
||||||
|
|||||||
@@ -21,7 +21,6 @@ type ModelDao interface {
|
|||||||
DB() *sqlx.DB
|
DB() *sqlx.DB
|
||||||
|
|
||||||
// auth methods
|
// auth methods
|
||||||
PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
|
|
||||||
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
|
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
|
||||||
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
|
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
|
||||||
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
|
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
|
||||||
@@ -34,10 +33,12 @@ type ModelDao interface {
|
|||||||
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
|
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
|
||||||
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
|
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
|
||||||
|
|
||||||
CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError
|
CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError)
|
||||||
|
UpdatePAT(ctx context.Context, p model.PAT, id string) (basemodel.BaseApiError)
|
||||||
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
|
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
|
||||||
|
UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError
|
||||||
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
|
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
|
||||||
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
|
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
|
||||||
ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError)
|
ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError)
|
||||||
DeletePAT(ctx context.Context, id string) basemodel.BaseApiError
|
RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,35 +5,93 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
"go.signoz.io/signoz/ee/query-service/constants"
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
|
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
"go.signoz.io/signoz/pkg/query-service/utils"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PrepareSsoRedirect prepares redirect page link after SSO response
|
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
|
||||||
|
// get auth domain from email domain
|
||||||
|
domain, apierr := m.GetDomainByEmail(ctx, email)
|
||||||
|
|
||||||
|
if apierr != nil {
|
||||||
|
zap.L().Error("failed to get domain from email", zap.Error(apierr))
|
||||||
|
return nil, model.InternalErrorStr("failed to get domain from email")
|
||||||
|
}
|
||||||
|
|
||||||
|
hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
|
||||||
|
return nil, model.InternalErrorStr("failed to generate password hash")
|
||||||
|
}
|
||||||
|
|
||||||
|
group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
|
||||||
|
if apiErr != nil {
|
||||||
|
zap.L().Error("GetGroupByName failed", zap.Error(apiErr))
|
||||||
|
return nil, apiErr
|
||||||
|
}
|
||||||
|
|
||||||
|
user := &basemodel.User{
|
||||||
|
Id: uuid.NewString(),
|
||||||
|
Name: "",
|
||||||
|
Email: email,
|
||||||
|
Password: hash,
|
||||||
|
CreatedAt: time.Now().Unix(),
|
||||||
|
ProfilePictureURL: "", // Currently unused
|
||||||
|
GroupId: group.Id,
|
||||||
|
OrgId: domain.OrgId,
|
||||||
|
}
|
||||||
|
|
||||||
|
user, apiErr = m.CreateUser(ctx, user, false)
|
||||||
|
if apiErr != nil {
|
||||||
|
zap.L().Error("CreateUser failed", zap.Error(apiErr))
|
||||||
|
return nil, apiErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return user, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrepareSsoRedirect prepares redirect page link after SSO response
|
||||||
// is successfully parsed (i.e. valid email is available)
|
// is successfully parsed (i.e. valid email is available)
|
||||||
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
|
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
|
||||||
|
|
||||||
userPayload, apierr := m.GetUserByEmail(ctx, email)
|
userPayload, apierr := m.GetUserByEmail(ctx, email)
|
||||||
if !apierr.IsNil() {
|
if !apierr.IsNil() {
|
||||||
zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error())
|
zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
|
||||||
return "", model.BadRequestStr("invalid user email received from the auth provider")
|
return "", model.BadRequestStr("invalid user email received from the auth provider")
|
||||||
}
|
}
|
||||||
|
|
||||||
tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
|
user := &basemodel.User{}
|
||||||
|
|
||||||
|
if userPayload == nil {
|
||||||
|
newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
|
||||||
|
user = newUser
|
||||||
|
if apiErr != nil {
|
||||||
|
zap.L().Error("failed to create user with email received from auth provider", zap.Error(apiErr))
|
||||||
|
return "", apiErr
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
user = &userPayload.User
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenStore, err := baseauth.GenerateJWTForUser(user)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to generate token for SSO login user", err)
|
zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
|
||||||
return "", model.InternalErrorStr("failed to generate token for the user")
|
return "", model.InternalErrorStr("failed to generate token for the user")
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
|
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
|
||||||
redirectUri,
|
redirectUri,
|
||||||
tokenStore.AccessJwt,
|
tokenStore.AccessJwt,
|
||||||
userPayload.User.Id,
|
user.Id,
|
||||||
tokenStore.RefreshJwt), nil
|
tokenStore.RefreshJwt), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -62,10 +120,10 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base
|
|||||||
|
|
||||||
// PrecheckLogin is called when the login or signup page is loaded
|
// PrecheckLogin is called when the login or signup page is loaded
|
||||||
// to check sso login is to be prompted
|
// to check sso login is to be prompted
|
||||||
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError) {
|
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) {
|
||||||
|
|
||||||
// assume user is valid unless proven otherwise
|
// assume user is valid unless proven otherwise
|
||||||
resp := &model.PrecheckResponse{IsUser: true, CanSelfRegister: false}
|
resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}
|
||||||
|
|
||||||
// check if email is a valid user
|
// check if email is a valid user
|
||||||
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
|
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
|
||||||
@@ -76,6 +134,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
|
|||||||
if userPayload == nil {
|
if userPayload == nil {
|
||||||
resp.IsUser = false
|
resp.IsUser = false
|
||||||
}
|
}
|
||||||
|
|
||||||
ssoAvailable := true
|
ssoAvailable := true
|
||||||
err := m.checkFeature(model.SSO)
|
err := m.checkFeature(model.SSO)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -84,13 +143,15 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
|
|||||||
// do nothing, just skip sso
|
// do nothing, just skip sso
|
||||||
ssoAvailable = false
|
ssoAvailable = false
|
||||||
default:
|
default:
|
||||||
zap.S().Errorf("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
|
zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
|
||||||
return resp, model.BadRequest(err)
|
return resp, model.BadRequestStr(err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if ssoAvailable {
|
if ssoAvailable {
|
||||||
|
|
||||||
|
resp.IsUser = true
|
||||||
|
|
||||||
// find domain from email
|
// find domain from email
|
||||||
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
|
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
|
||||||
if apierr != nil {
|
if apierr != nil {
|
||||||
@@ -99,7 +160,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
|
|||||||
if len(emailComponents) > 0 {
|
if len(emailComponents) > 0 {
|
||||||
emailDomain = emailComponents[1]
|
emailDomain = emailComponents[1]
|
||||||
}
|
}
|
||||||
zap.S().Errorf("failed to get org domain from email", zap.String("emailDomain", emailDomain), apierr.ToError())
|
zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
|
||||||
return resp, apierr
|
return resp, apierr
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -115,7 +176,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
|
|||||||
escapedUrl, _ := url.QueryUnescape(sourceUrl)
|
escapedUrl, _ := url.QueryUnescape(sourceUrl)
|
||||||
siteUrl, err := url.Parse(escapedUrl)
|
siteUrl, err := url.Parse(escapedUrl)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to parse referer", err)
|
zap.L().Error("failed to parse referer", zap.Error(err))
|
||||||
return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
|
return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -124,7 +185,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
|
|||||||
resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
|
resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), err)
|
zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
|
||||||
return resp, model.InternalError(err)
|
return resp, model.InternalError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"net/url"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -28,29 +28,70 @@ type StoredDomain struct {
|
|||||||
|
|
||||||
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
|
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
|
||||||
// user domain. The domain is further used to process validity of the response.
|
// user domain. The domain is further used to process validity of the response.
|
||||||
// when sending login request to IdP we send relay state as URL (site url)
|
// when sending login request to IdP we send relay state as URL (site url)
|
||||||
// with domainId as query parameter.
|
// with domainId or domainName as query parameter.
|
||||||
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
|
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
|
||||||
// derive domain id from relay state now
|
// derive domain id from relay state now
|
||||||
var domainIdStr string
|
var domainIdStr string
|
||||||
|
var domainNameStr string
|
||||||
|
var domain *model.OrgDomain
|
||||||
|
|
||||||
for k, v := range relayState.Query() {
|
for k, v := range relayState.Query() {
|
||||||
if k == "domainId" && len(v) > 0 {
|
if k == "domainId" && len(v) > 0 {
|
||||||
domainIdStr = strings.Replace(v[0], ":", "-", -1)
|
domainIdStr = strings.Replace(v[0], ":", "-", -1)
|
||||||
}
|
}
|
||||||
|
if k == "domainName" && len(v) > 0 {
|
||||||
|
domainNameStr = v[0]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
domainId, err := uuid.Parse(domainIdStr)
|
if domainIdStr != "" {
|
||||||
|
domainId, err := uuid.Parse(domainIdStr)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("failed to parse domainId from relay state", zap.Error(err))
|
||||||
|
return nil, fmt.Errorf("failed to parse domainId from IdP response")
|
||||||
|
}
|
||||||
|
|
||||||
|
domain, err = m.GetDomain(ctx, domainId)
|
||||||
|
if (err != nil) || domain == nil {
|
||||||
|
zap.L().Error("failed to find domain from domainId received in IdP response", zap.Error(err))
|
||||||
|
return nil, fmt.Errorf("invalid credentials")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if domainNameStr != "" {
|
||||||
|
|
||||||
|
domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
|
||||||
|
domain = domainFromDB
|
||||||
|
if (err != nil) || domain == nil {
|
||||||
|
zap.L().Error("failed to find domain from domainName received in IdP response", zap.Error(err))
|
||||||
|
return nil, fmt.Errorf("invalid credentials")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if domain != nil {
|
||||||
|
return domain, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("failed to find domain received in IdP response")
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDomainByName returns org domain for a given domain name
|
||||||
|
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {
|
||||||
|
|
||||||
|
stored := StoredDomain{}
|
||||||
|
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to parse domain id from relay state", err)
|
if err == sql.ErrNoRows {
|
||||||
return nil, fmt.Errorf("failed to parse response from IdP response")
|
return nil, model.BadRequest(fmt.Errorf("invalid domain name"))
|
||||||
|
}
|
||||||
|
return nil, model.InternalError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
domain, err := m.GetDomain(ctx, domainId)
|
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
||||||
if (err != nil) || domain == nil {
|
if err := domain.LoadConfig(stored.Data); err != nil {
|
||||||
zap.S().Errorf("failed to find domain received in IdP response", err.Error())
|
return nil, model.InternalError(err)
|
||||||
return nil, fmt.Errorf("invalid credentials")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return domain, nil
|
return domain, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,7 +110,7 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomai
|
|||||||
|
|
||||||
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
||||||
if err := domain.LoadConfig(stored.Data); err != nil {
|
if err := domain.LoadConfig(stored.Data); err != nil {
|
||||||
return domain, model.InternalError(err)
|
return nil, model.InternalError(err)
|
||||||
}
|
}
|
||||||
return domain, nil
|
return domain, nil
|
||||||
}
|
}
|
||||||
@@ -91,7 +132,7 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
|
|||||||
for _, s := range stored {
|
for _, s := range stored {
|
||||||
domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
|
domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
|
||||||
if err := domain.LoadConfig(s.Data); err != nil {
|
if err := domain.LoadConfig(s.Data); err != nil {
|
||||||
zap.S().Errorf("ListDomains() failed", zap.Error(err))
|
zap.L().Error("ListDomains() failed", zap.Error(err))
|
||||||
}
|
}
|
||||||
domains = append(domains, domain)
|
domains = append(domains, domain)
|
||||||
}
|
}
|
||||||
@@ -112,7 +153,7 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
|
|||||||
|
|
||||||
configJson, err := json.Marshal(domain)
|
configJson, err := json.Marshal(domain)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to unmarshal domain config", zap.Error(err))
|
zap.L().Error("failed to unmarshal domain config", zap.Error(err))
|
||||||
return model.InternalError(fmt.Errorf("domain creation failed"))
|
return model.InternalError(fmt.Errorf("domain creation failed"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -126,7 +167,7 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
|
|||||||
time.Now().Unix())
|
time.Now().Unix())
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to insert domain in db", zap.Error(err))
|
zap.L().Error("failed to insert domain in db", zap.Error(err))
|
||||||
return model.InternalError(fmt.Errorf("domain creation failed"))
|
return model.InternalError(fmt.Errorf("domain creation failed"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -137,13 +178,13 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
|
|||||||
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
|
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
|
||||||
|
|
||||||
if domain.Id == uuid.Nil {
|
if domain.Id == uuid.Nil {
|
||||||
zap.S().Errorf("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
|
zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
|
||||||
return model.InternalError(fmt.Errorf("domain update failed"))
|
return model.InternalError(fmt.Errorf("domain update failed"))
|
||||||
}
|
}
|
||||||
|
|
||||||
configJson, err := json.Marshal(domain)
|
configJson, err := json.Marshal(domain)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("domain update failed", zap.Error(err))
|
zap.L().Error("domain update failed", zap.Error(err))
|
||||||
return model.InternalError(fmt.Errorf("domain update failed"))
|
return model.InternalError(fmt.Errorf("domain update failed"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -154,7 +195,7 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
|
|||||||
domain.Id)
|
domain.Id)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("domain update failed", zap.Error(err))
|
zap.L().Error("domain update failed", zap.Error(err))
|
||||||
return model.InternalError(fmt.Errorf("domain update failed"))
|
return model.InternalError(fmt.Errorf("domain update failed"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -165,7 +206,7 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
|
|||||||
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
|
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
|
||||||
|
|
||||||
if id == uuid.Nil {
|
if id == uuid.Nil {
|
||||||
zap.S().Errorf("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
|
zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
|
||||||
return model.InternalError(fmt.Errorf("domain delete failed"))
|
return model.InternalError(fmt.Errorf("domain delete failed"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -174,7 +215,7 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
|
|||||||
id)
|
id)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("domain delete failed", zap.Error(err))
|
zap.L().Error("domain delete failed", zap.Error(err))
|
||||||
return model.InternalError(fmt.Errorf("domain delete failed"))
|
return model.InternalError(fmt.Errorf("domain delete failed"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -206,7 +247,7 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
|
|||||||
|
|
||||||
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
||||||
if err := domain.LoadConfig(stored.Data); err != nil {
|
if err := domain.LoadConfig(stored.Data); err != nil {
|
||||||
return domain, model.InternalError(err)
|
return nil, model.InternalError(err)
|
||||||
}
|
}
|
||||||
return domain, nil
|
return domain, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
basedao "go.signoz.io/signoz/pkg/query-service/dao"
|
basedao "go.signoz.io/signoz/pkg/query-service/dao"
|
||||||
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
|
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
|
||||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||||
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
type modelDao struct {
|
type modelDao struct {
|
||||||
@@ -28,6 +29,41 @@ func (m *modelDao) checkFeature(key string) error {
|
|||||||
return m.flags.CheckFeature(key)
|
return m.flags.CheckFeature(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func columnExists(db *sqlx.DB, tableName, columnName string) bool {
|
||||||
|
query := fmt.Sprintf("PRAGMA table_info(%s);", tableName)
|
||||||
|
rows, err := db.Query(query)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Failed to query table info", zap.Error(err))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
var (
|
||||||
|
cid int
|
||||||
|
name string
|
||||||
|
ctype string
|
||||||
|
notnull int
|
||||||
|
dflt_value *string
|
||||||
|
pk int
|
||||||
|
)
|
||||||
|
for rows.Next() {
|
||||||
|
err := rows.Scan(&cid, &name, &ctype, ¬null, &dflt_value, &pk)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Failed to scan table info", zap.Error(err))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if name == columnName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = rows.Err()
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Failed to scan table info", zap.Error(err))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// InitDB creates and extends base model DB repository
|
// InitDB creates and extends base model DB repository
|
||||||
func InitDB(dataSourceName string) (*modelDao, error) {
|
func InitDB(dataSourceName string) (*modelDao, error) {
|
||||||
dao, err := basedsql.InitDB(dataSourceName)
|
dao, err := basedsql.InitDB(dataSourceName)
|
||||||
@@ -51,11 +87,16 @@ func InitDB(dataSourceName string) (*modelDao, error) {
|
|||||||
);
|
);
|
||||||
CREATE TABLE IF NOT EXISTS personal_access_tokens (
|
CREATE TABLE IF NOT EXISTS personal_access_tokens (
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
role TEXT NOT NULL,
|
||||||
user_id TEXT NOT NULL,
|
user_id TEXT NOT NULL,
|
||||||
token TEXT NOT NULL UNIQUE,
|
token TEXT NOT NULL UNIQUE,
|
||||||
name TEXT NOT NULL,
|
name TEXT NOT NULL,
|
||||||
created_at INTEGER NOT NULL,
|
created_at INTEGER NOT NULL,
|
||||||
expires_at INTEGER NOT NULL,
|
expires_at INTEGER NOT NULL,
|
||||||
|
updated_at INTEGER NOT NULL,
|
||||||
|
last_used INTEGER NOT NULL,
|
||||||
|
revoked BOOLEAN NOT NULL,
|
||||||
|
updated_by_user_id TEXT NOT NULL,
|
||||||
FOREIGN KEY(user_id) REFERENCES users(id)
|
FOREIGN KEY(user_id) REFERENCES users(id)
|
||||||
);
|
);
|
||||||
`
|
`
|
||||||
@@ -65,6 +106,36 @@ func InitDB(dataSourceName string) (*modelDao, error) {
|
|||||||
return nil, fmt.Errorf("error in creating tables: %v", err.Error())
|
return nil, fmt.Errorf("error in creating tables: %v", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "role") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN role TEXT NOT NULL DEFAULT 'ADMIN';")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "updated_at") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_at INTEGER NOT NULL DEFAULT 0;")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "last_used") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "revoked") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN revoked BOOLEAN NOT NULL DEFAULT FALSE;")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !columnExists(m.DB(), "personal_access_tokens", "updated_by_user_id") {
|
||||||
|
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_by_user_id TEXT NOT NULL DEFAULT '';")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error in adding column: %v", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,42 +3,134 @@ package sqlite
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (m *modelDao) CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError {
|
func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) {
|
||||||
_, err := m.DB().ExecContext(ctx,
|
result, err := m.DB().ExecContext(ctx,
|
||||||
"INSERT INTO personal_access_tokens (user_id, token, name, created_at, expires_at) VALUES ($1, $2, $3, $4, $5)",
|
"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
|
||||||
p.UserID,
|
p.UserID,
|
||||||
p.Token,
|
p.Token,
|
||||||
|
p.Role,
|
||||||
p.Name,
|
p.Name,
|
||||||
p.CreatedAt,
|
p.CreatedAt,
|
||||||
p.ExpiresAt)
|
p.ExpiresAt,
|
||||||
|
p.UpdatedAt,
|
||||||
|
p.UpdatedByUserID,
|
||||||
|
p.LastUsed,
|
||||||
|
p.Revoked,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
|
zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
|
||||||
return model.InternalError(fmt.Errorf("PAT insertion failed"))
|
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
|
||||||
|
}
|
||||||
|
id, err := result.LastInsertId()
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
|
||||||
|
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
|
||||||
|
}
|
||||||
|
p.Id = strconv.Itoa(int(id))
|
||||||
|
createdByUser, _ := m.GetUser(ctx, p.UserID)
|
||||||
|
if createdByUser == nil {
|
||||||
|
p.CreatedByUser = model.User{
|
||||||
|
NotFound: true,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
p.CreatedByUser = model.User{
|
||||||
|
Id: createdByUser.Id,
|
||||||
|
Name: createdByUser.Name,
|
||||||
|
Email: createdByUser.Email,
|
||||||
|
CreatedAt: createdByUser.CreatedAt,
|
||||||
|
ProfilePictureURL: createdByUser.ProfilePictureURL,
|
||||||
|
NotFound: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError {
|
||||||
|
_, err := m.DB().ExecContext(ctx,
|
||||||
|
"UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;",
|
||||||
|
p.Role,
|
||||||
|
p.Name,
|
||||||
|
p.UpdatedAt,
|
||||||
|
p.UpdatedByUserID,
|
||||||
|
id)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
|
||||||
|
return model.InternalError(fmt.Errorf("PAT update failed"))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
|
func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) basemodel.BaseApiError {
|
||||||
|
_, err := m.DB().ExecContext(ctx,
|
||||||
|
"UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;",
|
||||||
|
lastUsed,
|
||||||
|
token)
|
||||||
|
if err != nil {
|
||||||
|
zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
|
||||||
|
return model.InternalError(fmt.Errorf("PAT last used update failed"))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) {
|
||||||
pats := []model.PAT{}
|
pats := []model.PAT{}
|
||||||
|
|
||||||
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE user_id=?;`, userID); err != nil {
|
if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
|
||||||
zap.S().Errorf("Failed to fetch PATs for user: %s, err: %v", userID, zap.Error(err))
|
zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
|
||||||
return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
|
return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
|
||||||
}
|
}
|
||||||
|
for i := range pats {
|
||||||
|
createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
|
||||||
|
if createdByUser == nil {
|
||||||
|
pats[i].CreatedByUser = model.User{
|
||||||
|
NotFound: true,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pats[i].CreatedByUser = model.User{
|
||||||
|
Id: createdByUser.Id,
|
||||||
|
Name: createdByUser.Name,
|
||||||
|
Email: createdByUser.Email,
|
||||||
|
CreatedAt: createdByUser.CreatedAt,
|
||||||
|
ProfilePictureURL: createdByUser.ProfilePictureURL,
|
||||||
|
NotFound: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
updatedByUser, _ := m.GetUser(ctx, pats[i].UpdatedByUserID)
|
||||||
|
if updatedByUser == nil {
|
||||||
|
pats[i].UpdatedByUser = model.User{
|
||||||
|
NotFound: true,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pats[i].UpdatedByUser = model.User{
|
||||||
|
Id: updatedByUser.Id,
|
||||||
|
Name: updatedByUser.Name,
|
||||||
|
Email: updatedByUser.Email,
|
||||||
|
CreatedAt: updatedByUser.CreatedAt,
|
||||||
|
ProfilePictureURL: updatedByUser.ProfilePictureURL,
|
||||||
|
NotFound: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
return pats, nil
|
return pats, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *modelDao) DeletePAT(ctx context.Context, id string) basemodel.BaseApiError {
|
func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError {
|
||||||
_, err := m.DB().ExecContext(ctx, `DELETE from personal_access_tokens where id=?;`, id)
|
updatedAt := time.Now().Unix()
|
||||||
|
_, err := m.DB().ExecContext(ctx,
|
||||||
|
"UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
|
||||||
|
userID, updatedAt, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("Failed to delete PAT, err: %v", zap.Error(err))
|
zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
|
||||||
return model.InternalError(fmt.Errorf("failed to delete PAT"))
|
return model.InternalError(fmt.Errorf("PAT revoke failed"))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -46,7 +138,7 @@ func (m *modelDao) DeletePAT(ctx context.Context, id string) basemodel.BaseApiEr
|
|||||||
func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
|
func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
|
||||||
pats := []model.PAT{}
|
pats := []model.PAT{}
|
||||||
|
|
||||||
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=?;`, token); err != nil {
|
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
|
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,7 +155,7 @@ func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemo
|
|||||||
func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
|
func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
|
||||||
pats := []model.PAT{}
|
pats := []model.PAT{}
|
||||||
|
|
||||||
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=?;`, id); err != nil {
|
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
|
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,6 +169,7 @@ func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basem
|
|||||||
return &pats[0], nil
|
return &pats[0], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// deprecated
|
||||||
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) {
|
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) {
|
||||||
users := []basemodel.UserPayload{}
|
users := []basemodel.UserPayload{}
|
||||||
|
|
||||||
@@ -90,7 +183,7 @@ func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.U
|
|||||||
u.org_id,
|
u.org_id,
|
||||||
u.group_id
|
u.group_id
|
||||||
FROM users u, personal_access_tokens p
|
FROM users u, personal_access_tokens p
|
||||||
WHERE u.id = p.user_id and p.token=?;`
|
WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`
|
||||||
|
|
||||||
if err := m.DB().Select(&users, query, token); err != nil {
|
if err := m.DB().Select(&users, query, token); err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
|
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
|
||||||
|
|||||||
@@ -6,13 +6,13 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
|
||||||
"go.signoz.io/signoz/ee/query-service/constants"
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var C *Client
|
var C *Client
|
||||||
@@ -47,13 +47,13 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
|
|||||||
httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
|
httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to connect to license.signoz.io", err)
|
zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
|
||||||
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
|
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
|
||||||
}
|
}
|
||||||
|
|
||||||
httpBody, err := ioutil.ReadAll(httpResponse.Body)
|
httpBody, err := io.ReadAll(httpResponse.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to read activation response from license.signoz.io", err)
|
zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
|
||||||
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
|
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,7 +63,7 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
|
|||||||
result := ActivationResult{}
|
result := ActivationResult{}
|
||||||
err = json.Unmarshal(httpBody, &result)
|
err = json.Unmarshal(httpBody, &result)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to marshal activation response from license.signoz.io", err)
|
zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
|
||||||
return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
|
return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -91,7 +91,7 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
|
|||||||
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
|
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
|
||||||
}
|
}
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(response.Body)
|
body, err := io.ReadAll(response.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
|
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -97,7 +97,7 @@ func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
|
|||||||
l.ValidationMessage)
|
l.ValidationMessage)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("error in inserting license data: ", zap.Error(err))
|
zap.L().Error("error in inserting license data: ", zap.Error(err))
|
||||||
return fmt.Errorf("failed to insert license in db: %v", err)
|
return fmt.Errorf("failed to insert license in db: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -121,7 +121,7 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,
|
|||||||
_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)
|
_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("error in updating license: ", zap.Error(err))
|
zap.L().Error("error in updating license: ", zap.Error(err))
|
||||||
return fmt.Errorf("failed to update license in db: %v", err)
|
return fmt.Errorf("failed to update license in db: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
|
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
|
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
|
||||||
|
|
||||||
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
|
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
|
||||||
@@ -99,7 +100,7 @@ func (lm *Manager) SetActive(l *model.License) {
|
|||||||
|
|
||||||
err := lm.InitFeatures(lm.activeFeatures)
|
err := lm.InitFeatures(lm.activeFeatures)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Panicf("Couldn't activate features: %v", err)
|
zap.L().Panic("Couldn't activate features", zap.Error(err))
|
||||||
}
|
}
|
||||||
if !lm.validatorRunning {
|
if !lm.validatorRunning {
|
||||||
// we want to make sure only one validator runs,
|
// we want to make sure only one validator runs,
|
||||||
@@ -124,13 +125,13 @@ func (lm *Manager) LoadActiveLicense() error {
|
|||||||
if active != nil {
|
if active != nil {
|
||||||
lm.SetActive(active)
|
lm.SetActive(active)
|
||||||
} else {
|
} else {
|
||||||
zap.S().Info("No active license found, defaulting to basic plan")
|
zap.L().Info("No active license found, defaulting to basic plan")
|
||||||
// if no active license is found, we default to basic(free) plan with all default features
|
// if no active license is found, we default to basic(free) plan with all default features
|
||||||
lm.activeFeatures = model.BasicPlan
|
lm.activeFeatures = model.BasicPlan
|
||||||
setDefaultFeatures(lm)
|
setDefaultFeatures(lm)
|
||||||
err := lm.InitFeatures(lm.activeFeatures)
|
err := lm.InitFeatures(lm.activeFeatures)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Error("Couldn't initialize features: ", err)
|
zap.L().Error("Couldn't initialize features", zap.Error(err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -190,7 +191,7 @@ func (lm *Manager) Validator(ctx context.Context) {
|
|||||||
|
|
||||||
// Validate validates the current active license
|
// Validate validates the current active license
|
||||||
func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
||||||
zap.S().Info("License validation started")
|
zap.L().Info("License validation started")
|
||||||
if lm.activeLicense == nil {
|
if lm.activeLicense == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -200,12 +201,12 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
|||||||
|
|
||||||
lm.lastValidated = time.Now().Unix()
|
lm.lastValidated = time.Now().Unix()
|
||||||
if reterr != nil {
|
if reterr != nil {
|
||||||
zap.S().Errorf("License validation completed with error", reterr)
|
zap.L().Error("License validation completed with error", zap.Error(reterr))
|
||||||
atomic.AddUint64(&lm.failedAttempts, 1)
|
atomic.AddUint64(&lm.failedAttempts, 1)
|
||||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
|
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
|
||||||
map[string]interface{}{"err": reterr.Error()})
|
map[string]interface{}{"err": reterr.Error()}, "", true, false)
|
||||||
} else {
|
} else {
|
||||||
zap.S().Info("License validation completed with no errors")
|
zap.L().Info("License validation completed with no errors")
|
||||||
}
|
}
|
||||||
|
|
||||||
lm.mutex.Unlock()
|
lm.mutex.Unlock()
|
||||||
@@ -213,7 +214,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
|||||||
|
|
||||||
response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
|
response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
|
||||||
if apiError != nil {
|
if apiError != nil {
|
||||||
zap.S().Errorf("failed to validate license", apiError)
|
zap.L().Error("failed to validate license", zap.Error(apiError.Err))
|
||||||
return apiError.Err
|
return apiError.Err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -234,7 +235,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := l.ParsePlan(); err != nil {
|
if err := l.ParsePlan(); err != nil {
|
||||||
zap.S().Errorf("failed to parse updated license", zap.Error(err))
|
zap.L().Error("failed to parse updated license", zap.Error(err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -244,7 +245,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
// unexpected db write issue but we can let the user continue
|
// unexpected db write issue but we can let the user continue
|
||||||
// and wait for update to work in next cycle.
|
// and wait for update to work in next cycle.
|
||||||
zap.S().Errorf("failed to validate license", zap.Error(err))
|
zap.L().Error("failed to validate license", zap.Error(err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -259,14 +260,17 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
|||||||
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
|
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if errResponse != nil {
|
if errResponse != nil {
|
||||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
|
userEmail, err := auth.GetEmailFromJwt(ctx)
|
||||||
map[string]interface{}{"err": errResponse.Err.Error()})
|
if err == nil {
|
||||||
|
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
|
||||||
|
map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
response, apiError := validate.ActivateLicense(key, "")
|
response, apiError := validate.ActivateLicense(key, "")
|
||||||
if apiError != nil {
|
if apiError != nil {
|
||||||
zap.S().Errorf("failed to activate license", zap.Error(apiError.Err))
|
zap.L().Error("failed to activate license", zap.Error(apiError.Err))
|
||||||
return nil, apiError
|
return nil, apiError
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -280,14 +284,14 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
|
|||||||
err := l.ParsePlan()
|
err := l.ParsePlan()
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to activate license", zap.Error(err))
|
zap.L().Error("failed to activate license", zap.Error(err))
|
||||||
return nil, model.InternalError(err)
|
return nil, model.InternalError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// store the license before activating it
|
// store the license before activating it
|
||||||
err = lm.repo.InsertLicense(ctx, l)
|
err = lm.repo.InsertLicense(ctx, l)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to activate license", zap.Error(err))
|
zap.L().Error("failed to activate license", zap.Error(err))
|
||||||
return nil, model.InternalError(err)
|
return nil, model.InternalError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -14,10 +14,10 @@ import (
|
|||||||
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
|
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
|
||||||
"go.signoz.io/signoz/ee/query-service/app"
|
"go.signoz.io/signoz/ee/query-service/app"
|
||||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
"go.signoz.io/signoz/pkg/query-service/constants"
|
|
||||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||||
"go.signoz.io/signoz/pkg/query-service/version"
|
"go.signoz.io/signoz/pkg/query-service/version"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
|
||||||
zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
|
zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
|
||||||
zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"
|
zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"
|
||||||
@@ -27,18 +27,19 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
|
func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
|
||||||
config := zap.NewDevelopmentConfig()
|
config := zap.NewProductionConfig()
|
||||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
|
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
|
||||||
defer stop()
|
defer stop()
|
||||||
|
|
||||||
config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
|
config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
|
||||||
otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
|
config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
|
||||||
consoleEncoder := zapcore.NewConsoleEncoder(config.EncoderConfig)
|
|
||||||
defaultLogLevel := zapcore.DebugLevel
|
|
||||||
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
|
|
||||||
config.EncoderConfig.TimeKey = "timestamp"
|
config.EncoderConfig.TimeKey = "timestamp"
|
||||||
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
|
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||||
|
|
||||||
|
otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
|
||||||
|
consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig)
|
||||||
|
defaultLogLevel := zapcore.InfoLevel
|
||||||
|
|
||||||
res := resource.NewWithAttributes(
|
res := resource.NewWithAttributes(
|
||||||
semconv.SchemaURL,
|
semconv.SchemaURL,
|
||||||
semconv.ServiceNameKey.String("query-service"),
|
semconv.ServiceNameKey.String("query-service"),
|
||||||
@@ -48,14 +49,15 @@ func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
|
|||||||
zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
|
zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
|
||||||
)
|
)
|
||||||
|
|
||||||
if enableQueryServiceLogOTLPExport == true {
|
if enableQueryServiceLogOTLPExport {
|
||||||
conn, err := grpc.DialContext(ctx, constants.OTLPTarget, grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(time.Second*30))
|
ctx, _ := context.WithTimeout(ctx, time.Second*30)
|
||||||
|
conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Println("failed to connect to otlp collector to export query service logs with error:", err)
|
log.Fatalf("failed to establish connection: %v", err)
|
||||||
} else {
|
} else {
|
||||||
logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
|
logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logExportBatchSizeInt = 1000
|
logExportBatchSizeInt = 512
|
||||||
}
|
}
|
||||||
ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
|
ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
|
||||||
BatchSize: logExportBatchSizeInt,
|
BatchSize: logExportBatchSizeInt,
|
||||||
@@ -81,7 +83,9 @@ func main() {
|
|||||||
|
|
||||||
// the url used to build link in the alert messages in slack and other systems
|
// the url used to build link in the alert messages in slack and other systems
|
||||||
var ruleRepoURL string
|
var ruleRepoURL string
|
||||||
|
var cluster string
|
||||||
|
|
||||||
|
var cacheConfigPath, fluxInterval string
|
||||||
var enableQueryServiceLogOTLPExport bool
|
var enableQueryServiceLogOTLPExport bool
|
||||||
var preferDelta bool
|
var preferDelta bool
|
||||||
var preferSpanMetrics bool
|
var preferSpanMetrics bool
|
||||||
@@ -99,14 +103,18 @@ func main() {
|
|||||||
flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
|
flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
|
||||||
flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
|
flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
|
||||||
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
|
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
|
||||||
|
flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
|
||||||
|
flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)")
|
||||||
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
|
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
|
||||||
|
flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
|
||||||
|
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
|
loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
|
||||||
|
|
||||||
zap.ReplaceGlobals(loggerMgr)
|
zap.ReplaceGlobals(loggerMgr)
|
||||||
defer loggerMgr.Sync() // flushes buffer, if any
|
defer loggerMgr.Sync() // flushes buffer, if any
|
||||||
|
|
||||||
logger := loggerMgr.Sugar()
|
|
||||||
version.PrintVersion()
|
version.PrintVersion()
|
||||||
|
|
||||||
serverOptions := &app.ServerOptions{
|
serverOptions := &app.ServerOptions{
|
||||||
@@ -121,28 +129,31 @@ func main() {
|
|||||||
MaxIdleConns: maxIdleConns,
|
MaxIdleConns: maxIdleConns,
|
||||||
MaxOpenConns: maxOpenConns,
|
MaxOpenConns: maxOpenConns,
|
||||||
DialTimeout: dialTimeout,
|
DialTimeout: dialTimeout,
|
||||||
|
CacheConfigPath: cacheConfigPath,
|
||||||
|
FluxInterval: fluxInterval,
|
||||||
|
Cluster: cluster,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read the jwt secret key
|
// Read the jwt secret key
|
||||||
auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
|
auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
|
||||||
|
|
||||||
if len(auth.JwtSecret) == 0 {
|
if len(auth.JwtSecret) == 0 {
|
||||||
zap.S().Warn("No JWT secret key is specified.")
|
zap.L().Warn("No JWT secret key is specified.")
|
||||||
} else {
|
} else {
|
||||||
zap.S().Info("No JWT secret key set successfully.")
|
zap.L().Info("JWT secret key set successfully.")
|
||||||
}
|
}
|
||||||
|
|
||||||
server, err := app.NewServer(serverOptions)
|
server, err := app.NewServer(serverOptions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Fatal("Failed to create server", zap.Error(err))
|
zap.L().Fatal("Failed to create server", zap.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := server.Start(); err != nil {
|
if err := server.Start(); err != nil {
|
||||||
logger.Fatal("Could not start servers", zap.Error(err))
|
zap.L().Fatal("Could not start server", zap.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := auth.InitAuthCache(context.Background()); err != nil {
|
if err := auth.InitAuthCache(context.Background()); err != nil {
|
||||||
logger.Fatal("Failed to initialize auth cache", zap.Error(err))
|
zap.L().Fatal("Failed to initialize auth cache", zap.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
signalsChannel := make(chan os.Signal, 1)
|
signalsChannel := make(chan os.Signal, 1)
|
||||||
@@ -151,9 +162,9 @@ func main() {
|
|||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case status := <-server.HealthCheckStatus():
|
case status := <-server.HealthCheckStatus():
|
||||||
logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
|
zap.L().Info("Received HealthCheck status: ", zap.Int("status", int(status)))
|
||||||
case <-signalsChannel:
|
case <-signalsChannel:
|
||||||
logger.Fatal("Received OS Interrupt Signal ... ")
|
zap.L().Fatal("Received OS Interrupt Signal ... ")
|
||||||
server.Stop()
|
server.Stop()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,18 +4,9 @@ import (
|
|||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PrecheckResponse contains login precheck response
|
|
||||||
type PrecheckResponse struct {
|
|
||||||
SSO bool `json:"sso"`
|
|
||||||
SsoUrl string `json:"ssoUrl"`
|
|
||||||
CanSelfRegister bool `json:"canSelfRegister"`
|
|
||||||
IsUser bool `json:"isUser"`
|
|
||||||
SsoError string `json:"ssoError"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GettableInvitation overrides base object and adds precheck into
|
// GettableInvitation overrides base object and adds precheck into
|
||||||
// response
|
// response
|
||||||
type GettableInvitation struct {
|
type GettableInvitation struct {
|
||||||
*basemodel.InvitationResponseObject
|
*basemodel.InvitationResponseObject
|
||||||
Precheck *PrecheckResponse `json:"precheck"`
|
Precheck *basemodel.PrecheckResponse `json:"precheck"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,8 +9,8 @@ import (
|
|||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
saml2 "github.com/russellhaering/gosaml2"
|
saml2 "github.com/russellhaering/gosaml2"
|
||||||
"go.signoz.io/signoz/ee/query-service/sso/saml"
|
|
||||||
"go.signoz.io/signoz/ee/query-service/sso"
|
"go.signoz.io/signoz/ee/query-service/sso"
|
||||||
|
"go.signoz.io/signoz/ee/query-service/sso/saml"
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
@@ -24,16 +24,16 @@ const (
|
|||||||
|
|
||||||
// OrgDomain identify org owned web domains for auth and other purposes
|
// OrgDomain identify org owned web domains for auth and other purposes
|
||||||
type OrgDomain struct {
|
type OrgDomain struct {
|
||||||
Id uuid.UUID `json:"id"`
|
Id uuid.UUID `json:"id"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
OrgId string `json:"orgId"`
|
OrgId string `json:"orgId"`
|
||||||
SsoEnabled bool `json:"ssoEnabled"`
|
SsoEnabled bool `json:"ssoEnabled"`
|
||||||
SsoType SSOType `json:"ssoType"`
|
SsoType SSOType `json:"ssoType"`
|
||||||
|
|
||||||
SamlConfig *SamlConfig `json:"samlConfig"`
|
SamlConfig *SamlConfig `json:"samlConfig"`
|
||||||
GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`
|
GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`
|
||||||
|
|
||||||
Org *basemodel.Organization
|
Org *basemodel.Organization
|
||||||
}
|
}
|
||||||
|
|
||||||
func (od *OrgDomain) String() string {
|
func (od *OrgDomain) String() string {
|
||||||
@@ -100,8 +100,8 @@ func (od *OrgDomain) GetSAMLCert() string {
|
|||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrepareGoogleOAuthProvider creates GoogleProvider that is used in
|
// PrepareGoogleOAuthProvider creates GoogleProvider that is used in
|
||||||
// requesting OAuth and also used in processing response from google
|
// requesting OAuth and also used in processing response from google
|
||||||
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
|
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
|
||||||
if od.GoogleAuthConfig == nil {
|
if od.GoogleAuthConfig == nil {
|
||||||
return nil, fmt.Errorf("Google auth is not setup correctly for this domain")
|
return nil, fmt.Errorf("Google auth is not setup correctly for this domain")
|
||||||
@@ -137,38 +137,36 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
|
func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
|
||||||
|
|
||||||
|
|
||||||
fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)
|
fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)
|
||||||
|
|
||||||
// build redirect url from window.location sent by frontend
|
// build redirect url from window.location sent by frontend
|
||||||
redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
|
redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
|
||||||
|
|
||||||
// prepare state that gets relayed back when the auth provider
|
// prepare state that gets relayed back when the auth provider
|
||||||
// calls back our url. here we pass the app url (where signoz runs)
|
// calls back our url. here we pass the app url (where signoz runs)
|
||||||
// and the domain Id. The domain Id helps in identifying sso config
|
// and the domain Id. The domain Id helps in identifying sso config
|
||||||
// when the call back occurs and the app url is useful in redirecting user
|
// when the call back occurs and the app url is useful in redirecting user
|
||||||
// back to the right path.
|
// back to the right path.
|
||||||
// why do we need to pass app url? the callback typically is handled by backend
|
// why do we need to pass app url? the callback typically is handled by backend
|
||||||
// and sometimes backend might right at a different port or is unaware of frontend
|
// and sometimes backend might right at a different port or is unaware of frontend
|
||||||
// endpoint (unless SITE_URL param is set). hence, we receive this build sso request
|
// endpoint (unless SITE_URL param is set). hence, we receive this build sso request
|
||||||
// along with frontend window.location and use it to relay the information through
|
// along with frontend window.location and use it to relay the information through
|
||||||
// auth provider to the backend (HandleCallback or HandleSSO method).
|
// auth provider to the backend (HandleCallback or HandleSSO method).
|
||||||
relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)
|
relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)
|
||||||
|
|
||||||
|
|
||||||
switch (od.SsoType) {
|
switch od.SsoType {
|
||||||
case SAML:
|
case SAML:
|
||||||
|
|
||||||
sp, err := od.PrepareSamlRequest(siteUrl)
|
sp, err := od.PrepareSamlRequest(siteUrl)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
return sp.BuildAuthURL(relayState)
|
return sp.BuildAuthURL(relayState)
|
||||||
|
|
||||||
case GoogleAuth:
|
case GoogleAuth:
|
||||||
|
|
||||||
googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl)
|
googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@@ -176,9 +174,8 @@ func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
|
|||||||
return googleProvider.BuildAuthURL(relayState)
|
return googleProvider.BuildAuthURL(relayState)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
zap.S().Errorf("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
|
zap.L().Error("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
|
||||||
return "", fmt.Errorf("unsupported SSO config for the domain")
|
return "", fmt.Errorf("unsupported SSO config for the domain")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package model
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -61,7 +62,6 @@ func InternalError(err error) *ApiError {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// InternalErrorStr returns a ApiError object of internal type for string input
|
// InternalErrorStr returns a ApiError object of internal type for string input
|
||||||
func InternalErrorStr(s string) *ApiError {
|
func InternalErrorStr(s string) *ApiError {
|
||||||
return &ApiError{
|
return &ApiError{
|
||||||
@@ -69,6 +69,7 @@ func InternalErrorStr(s string) *ApiError {
|
|||||||
Err: fmt.Errorf(s),
|
Err: fmt.Errorf(s),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrorNone basemodel.ErrorType = ""
|
ErrorNone basemodel.ErrorType = ""
|
||||||
ErrorTimeout basemodel.ErrorType = "timeout"
|
ErrorTimeout basemodel.ErrorType = "timeout"
|
||||||
|
|||||||
@@ -89,3 +89,18 @@ func (l *License) ParseFeatures() {
|
|||||||
l.FeatureSet = BasicPlan
|
l.FeatureSet = BasicPlan
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Licenses struct {
|
||||||
|
TrialStart int64 `json:"trialStart"`
|
||||||
|
TrialEnd int64 `json:"trialEnd"`
|
||||||
|
OnTrial bool `json:"onTrial"`
|
||||||
|
WorkSpaceBlock bool `json:"workSpaceBlock"`
|
||||||
|
TrialConvertedToSubscription bool `json:"trialConvertedToSubscription"`
|
||||||
|
GracePeriodEnd int64 `json:"gracePeriodEnd"`
|
||||||
|
Licenses []License `json:"licenses"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SubscriptionServerResp struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data Licenses `json:"data"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,10 +1,32 @@
|
|||||||
package model
|
package model
|
||||||
|
|
||||||
type PAT struct {
|
type User struct {
|
||||||
Id string `json:"id" db:"id"`
|
Id string `json:"id" db:"id"`
|
||||||
UserID string `json:"userId" db:"user_id"`
|
Name string `json:"name" db:"name"`
|
||||||
Token string `json:"token" db:"token"`
|
Email string `json:"email" db:"email"`
|
||||||
Name string `json:"name" db:"name"`
|
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
||||||
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
ProfilePictureURL string `json:"profilePictureURL" db:"profile_picture_url"`
|
||||||
ExpiresAt int64 `json:"expiresAt" db:"expires_at"` // unused as of now
|
NotFound bool `json:"notFound"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type CreatePATRequestBody struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Role string `json:"role"`
|
||||||
|
ExpiresInDays int64 `json:"expiresInDays"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PAT struct {
|
||||||
|
Id string `json:"id" db:"id"`
|
||||||
|
UserID string `json:"userId" db:"user_id"`
|
||||||
|
CreatedByUser User `json:"createdByUser"`
|
||||||
|
UpdatedByUser User `json:"updatedByUser"`
|
||||||
|
Token string `json:"token" db:"token"`
|
||||||
|
Role string `json:"role" db:"role"`
|
||||||
|
Name string `json:"name" db:"name"`
|
||||||
|
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
||||||
|
ExpiresAt int64 `json:"expiresAt" db:"expires_at"`
|
||||||
|
UpdatedAt int64 `json:"updatedAt" db:"updated_at"`
|
||||||
|
LastUsed int64 `json:"lastUsed" db:"last_used"`
|
||||||
|
Revoked bool `json:"revoked" db:"revoked"`
|
||||||
|
UpdatedByUserID string `json:"updatedByUserId" db:"updated_by_user_id"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,8 @@ const Basic = "BASIC_PLAN"
|
|||||||
const Pro = "PRO_PLAN"
|
const Pro = "PRO_PLAN"
|
||||||
const Enterprise = "ENTERPRISE_PLAN"
|
const Enterprise = "ENTERPRISE_PLAN"
|
||||||
const DisableUpsell = "DISABLE_UPSELL"
|
const DisableUpsell = "DISABLE_UPSELL"
|
||||||
|
const Onboarding = "ONBOARDING"
|
||||||
|
const ChatSupport = "CHAT_SUPPORT"
|
||||||
|
|
||||||
var BasicPlan = basemodel.FeatureSet{
|
var BasicPlan = basemodel.FeatureSet{
|
||||||
basemodel.Feature{
|
basemodel.Feature{
|
||||||
@@ -50,14 +52,14 @@ var BasicPlan = basemodel.FeatureSet{
|
|||||||
Name: basemodel.QueryBuilderPanels,
|
Name: basemodel.QueryBuilderPanels,
|
||||||
Active: true,
|
Active: true,
|
||||||
Usage: 0,
|
Usage: 0,
|
||||||
UsageLimit: 5,
|
UsageLimit: 20,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
basemodel.Feature{
|
basemodel.Feature{
|
||||||
Name: basemodel.QueryBuilderAlerts,
|
Name: basemodel.QueryBuilderAlerts,
|
||||||
Active: true,
|
Active: true,
|
||||||
Usage: 0,
|
Usage: 0,
|
||||||
UsageLimit: 5,
|
UsageLimit: 10,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
basemodel.Feature{
|
basemodel.Feature{
|
||||||
@@ -81,6 +83,20 @@ var BasicPlan = basemodel.FeatureSet{
|
|||||||
UsageLimit: -1,
|
UsageLimit: -1,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelOpsgenie,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelEmail,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
basemodel.Feature{
|
basemodel.Feature{
|
||||||
Name: basemodel.AlertChannelMsTeams,
|
Name: basemodel.AlertChannelMsTeams,
|
||||||
Active: false,
|
Active: false,
|
||||||
@@ -161,6 +177,20 @@ var ProPlan = basemodel.FeatureSet{
|
|||||||
UsageLimit: -1,
|
UsageLimit: -1,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelOpsgenie,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelEmail,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
basemodel.Feature{
|
basemodel.Feature{
|
||||||
Name: basemodel.AlertChannelMsTeams,
|
Name: basemodel.AlertChannelMsTeams,
|
||||||
Active: true,
|
Active: true,
|
||||||
@@ -241,6 +271,20 @@ var EnterprisePlan = basemodel.FeatureSet{
|
|||||||
UsageLimit: -1,
|
UsageLimit: -1,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelOpsgenie,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelEmail,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
basemodel.Feature{
|
basemodel.Feature{
|
||||||
Name: basemodel.AlertChannelMsTeams,
|
Name: basemodel.AlertChannelMsTeams,
|
||||||
Active: true,
|
Active: true,
|
||||||
@@ -255,4 +299,18 @@ var EnterprisePlan = basemodel.FeatureSet{
|
|||||||
UsageLimit: -1,
|
UsageLimit: -1,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: Onboarding,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: ChatSupport,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,6 +20,8 @@ type Usage struct {
|
|||||||
TimeStamp time.Time `json:"timestamp"`
|
TimeStamp time.Time `json:"timestamp"`
|
||||||
Count int64 `json:"count"`
|
Count int64 `json:"count"`
|
||||||
Size int64 `json:"size"`
|
Size int64 `json:"size"`
|
||||||
|
OrgName string `json:"orgName"`
|
||||||
|
TenantId string `json:"tenantId"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type UsageDB struct {
|
type UsageDB struct {
|
||||||
|
|||||||
@@ -102,6 +102,6 @@ func PrepareRequest(issuer, acsUrl, audience, entity, idp, certString string) (*
|
|||||||
IDPCertificateStore: certStore,
|
IDPCertificateStore: certStore,
|
||||||
SPKeyStore: randomKeyStore,
|
SPKeyStore: randomKeyStore,
|
||||||
}
|
}
|
||||||
zap.S().Debugf("SAML request:", sp)
|
zap.L().Debug("SAML request", zap.Any("sp", sp))
|
||||||
return sp, nil
|
return sp, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,6 +4,8 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
@@ -11,10 +13,10 @@ import (
|
|||||||
"github.com/ClickHouse/clickhouse-go/v2"
|
"github.com/ClickHouse/clickhouse-go/v2"
|
||||||
"github.com/go-co-op/gocron"
|
"github.com/go-co-op/gocron"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/jmoiron/sqlx"
|
|
||||||
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
|
|
||||||
|
"go.signoz.io/signoz/ee/query-service/dao"
|
||||||
licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
|
licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
|
||||||
"go.signoz.io/signoz/ee/query-service/license"
|
"go.signoz.io/signoz/ee/query-service/license"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
@@ -38,15 +40,29 @@ type Manager struct {
|
|||||||
licenseRepo *license.Repo
|
licenseRepo *license.Repo
|
||||||
|
|
||||||
scheduler *gocron.Scheduler
|
scheduler *gocron.Scheduler
|
||||||
|
|
||||||
|
modelDao dao.ModelDao
|
||||||
|
|
||||||
|
tenantID string
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
|
func New(dbType string, modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
|
||||||
|
hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
|
||||||
|
hostNameRegexMatches := hostNameRegex.FindStringSubmatch(os.Getenv("ClickHouseUrl"))
|
||||||
|
|
||||||
|
tenantID := ""
|
||||||
|
if len(hostNameRegexMatches) == 2 {
|
||||||
|
tenantID = hostNameRegexMatches[1]
|
||||||
|
tenantID = strings.TrimRight(tenantID, "-clickhouse")
|
||||||
|
}
|
||||||
|
|
||||||
m := &Manager{
|
m := &Manager{
|
||||||
// repository: repo,
|
// repository: repo,
|
||||||
clickhouseConn: clickhouseConn,
|
clickhouseConn: clickhouseConn,
|
||||||
licenseRepo: licenseRepo,
|
licenseRepo: licenseRepo,
|
||||||
scheduler: gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every at 00:00 UTC
|
scheduler: gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every at 00:00 UTC
|
||||||
|
modelDao: modelDao,
|
||||||
|
tenantID: tenantID,
|
||||||
}
|
}
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
@@ -75,12 +91,12 @@ func (lm *Manager) UploadUsage() {
|
|||||||
// check if license is present or not
|
// check if license is present or not
|
||||||
license, err := lm.licenseRepo.GetActiveLicense(ctx)
|
license, err := lm.licenseRepo.GetActiveLicense(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to get active license: %v", zap.Error(err))
|
zap.L().Error("failed to get active license", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if license == nil {
|
if license == nil {
|
||||||
// we will not start the usage reporting if license is not present.
|
// we will not start the usage reporting if license is not present.
|
||||||
zap.S().Info("no license present, skipping usage reporting")
|
zap.L().Info("no license present, skipping usage reporting")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -107,7 +123,7 @@ func (lm *Manager) UploadUsage() {
|
|||||||
dbusages := []model.UsageDB{}
|
dbusages := []model.UsageDB{}
|
||||||
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
|
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
|
||||||
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
|
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
|
||||||
zap.S().Errorf("failed to get usage from clickhouse: %v", zap.Error(err))
|
zap.L().Error("failed to get usage from clickhouse: %v", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for _, u := range dbusages {
|
for _, u := range dbusages {
|
||||||
@@ -117,24 +133,33 @@ func (lm *Manager) UploadUsage() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(usages) <= 0 {
|
if len(usages) <= 0 {
|
||||||
zap.S().Info("no snapshots to upload, skipping.")
|
zap.L().Info("no snapshots to upload, skipping.")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
zap.S().Info("uploading usage data")
|
zap.L().Info("uploading usage data")
|
||||||
|
|
||||||
|
orgName := ""
|
||||||
|
orgNames, orgError := lm.modelDao.GetOrgs(ctx)
|
||||||
|
if orgError != nil {
|
||||||
|
zap.L().Error("failed to get org data: %v", zap.Error(orgError))
|
||||||
|
}
|
||||||
|
if len(orgNames) == 1 {
|
||||||
|
orgName = orgNames[0].Name
|
||||||
|
}
|
||||||
|
|
||||||
usagesPayload := []model.Usage{}
|
usagesPayload := []model.Usage{}
|
||||||
for _, usage := range usages {
|
for _, usage := range usages {
|
||||||
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
|
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("error while decrypting usage data: %v", zap.Error(err))
|
zap.L().Error("error while decrypting usage data: %v", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
usageData := model.Usage{}
|
usageData := model.Usage{}
|
||||||
err = json.Unmarshal(usageDataBytes, &usageData)
|
err = json.Unmarshal(usageDataBytes, &usageData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("error while unmarshalling usage data: %v", zap.Error(err))
|
zap.L().Error("error while unmarshalling usage data: %v", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -142,6 +167,8 @@ func (lm *Manager) UploadUsage() {
|
|||||||
usageData.ExporterID = usage.ExporterID
|
usageData.ExporterID = usage.ExporterID
|
||||||
usageData.Type = usage.Type
|
usageData.Type = usage.Type
|
||||||
usageData.Tenant = usage.Tenant
|
usageData.Tenant = usage.Tenant
|
||||||
|
usageData.OrgName = orgName
|
||||||
|
usageData.TenantId = lm.tenantID
|
||||||
usagesPayload = append(usagesPayload, usageData)
|
usagesPayload = append(usagesPayload, usageData)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -157,13 +184,13 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
|
|||||||
for i := 1; i <= MaxRetries; i++ {
|
for i := 1; i <= MaxRetries; i++ {
|
||||||
apiErr := licenseserver.SendUsage(ctx, payload)
|
apiErr := licenseserver.SendUsage(ctx, payload)
|
||||||
if apiErr != nil && i == MaxRetries {
|
if apiErr != nil && i == MaxRetries {
|
||||||
zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
|
zap.L().Error("retries stopped : %v", zap.Error(apiErr))
|
||||||
// not returning error here since it is captured in the failed count
|
// not returning error here since it is captured in the failed count
|
||||||
return
|
return
|
||||||
} else if apiErr != nil {
|
} else if apiErr != nil {
|
||||||
// sleeping for exponential backoff
|
// sleeping for exponential backoff
|
||||||
sleepDuration := RetryInterval * time.Duration(i)
|
sleepDuration := RetryInterval * time.Duration(i)
|
||||||
zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err))
|
zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Error(apiErr.Err))
|
||||||
time.Sleep(sleepDuration)
|
time.Sleep(sleepDuration)
|
||||||
} else {
|
} else {
|
||||||
break
|
break
|
||||||
@@ -174,7 +201,7 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
|
|||||||
func (lm *Manager) Stop() {
|
func (lm *Manager) Stop() {
|
||||||
lm.scheduler.Stop()
|
lm.scheduler.Stop()
|
||||||
|
|
||||||
zap.S().Debug("sending usage data before shutting down")
|
zap.L().Info("sending usage data before shutting down")
|
||||||
// send usage before shutting down
|
// send usage before shutting down
|
||||||
lm.UploadUsage()
|
lm.UploadUsage()
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,3 @@
|
|||||||
node_modules
|
node_modules
|
||||||
.vscode
|
.vscode
|
||||||
build
|
|
||||||
.env
|
|
||||||
.git
|
.git
|
||||||
|
|||||||
@@ -86,6 +86,7 @@ module.exports = {
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
|
'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
|
||||||
|
'no-plusplus': 'off',
|
||||||
'jsx-a11y/label-has-associated-control': [
|
'jsx-a11y/label-has-associated-control': [
|
||||||
'error',
|
'error',
|
||||||
{
|
{
|
||||||
@@ -109,7 +110,6 @@ module.exports = {
|
|||||||
// eslint rules need to remove
|
// eslint rules need to remove
|
||||||
'@typescript-eslint/no-shadow': 'off',
|
'@typescript-eslint/no-shadow': 'off',
|
||||||
'import/no-cycle': 'off',
|
'import/no-cycle': 'off',
|
||||||
|
|
||||||
'prettier/prettier': [
|
'prettier/prettier': [
|
||||||
'error',
|
'error',
|
||||||
{},
|
{},
|
||||||
|
|||||||
@@ -2,3 +2,19 @@
|
|||||||
. "$(dirname "$0")/_/husky.sh"
|
. "$(dirname "$0")/_/husky.sh"
|
||||||
|
|
||||||
cd frontend && yarn run commitlint --edit $1
|
cd frontend && yarn run commitlint --edit $1
|
||||||
|
|
||||||
|
branch="$(git rev-parse --abbrev-ref HEAD)"
|
||||||
|
|
||||||
|
color_red="$(tput setaf 1)"
|
||||||
|
bold="$(tput bold)"
|
||||||
|
reset="$(tput sgr0)"
|
||||||
|
|
||||||
|
if [ "$branch" = "main" ]; then
|
||||||
|
echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$branch" = "develop" ]; then
|
||||||
|
echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
6
frontend/.prettierignore
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
# Ignore artifacts:
|
||||||
|
build
|
||||||
|
coverage
|
||||||
|
|
||||||
|
# Ignore all MD files:
|
||||||
|
**/*.md
|
||||||
@@ -1,38 +1,17 @@
|
|||||||
# Builder stage
|
FROM nginx:1.25.2-alpine
|
||||||
FROM node:16.15.0 as builder
|
|
||||||
|
|
||||||
# Add Maintainer Info
|
# Add Maintainer Info
|
||||||
LABEL maintainer="signoz"
|
LABEL maintainer="signoz"
|
||||||
|
|
||||||
ARG TARGETOS=linux
|
# Set working directory
|
||||||
ARG TARGETARCH
|
|
||||||
|
|
||||||
WORKDIR /frontend
|
WORKDIR /frontend
|
||||||
|
|
||||||
# Copy the package.json and .yarnrc files prior to install dependencies
|
|
||||||
COPY package.json ./
|
|
||||||
# Copy lock file
|
|
||||||
COPY yarn.lock ./
|
|
||||||
COPY .yarnrc ./
|
|
||||||
|
|
||||||
# Install the dependencies and make the folder
|
|
||||||
RUN CI=1 yarn install
|
|
||||||
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Build the project and copy the files
|
|
||||||
RUN yarn build
|
|
||||||
|
|
||||||
|
|
||||||
FROM nginx:1.24.0-alpine
|
|
||||||
|
|
||||||
COPY conf/default.conf /etc/nginx/conf.d/default.conf
|
|
||||||
|
|
||||||
# Remove default nginx index page
|
# Remove default nginx index page
|
||||||
RUN rm -rf /usr/share/nginx/html/*
|
RUN rm -rf /usr/share/nginx/html/*
|
||||||
|
|
||||||
# Copy from the stahg 1
|
# Copy custom nginx config and static files
|
||||||
COPY --from=builder /frontend/build /usr/share/nginx/html
|
COPY conf/default.conf /etc/nginx/conf.d/default.conf
|
||||||
|
COPY build /usr/share/nginx/html
|
||||||
|
|
||||||
EXPOSE 3301
|
EXPOSE 3301
|
||||||
|
|
||||||
|
|||||||
7
frontend/example.env
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
NODE_ENV="development"
|
||||||
|
BUNDLE_ANALYSER="true"
|
||||||
|
FRONTEND_API_ENDPOINT="http://localhost:3301/"
|
||||||
|
INTERCOM_APP_ID="intercom-app-id"
|
||||||
|
|
||||||
|
PLAYWRIGHT_TEST_BASE_URL="http://localhost:3301"
|
||||||
|
CI="1"
|
||||||
@@ -7,7 +7,7 @@ const config: Config.InitialOptions = {
|
|||||||
moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
|
moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
|
||||||
modulePathIgnorePatterns: ['dist'],
|
modulePathIgnorePatterns: ['dist'],
|
||||||
moduleNameMapper: {
|
moduleNameMapper: {
|
||||||
'\\.(css|less)$': '<rootDir>/__mocks__/cssMock.ts',
|
'\\.(css|less|scss)$': '<rootDir>/__mocks__/cssMock.ts',
|
||||||
},
|
},
|
||||||
globals: {
|
globals: {
|
||||||
extensionsToTreatAsEsm: ['.ts'],
|
extensionsToTreatAsEsm: ['.ts'],
|
||||||
@@ -20,9 +20,11 @@ const config: Config.InitialOptions = {
|
|||||||
transform: {
|
transform: {
|
||||||
'^.+\\.(ts|tsx)?$': 'ts-jest',
|
'^.+\\.(ts|tsx)?$': 'ts-jest',
|
||||||
'^.+\\.(js|jsx)$': 'babel-jest',
|
'^.+\\.(js|jsx)$': 'babel-jest',
|
||||||
|
'^.+\\.(css|scss|sass|less)$': 'jest-preview/transforms/css',
|
||||||
|
'^(?!.*\\.(js|jsx|mjs|cjs|ts|tsx|css|json)$)': 'jest-preview/transforms/file',
|
||||||
},
|
},
|
||||||
transformIgnorePatterns: [
|
transformIgnorePatterns: [
|
||||||
'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend)/)',
|
'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color)/)',
|
||||||
],
|
],
|
||||||
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
|
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
|
||||||
testPathIgnorePatterns: ['/node_modules/', '/public/'],
|
testPathIgnorePatterns: ['/node_modules/', '/public/'],
|
||||||
|
|||||||
@@ -7,6 +7,10 @@
|
|||||||
*/
|
*/
|
||||||
import '@testing-library/jest-dom';
|
import '@testing-library/jest-dom';
|
||||||
import 'jest-styled-components';
|
import 'jest-styled-components';
|
||||||
|
import './src/styles.scss';
|
||||||
|
|
||||||
|
import { server } from './src/mocks-server/server';
|
||||||
|
// Establish API mocking before all tests.
|
||||||
|
|
||||||
// Mock window.matchMedia
|
// Mock window.matchMedia
|
||||||
window.matchMedia =
|
window.matchMedia =
|
||||||
@@ -18,3 +22,9 @@ window.matchMedia =
|
|||||||
removeListener: function () {},
|
removeListener: function () {},
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
beforeAll(() => server.listen());
|
||||||
|
|
||||||
|
afterEach(() => server.resetHandlers());
|
||||||
|
|
||||||
|
afterAll(() => server.close());
|
||||||
|
|||||||
@@ -13,6 +13,8 @@
|
|||||||
"jest": "jest",
|
"jest": "jest",
|
||||||
"jest:coverage": "jest --coverage",
|
"jest:coverage": "jest --coverage",
|
||||||
"jest:watch": "jest --watch",
|
"jest:watch": "jest --watch",
|
||||||
|
"jest-preview": "jest-preview",
|
||||||
|
"test:debug": "npm-run-all -p test jest-preview",
|
||||||
"postinstall": "is-ci || yarn husky:configure",
|
"postinstall": "is-ci || yarn husky:configure",
|
||||||
"playwright": "npm run i18n:generate-hash && NODE_ENV=testing playwright test --config=./playwright.config.ts",
|
"playwright": "npm run i18n:generate-hash && NODE_ENV=testing playwright test --config=./playwright.config.ts",
|
||||||
"playwright:local:debug": "PWDEBUG=console yarn playwright --headed --browser=chromium",
|
"playwright:local:debug": "PWDEBUG=console yarn playwright --headed --browser=chromium",
|
||||||
@@ -29,27 +31,39 @@
|
|||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@ant-design/colors": "6.0.0",
|
"@ant-design/colors": "6.0.0",
|
||||||
"@ant-design/icons": "4.8.0",
|
"@ant-design/icons": "4.8.0",
|
||||||
"@grafana/data": "^8.4.3",
|
"@dnd-kit/core": "6.1.0",
|
||||||
|
"@dnd-kit/modifiers": "7.0.0",
|
||||||
|
"@dnd-kit/sortable": "8.0.0",
|
||||||
|
"@grafana/data": "^9.5.2",
|
||||||
|
"@mdx-js/loader": "2.3.0",
|
||||||
|
"@mdx-js/react": "2.3.0",
|
||||||
"@monaco-editor/react": "^4.3.1",
|
"@monaco-editor/react": "^4.3.1",
|
||||||
|
"@radix-ui/react-tabs": "1.0.4",
|
||||||
|
"@radix-ui/react-tooltip": "1.0.7",
|
||||||
|
"@sentry/react": "7.102.1",
|
||||||
|
"@sentry/webpack-plugin": "2.14.2",
|
||||||
|
"@signozhq/design-tokens": "0.0.8",
|
||||||
|
"@uiw/react-md-editor": "3.23.5",
|
||||||
"@xstate/react": "^3.0.0",
|
"@xstate/react": "^3.0.0",
|
||||||
"ansi-to-html": "0.7.2",
|
"ansi-to-html": "0.7.2",
|
||||||
"antd": "5.0.5",
|
"antd": "5.11.0",
|
||||||
"antd-table-saveas-excel": "2.2.1",
|
"antd-table-saveas-excel": "2.2.1",
|
||||||
"axios": "^0.21.0",
|
"axios": "1.6.2",
|
||||||
"babel-eslint": "^10.1.0",
|
"babel-eslint": "^10.1.0",
|
||||||
"babel-jest": "^26.6.0",
|
"babel-jest": "^29.6.4",
|
||||||
"babel-loader": "8.1.0",
|
"babel-loader": "9.1.3",
|
||||||
"babel-plugin-named-asset-import": "^0.3.7",
|
"babel-plugin-named-asset-import": "^0.3.7",
|
||||||
"babel-preset-minify": "^0.5.1",
|
"babel-preset-minify": "^0.5.1",
|
||||||
"babel-preset-react-app": "^10.0.0",
|
"babel-preset-react-app": "^10.0.1",
|
||||||
"chart.js": "3.9.1",
|
"chart.js": "3.9.1",
|
||||||
"chartjs-adapter-date-fns": "^2.0.0",
|
"chartjs-adapter-date-fns": "^2.0.0",
|
||||||
"chartjs-plugin-annotation": "^1.4.0",
|
"chartjs-plugin-annotation": "^1.4.0",
|
||||||
|
"classnames": "2.3.2",
|
||||||
"color": "^4.2.1",
|
"color": "^4.2.1",
|
||||||
"color-alpha": "1.1.3",
|
"color-alpha": "1.1.3",
|
||||||
"cross-env": "^7.0.3",
|
"cross-env": "^7.0.3",
|
||||||
"css-loader": "4.3.0",
|
"css-loader": "5.0.0",
|
||||||
"css-minimizer-webpack-plugin": "^3.2.0",
|
"css-minimizer-webpack-plugin": "5.0.1",
|
||||||
"dayjs": "^1.10.7",
|
"dayjs": "^1.10.7",
|
||||||
"dompurify": "3.0.0",
|
"dompurify": "3.0.0",
|
||||||
"dotenv": "8.2.0",
|
"dotenv": "8.2.0",
|
||||||
@@ -58,7 +72,8 @@
|
|||||||
"file-loader": "6.1.1",
|
"file-loader": "6.1.1",
|
||||||
"fontfaceobserver": "2.3.0",
|
"fontfaceobserver": "2.3.0",
|
||||||
"history": "4.10.1",
|
"history": "4.10.1",
|
||||||
"html-webpack-plugin": "5.1.0",
|
"html-webpack-plugin": "5.5.0",
|
||||||
|
"http-proxy-middleware": "2.0.6",
|
||||||
"i18next": "^21.6.12",
|
"i18next": "^21.6.12",
|
||||||
"i18next-browser-languagedetector": "^6.1.3",
|
"i18next-browser-languagedetector": "^6.1.3",
|
||||||
"i18next-http-backend": "^1.3.2",
|
"i18next-http-backend": "^1.3.2",
|
||||||
@@ -67,38 +82,45 @@
|
|||||||
"less": "^4.1.2",
|
"less": "^4.1.2",
|
||||||
"less-loader": "^10.2.0",
|
"less-loader": "^10.2.0",
|
||||||
"lodash-es": "^4.17.21",
|
"lodash-es": "^4.17.21",
|
||||||
|
"lucide-react": "0.321.0",
|
||||||
"mini-css-extract-plugin": "2.4.5",
|
"mini-css-extract-plugin": "2.4.5",
|
||||||
"papaparse": "5.4.1",
|
"papaparse": "5.4.1",
|
||||||
"react": "18.2.0",
|
"react": "18.2.0",
|
||||||
"react-addons-update": "15.6.3",
|
"react-addons-update": "15.6.3",
|
||||||
|
"react-beautiful-dnd": "13.1.1",
|
||||||
"react-dnd": "16.0.1",
|
"react-dnd": "16.0.1",
|
||||||
"react-dnd-html5-backend": "16.0.1",
|
"react-dnd-html5-backend": "16.0.1",
|
||||||
"react-dom": "18.2.0",
|
"react-dom": "18.2.0",
|
||||||
"react-drag-listview": "2.0.0",
|
"react-drag-listview": "2.0.0",
|
||||||
"react-force-graph": "^1.41.0",
|
"react-error-boundary": "4.0.11",
|
||||||
|
"react-force-graph": "^1.43.0",
|
||||||
|
"react-full-screen": "1.1.1",
|
||||||
"react-grid-layout": "^1.3.4",
|
"react-grid-layout": "^1.3.4",
|
||||||
"react-helmet-async": "1.3.0",
|
"react-helmet-async": "1.3.0",
|
||||||
"react-i18next": "^11.16.1",
|
"react-i18next": "^11.16.1",
|
||||||
"react-intersection-observer": "9.4.1",
|
"react-markdown": "8.0.7",
|
||||||
"react-query": "^3.34.19",
|
"react-query": "3.39.3",
|
||||||
"react-redux": "^7.2.2",
|
"react-redux": "^7.2.2",
|
||||||
"react-router-dom": "^5.2.0",
|
"react-router-dom": "^5.2.0",
|
||||||
|
"react-syntax-highlighter": "15.5.0",
|
||||||
"react-use": "^17.3.2",
|
"react-use": "^17.3.2",
|
||||||
"react-virtuoso": "4.0.3",
|
"react-virtuoso": "4.0.3",
|
||||||
"redux": "^4.0.5",
|
"redux": "^4.0.5",
|
||||||
"redux-thunk": "^2.3.0",
|
"redux-thunk": "^2.3.0",
|
||||||
|
"rehype-raw": "7.0.0",
|
||||||
"stream": "^0.0.2",
|
"stream": "^0.0.2",
|
||||||
"style-loader": "1.3.0",
|
"style-loader": "1.3.0",
|
||||||
"styled-components": "^5.2.1",
|
"styled-components": "^5.3.11",
|
||||||
"terser-webpack-plugin": "^5.2.5",
|
"terser-webpack-plugin": "^5.2.5",
|
||||||
"timestamp-nano": "^1.0.0",
|
"timestamp-nano": "^1.0.0",
|
||||||
"ts-node": "^10.2.1",
|
"ts-node": "^10.2.1",
|
||||||
"tsconfig-paths-webpack-plugin": "^3.5.1",
|
"tsconfig-paths-webpack-plugin": "^3.5.1",
|
||||||
"typescript": "^4.0.5",
|
"typescript": "^4.0.5",
|
||||||
|
"uplot": "1.6.26",
|
||||||
"uuid": "^8.3.2",
|
"uuid": "^8.3.2",
|
||||||
"web-vitals": "^0.2.4",
|
"web-vitals": "^0.2.4",
|
||||||
"webpack": "^5.23.0",
|
"webpack": "5.88.2",
|
||||||
"webpack-dev-server": "^4.3.1",
|
"webpack-dev-server": "^4.15.1",
|
||||||
"xstate": "^4.31.0"
|
"xstate": "^4.31.0"
|
||||||
},
|
},
|
||||||
"browserslist": {
|
"browserslist": {
|
||||||
@@ -114,13 +136,13 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@babel/core": "^7.12.3",
|
"@babel/core": "^7.22.11",
|
||||||
"@babel/plugin-proposal-class-properties": "^7.12.13",
|
"@babel/plugin-proposal-class-properties": "^7.18.6",
|
||||||
"@babel/plugin-syntax-jsx": "^7.12.13",
|
"@babel/plugin-syntax-jsx": "^7.12.13",
|
||||||
"@babel/preset-env": "^7.12.17",
|
"@babel/preset-env": "^7.22.14",
|
||||||
"@babel/preset-react": "^7.12.13",
|
"@babel/preset-react": "^7.12.13",
|
||||||
"@babel/preset-typescript": "^7.12.17",
|
"@babel/preset-typescript": "^7.21.4",
|
||||||
"@commitlint/cli": "^16.2.4",
|
"@commitlint/cli": "^16.3.0",
|
||||||
"@commitlint/config-conventional": "^16.2.4",
|
"@commitlint/config-conventional": "^16.2.4",
|
||||||
"@jest/globals": "^27.5.1",
|
"@jest/globals": "^27.5.1",
|
||||||
"@playwright/test": "^1.22.0",
|
"@playwright/test": "^1.22.0",
|
||||||
@@ -140,31 +162,33 @@
|
|||||||
"@types/papaparse": "5.3.7",
|
"@types/papaparse": "5.3.7",
|
||||||
"@types/react": "18.0.26",
|
"@types/react": "18.0.26",
|
||||||
"@types/react-addons-update": "0.14.21",
|
"@types/react-addons-update": "0.14.21",
|
||||||
|
"@types/react-beautiful-dnd": "13.1.8",
|
||||||
"@types/react-dom": "18.0.10",
|
"@types/react-dom": "18.0.10",
|
||||||
"@types/react-grid-layout": "^1.1.2",
|
"@types/react-grid-layout": "^1.1.2",
|
||||||
"@types/react-helmet-async": "1.0.3",
|
"@types/react-helmet-async": "1.0.3",
|
||||||
"@types/react-redux": "^7.1.11",
|
"@types/react-redux": "^7.1.11",
|
||||||
"@types/react-resizable": "3.0.3",
|
"@types/react-resizable": "3.0.3",
|
||||||
"@types/react-router-dom": "^5.1.6",
|
"@types/react-router-dom": "^5.1.6",
|
||||||
|
"@types/react-syntax-highlighter": "15.5.7",
|
||||||
|
"@types/redux-mock-store": "1.0.4",
|
||||||
"@types/styled-components": "^5.1.4",
|
"@types/styled-components": "^5.1.4",
|
||||||
"@types/uuid": "^8.3.1",
|
"@types/uuid": "^8.3.1",
|
||||||
"@types/webpack": "^5.28.0",
|
"@types/webpack": "^5.28.0",
|
||||||
"@types/webpack-dev-server": "^4.3.0",
|
"@types/webpack-dev-server": "^4.7.2",
|
||||||
"@typescript-eslint/eslint-plugin": "^4.28.2",
|
"@typescript-eslint/eslint-plugin": "^4.33.0",
|
||||||
"@typescript-eslint/parser": "^4.28.2",
|
"@typescript-eslint/parser": "^4.33.0",
|
||||||
"@welldone-software/why-did-you-render": "6.2.1",
|
|
||||||
"autoprefixer": "^9.0.0",
|
"autoprefixer": "^9.0.0",
|
||||||
"babel-plugin-styled-components": "^1.12.0",
|
"babel-plugin-styled-components": "^1.12.0",
|
||||||
"compression-webpack-plugin": "9.0.0",
|
"compression-webpack-plugin": "9.0.0",
|
||||||
"copy-webpack-plugin": "^8.1.0",
|
"copy-webpack-plugin": "^8.1.0",
|
||||||
"critters-webpack-plugin": "^3.0.1",
|
"critters-webpack-plugin": "^3.0.1",
|
||||||
"eslint": "^7.30.0",
|
"eslint": "^7.32.0",
|
||||||
"eslint-config-airbnb": "^19.0.4",
|
"eslint-config-airbnb": "^19.0.4",
|
||||||
"eslint-config-airbnb-typescript": "^16.1.4",
|
"eslint-config-airbnb-typescript": "^16.1.4",
|
||||||
"eslint-config-prettier": "^8.3.0",
|
"eslint-config-prettier": "^8.3.0",
|
||||||
"eslint-config-standard": "^16.0.3",
|
"eslint-config-standard": "^16.0.3",
|
||||||
"eslint-plugin-import": "^2.25.4",
|
"eslint-plugin-import": "^2.28.1",
|
||||||
"eslint-plugin-jest": "^26.1.2",
|
"eslint-plugin-jest": "^26.9.0",
|
||||||
"eslint-plugin-jsx-a11y": "^6.5.1",
|
"eslint-plugin-jsx-a11y": "^6.5.1",
|
||||||
"eslint-plugin-node": "^11.1.0",
|
"eslint-plugin-node": "^11.1.0",
|
||||||
"eslint-plugin-prettier": "^4.0.0",
|
"eslint-plugin-prettier": "^4.0.0",
|
||||||
@@ -175,27 +199,38 @@
|
|||||||
"eslint-plugin-sonarjs": "^0.12.0",
|
"eslint-plugin-sonarjs": "^0.12.0",
|
||||||
"husky": "^7.0.4",
|
"husky": "^7.0.4",
|
||||||
"is-ci": "^3.0.1",
|
"is-ci": "^3.0.1",
|
||||||
"jest-playwright-preset": "^1.7.0",
|
"jest-playwright-preset": "^1.7.2",
|
||||||
|
"jest-preview": "0.3.1",
|
||||||
"jest-styled-components": "^7.0.8",
|
"jest-styled-components": "^7.0.8",
|
||||||
"lint-staged": "^12.3.7",
|
"lint-staged": "^12.5.0",
|
||||||
|
"msw": "1.3.2",
|
||||||
|
"npm-run-all": "latest",
|
||||||
"portfinder-sync": "^0.0.2",
|
"portfinder-sync": "^0.0.2",
|
||||||
"prettier": "2.2.1",
|
"prettier": "2.2.1",
|
||||||
|
"raw-loader": "4.0.2",
|
||||||
"react-hooks-testing-library": "0.6.0",
|
"react-hooks-testing-library": "0.6.0",
|
||||||
"react-hot-loader": "^4.13.0",
|
"react-hot-loader": "^4.13.0",
|
||||||
"react-resizable": "3.0.4",
|
"react-resizable": "3.0.4",
|
||||||
"ts-jest": "^27.1.4",
|
"redux-mock-store": "1.5.4",
|
||||||
|
"sass": "1.66.1",
|
||||||
|
"sass-loader": "13.3.2",
|
||||||
|
"ts-jest": "^27.1.5",
|
||||||
"ts-node": "^10.2.1",
|
"ts-node": "^10.2.1",
|
||||||
"typescript-plugin-css-modules": "^3.4.0",
|
"typescript-plugin-css-modules": "5.0.1",
|
||||||
"webpack-bundle-analyzer": "^4.5.0",
|
"webpack-bundle-analyzer": "^4.5.0",
|
||||||
"webpack-cli": "^4.9.2"
|
"webpack-cli": "^4.9.2"
|
||||||
},
|
},
|
||||||
"lint-staged": {
|
"lint-staged": {
|
||||||
"*.(js|jsx|ts|tsx)": [
|
"*.(js|jsx|ts|tsx)": [
|
||||||
"eslint --fix"
|
"eslint --fix",
|
||||||
|
"sh scripts/typecheck-staged.sh"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"resolutions": {
|
"resolutions": {
|
||||||
"@types/react": "18.0.26",
|
"@types/react": "18.0.26",
|
||||||
"@types/react-dom": "18.0.10"
|
"@types/react-dom": "18.0.10",
|
||||||
|
"debug": "4.3.4",
|
||||||
|
"semver": "7.5.4",
|
||||||
|
"xml2js": "0.5.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
1
frontend/public/Icons/awwSnap.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="32" height="33" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M15.91 28.675c-6.199 0-12.888-3.888-12.888-12.421S9.711 3.832 15.911 3.832c3.444 0 6.621 1.134 8.977 3.2 2.555 2.267 3.91 5.466 3.91 9.222 0 3.755-1.355 6.933-3.91 9.2-2.356 2.066-5.555 3.221-8.977 3.221z" fill="url(#prefix__paint0_radial_2122_6520)"/><path d="M26.552 8.87c1.185 1.91 1.803 4.186 1.803 6.717 0 3.756-1.356 6.933-3.911 9.2-2.356 2.066-5.556 3.222-8.978 3.222-4.013 0-8.221-1.634-10.706-5.098 2.391 3.924 6.889 5.764 11.15 5.764 3.423 0 6.623-1.155 8.978-3.222 2.555-2.266 3.911-5.444 3.911-9.2 0-2.83-.771-5.346-2.247-7.383z" fill="#EB8F00"/><path d="M20.123 22.905c0 1.685-1.846 2.667-4.124 2.667-2.277 0-4.124-.989-4.124-2.667 0-1.677 1.847-3.522 4.124-3.522 2.278 0 4.124 1.838 4.124 3.522zM12.06 14.852l1.88-1.748c.267-.331.307-.778.038-1.045-.353-.355-.98-.269-1.32.136-.018.033-.03.042-.049.075l-1.333 1.938-1.804-1.682c-.027-.03-.042-.034-.067-.062-.42-.32-1.05-.267-1.315.157-.207.32-.07.745.264 1.011l2.313 1.372-1.96 1.833c-.262.326-.31.77-.04 1.044.351.358.978.276 1.32-.127.018-.033.031-.042.051-.075l1.405-2.031 1.706 1.609c.027.029.043.035.067.064.418.322 1.049.273 1.318-.149.206-.32.07-.746-.26-1.013l-2.213-1.307zM20.61 14.852l-1.879-1.748c-.267-.331-.307-.778-.036-1.045.354-.355.978-.269 1.318.136.018.033.034.042.051.075l1.334 1.938 1.806-1.682c.025-.03.04-.034.065-.062.422-.32 1.05-.267 1.317.157.205.32.067.745-.266 1.011L22 15.004l1.96 1.833c.268.33.313.775.042 1.044-.349.358-.976.276-1.318-.127-.02-.033-.033-.042-.051-.075l-1.404-2.031-1.71 1.609c-.024.029-.04.035-.066.064-.418.322-1.046.273-1.315-.149-.21-.32-.074-.746.257-1.013l2.216-1.307zM11.911 8.696c.511.044.711-.645.178-.8a4.07 4.07 0 00-1.289-.133A4.596 4.596 0 007.689 9.14c-.378.4.156.89.556.6a5.829 5.829 0 013.666-1.044zM20.044 8.696a5.85 5.85 0 013.689 1.044c.4.29.933-.2.555-.6a4.645 4.645 0 00-3.11-1.377 4.07 4.07 0 00-1.29.133.408.408 0 00-.282.504c.053.194.24.318.438.296z" 
fill="#422B0D"/><defs><radialGradient id="prefix__paint0_radial_2122_6520" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="translate(15.91 16.254) scale(12.657)"><stop offset=".5" stop-color="#FDE030"/><stop offset=".92" stop-color="#F7C02B"/><stop offset="1" stop-color="#F4A223"/></radialGradient></defs></svg>
|
||||||
|
After Width: | Height: | Size: 2.3 KiB |
1
frontend/public/Icons/cable-car.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="16" height="16" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#prefix__clip0_2022_1972)" stroke="#fff" stroke-width="1.333" stroke-linecap="round" stroke-linejoin="round"><path d="M6.667 2h.006M9.333 1.333h.007M1.333 6l13.334-3.333M8 8V4.333M11.333 8H4.667a2 2 0 00-2 2v2.667a2 2 0 002 2h6.666a2 2 0 002-2V10a2 2 0 00-2-2zM6 8v3.333M10 8v3.333M2.667 11.334h10.666"/></g><defs><clipPath id="prefix__clip0_2022_1972"><path fill="#fff" d="M0 0h16v16H0z"/></clipPath></defs></svg>
|
||||||
|
After Width: | Height: | Size: 507 B |
1
frontend/public/Icons/configure.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="16" height="16" fill="none" xmlns="http://www.w3.org/2000/svg"><g stroke="#C0C1C3" stroke-width="1.333" stroke-linecap="round"><path d="M9.71 4.745a.576.576 0 000 .806l.922.922a.576.576 0 00.806 0l2.171-2.171a3.455 3.455 0 01-4.572 4.572l-3.98 3.98a1.222 1.222 0 11-1.727-1.728l3.98-3.98a3.455 3.455 0 014.572-4.572L9.717 4.739l-.006.006z" stroke-linejoin="round"/><path d="M4 7L2.527 5.566a1.333 1.333 0 01-.013-1.898l.81-.81a1.333 1.333 0 011.991.119L5.333 3M10.75 10.988l1.179 1.178m0 0l-.138.138a.833.833 0 00.387 1.397v0a.833.833 0 00.792-.219l.446-.446a.833.833 0 00.176-.917v0a.833.833 0 00-1.355-.261l-.308.308z"/></g></svg>
|
||||||
|
After Width: | Height: | Size: 644 B |
1
frontend/public/Icons/emptyState.svg
Normal file
|
After Width: | Height: | Size: 5.6 KiB |
1
frontend/public/Icons/group.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="16" height="16" fill="none" xmlns="http://www.w3.org/2000/svg"><g stroke="#C0C1C3" stroke-width="1.333" stroke-linecap="round" stroke-linejoin="round"><path d="M2 4.667V3.333C2 2.6 2.6 2 3.333 2h1.334M11.333 2h1.334C13.4 2 14 2.6 14 3.333v1.334M14 11.334v1.333C14 13.4 13.4 14 12.667 14h-1.334M4.667 14H3.333C2.6 14 2 13.4 2 12.667v-1.333M8.667 4.667H5.333a.667.667 0 00-.666.666v2c0 .368.298.667.666.667h3.334a.667.667 0 00.666-.667v-2a.667.667 0 00-.666-.667zM10.667 8H7.333a.667.667 0 00-.666.667v2c0 .368.298.666.666.666h3.334a.667.667 0 00.666-.666v-2A.667.667 0 0010.667 8z"/></g></svg>
|
||||||
|
After Width: | Height: | Size: 604 B |
BIN
frontend/public/Icons/loading-plane.gif
Normal file
|
After Width: | Height: | Size: 88 KiB |
1
frontend/public/Icons/promQL.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="24" height="24" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M12 2c1 2.538 2.5 2.962 3.5 3.808.942.78 1.481 1.845 1.5 2.961 0 1.122-.527 2.198-1.464 2.992C14.598 12.554 13.326 13 12 13s-2.598-.446-3.536-1.24C7.527 10.968 7 9.892 7 8.77c0-.255 0-.508.1-.762.085.25.236.48.443.673.207.193.463.342.75.437a2.334 2.334 0 001.767-.128c.263-.135.485-.32.65-.539.166-.22.269-.468.301-.727a1.452 1.452 0 00-.11-.765 1.699 1.699 0 00-.501-.644C8 4.115 11 2 12 2zM17 16l-5 6-5-6h10z" stroke="#fff" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
|
||||||
|
After Width: | Height: | Size: 581 B |
1
frontend/public/Icons/redis-logo.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="24" height="24" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M23.06 17.526c-1.281.668-7.916 3.396-9.328 4.132-1.413.736-2.198.73-3.314.196C9.303 21.32 2.242 18.468.97 17.86c-.636-.303-.97-.56-.97-.802v-2.426s9.192-2.001 10.676-2.534c1.484-.532 1.999-.551 3.262-.089 1.263.463 8.814 1.826 10.062 2.283v2.391c0 .24-.288.503-.94.843z" fill="#912626"/><path d="M23.06 15.114c-1.281.668-7.916 3.396-9.329 4.132-1.412.737-2.197.73-3.313.196C9.302 18.91 2.242 16.056.97 15.45c-1.272-.608-1.298-1.027-.049-1.516 1.25-.49 8.271-3.244 9.755-3.776 1.484-.533 1.999-.552 3.262-.09 1.263.463 7.858 3.088 9.106 3.546 1.248.457 1.296.834.015 1.501z" fill="#C6302B"/><path d="M23.06 13.6c-1.281.668-7.916 3.396-9.328 4.133-1.413.736-2.198.73-3.314.196S2.242 14.543.97 13.935c-.636-.304-.97-.56-.97-.802v-2.426s9.192-2.001 10.676-2.534c1.484-.532 1.999-.551 3.262-.089C15.2 8.547 22.752 9.91 24 10.366v2.392c0 .24-.288.503-.94.843z" fill="#912626"/><path d="M23.06 11.19c-1.281.667-7.916 3.395-9.329 4.131-1.412.737-2.197.73-3.313.196-1.116-.533-8.176-3.386-9.448-3.993-1.272-.608-1.298-1.027-.049-1.516 1.25-.49 8.271-3.244 9.755-3.776 1.484-.533 1.999-.552 3.262-.09 1.263.463 7.858 3.088 9.106 3.545 1.248.458 1.296.835.015 1.502z" fill="#C6302B"/><path d="M23.06 9.53c-1.281.668-7.916 3.396-9.328 4.132-1.413.737-2.198.73-3.314.196-1.116-.533-8.176-3.386-9.448-3.993C.334 9.56 0 9.305 0 9.062V6.636s9.192-2 10.676-2.533c1.484-.533 1.999-.552 3.262-.09C15.2 4.477 22.752 5.84 24 6.297v2.392c0 .24-.288.502-.94.842z" fill="#912626"/><path d="M23.06 7.118c-1.281.668-7.916 3.396-9.329 4.132-1.412.737-2.197.73-3.313.196C9.303 10.913 2.242 8.061.97 7.453-.302 6.845-.328 6.427.921 5.937c1.25-.489 8.271-3.244 9.755-3.776 1.484-.532 1.999-.552 3.262-.089 1.263.463 7.858 3.088 9.106 3.545 1.248.457 1.296.834.015 1.501z" fill="#C6302B"/><path d="M14.933 4.758l-2.064.215-.462 1.111-.746-1.24L9.28 4.63l1.778-.641-.534-.985 1.665.651 1.569-.513-.424 1.017 1.6.6zm-2.649 5.393l-3.85-1.597 
5.517-.847-1.667 2.444zM6.945 5.376c1.63 0 2.95.512 2.95 1.143 0 .632-1.32 1.144-2.95 1.144-1.629 0-2.95-.512-2.95-1.144 0-.63 1.321-1.143 2.95-1.143z" fill="#fff"/><path d="M17.371 5.062l3.266 1.29-3.263 1.29-.003-2.58z" fill="#621B1C"/><path d="M13.758 6.492l3.613-1.43.003 2.58-.354.139-3.262-1.29z" fill="#9A2928"/></svg>
|
||||||
|
After Width: | Height: | Size: 2.3 KiB |
1
frontend/public/Icons/tetra-pack.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg width="32" height="33" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M14.309 13.108l-6.704-3.32s-.016-.317.284-.477c.302-.16 5.053-2.107 5.435-2.107.383 0 2.62.431 4.249.793 1.629.363 5.933 1.287 5.953 1.57.02.281-4.404 4.806-4.404 4.806l-4.813-1.265z" fill="#C3FECE"/><path d="M20.423 11.037s-2.811-.826-5.546-1.469c-1.274-.3-5.016-1.084-5.016-1.084s.398-.173.698-.3c.305-.127.547-.193.547-.193s2.44.486 4.253.873c2.453.522 5.886 1.547 5.966 1.709.082.16-.902.464-.902.464z" fill="#fff"/><path d="M14.98 10.26c-.598.415-.011.666 1.09.924 1.207.282 2.127.698 2.903.247.7-.405-1.014-.845-1.8-1.014-.6-.129-1.731-.478-2.193-.158z" fill="#ACB1B2"/><path d="M17.17 11.095c-.005 0 .02-4.869.02-5.049 0-.18-.203-.342.02-.724.222-.382.804-.342.804-.342s2.416-.702 3.38-.945c.964-.242 3.098-.804 3.098-.804l.142 1.22s-2.236.631-3.342.913c-1.107.282-2.616.745-2.616.745l-.222.202.064 4.757s-.206.231-.668.231c-.45-.002-.68-.204-.68-.204z" fill="#FFD816"/><path d="M24.095 3.855c.018.38.22.616.46.616.24 0 .404-.307.369-.707-.038-.398-.296-.58-.516-.506-.22.073-.327.32-.313.597zM18.46 6.422a.209.209 0 01-.123-.038l-1.153-.769a.225.225 0 01-.063-.309.222.222 0 01.31-.062l1.153.769a.224.224 0 01.062.309.228.228 0 01-.187.1z" fill="#FEB804"/><path d="M18.636 6.235a.225.225 0 01-.178-.089c-.295-.393-.633-.84-.693-.909a.225.225 0 01-.031-.284.222.222 0 01.309-.062c.04.027.062.042.771.986.073.098.007.238-.091.312-.04.03-.04.046-.087.046z" fill="#FEB804"/><path d="M18.365 6.609c-.01 0-.022 0-.035-.003l-1.111-.175a.221.221 0 11.069-.438l1.11.176c.12.02.225.042.205.164-.016.107-.129.276-.238.276z" fill="#FEB804"/><path d="M7.596 9.764c.353 0 3.188.744 4.65 1.013 1.463.27 5.878 1.314 6.027 1.342.149.03.12 1.94.12 1.94s2.089 10.8 2.029 11.309c-.06.506-1.431 4.415-1.431 4.415s-.807.12-2.865-.478c-2.057-.598-7.488-2.089-7.817-2.506-.329-.418-.12-5.938-.298-9.338-.182-3.402-.415-7.697-.415-7.697z" fill="#79DD8A"/><path d="M24.06 
27.036c.113-.375-.518-4.402-.607-8.101-.089-3.698.229-9.324.076-9.369-.154-.042-5.256 2.553-5.256 2.553s-.022 3.671.04 7.133c.08 4.48.438 10.41.676 10.53.238.12 2.302-1.035 2.924-1.372 1.102-.598 2.058-1.074 2.147-1.374z" fill="#02AB46"/><path d="M20.408 13.82l.011-2.787.914-.45.026 3.056-.422.74-.529-.56z" fill="#DBDFE1"/><path d="M12.322 14.797c-1.973-.211-3.34 1.549-3.233 3.842.127 2.709 1.91 4.704 3.842 5.102 1.93.398 3.802-.44 3.842-3.402.044-3.087-2.669-5.353-4.451-5.542z" fill="#FEFEFD"/><path d="M13.637 17.27s-.4-1.344-1.602-.986c-1.202.357-1.853 2.973.187 4.15 1.96 1.131 3.764-.944 3.133-2.288-.574-1.227-1.718-.876-1.718-.876z" fill="#EF5B44"/><path d="M13.18 15.626c-.136.049-.243.602-.1 1.13.106.396.446.939.643.903.158-.029.278-.651.13-1.173-.174-.602-.516-.918-.674-.86z" fill="#B8CF17"/><path d="M13.15 18.746c-.564-.171-1.2 1.769-.057 2.977 1.26 1.331 2.73.158 2.69-.1-.057-.358-1.044-.615-1.53-1.215-.487-.605-.774-1.562-1.102-1.662z" fill="#FD8F01"/><path d="M11.346 18.417s.113-.849-.673-.802c-.76.046-.574.944-.574.944s-.633.076-.526.778c.08.53.64.524.64.524s-.616.242-.336.945c.249.624.822.373.822.373s-.21.609.287.93c.42.272.787.043.787.043s-.023.52.557.616c.703.115 1.007-.74.507-1.136-.38-.3-.724-.067-.724-.067s.07-.166.004-.357c-.045-.125-.116-.171-.116-.171s.616-.058.516-.758c-.1-.702-.716-.616-.716-.616s.358-.286.216-.802c-.14-.518-.671-.444-.671-.444z" fill="#A281D0"/><path d="M21.04 14.595c-.511 0-2.691-2.167-2.711-2.189a.222.222 0 01.024-.313.224.224 0 01.314.022c.14.155 1.806 1.702 2.286 2 .311-.465 1.322-2.498 2.191-4.333a.224.224 0 01.296-.107.223.223 0 01.106.296c-2.142 4.526-2.353 4.586-2.466 4.617-.013.007-.027.007-.04.007z" fill="#2D802D"/></svg>
|
||||||
|
After Width: | Height: | Size: 3.6 KiB |
19
frontend/public/Images/eyesEmoji.svg
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
<svg width="32" height="33" viewBox="0 0 32 33" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<path d="M9.36806 25.9481C5.93935 25.9481 3.15283 21.7098 3.15283 16.5002C3.15283 11.2907 5.94157 7.05238 9.36806 7.05238C12.7945 7.05238 15.5833 11.2907 15.5833 16.5002C15.5833 21.7098 12.7945 25.9481 9.36806 25.9481Z" fill="#FAFAFA"/>
|
||||||
|
<path d="M9.36815 7.49694C10.8414 7.49694 12.2524 8.38594 13.3391 10.0017C14.499 11.7241 15.139 14.0333 15.139 16.5003C15.139 18.9673 14.499 21.2764 13.3391 22.9989C12.2524 24.6146 10.8414 25.5036 9.36815 25.5036C7.89489 25.5036 6.48385 24.6146 5.39724 22.9989C4.23508 21.2764 3.59734 18.9673 3.59734 16.5003C3.59734 14.0333 4.23731 11.7241 5.39724 10.0017C6.48385 8.38594 7.89267 7.49694 9.36815 7.49694ZM9.36815 6.60794C5.69056 6.60794 2.7085 11.0374 2.7085 16.5003C2.7085 21.9632 5.69056 26.3926 9.36815 26.3926C13.0457 26.3926 16.0278 21.9632 16.0278 16.5003C16.0278 11.0374 13.0457 6.60794 9.36815 6.60794Z" fill="#B0BEC5"/>
|
||||||
|
<path d="M7.47266 15.5762C6.87269 15.0118 7.00602 13.8919 7.77487 13.0741C7.81486 13.0319 7.85486 12.9919 7.89708 12.9541C7.55488 12.7608 7.17934 12.6519 6.78381 12.6519C5.18611 12.6519 3.89062 14.414 3.89062 16.585C3.89062 18.756 5.18611 20.5182 6.78381 20.5182C8.3815 20.5182 9.67699 18.756 9.67699 16.585C9.67699 16.1962 9.63477 15.8184 9.55699 15.4629C8.83703 15.9806 7.97708 16.0495 7.47266 15.5762Z" fill="url(#paint0_linear_2122_5062)"/>
|
||||||
|
<path d="M22.6294 26.3932C26.3074 26.3932 29.289 21.9642 29.289 16.5008C29.289 11.0374 26.3074 6.60847 22.6294 6.60847C18.9514 6.60847 15.9697 11.0374 15.9697 16.5008C15.9697 21.9642 18.9514 26.3932 22.6294 26.3932Z" fill="#EEEEEE"/>
|
||||||
|
<path d="M22.6283 25.9493C19.2018 25.9493 16.4131 21.711 16.4131 16.5014C16.4131 11.2919 19.2018 7.05357 22.6283 7.05357C26.0548 7.05357 28.8435 11.2919 28.8435 16.5014C28.8435 21.711 26.057 25.9493 22.6283 25.9493Z" fill="#FAFAFA"/>
|
||||||
|
<path d="M22.6284 7.49816C24.1017 7.49816 25.5127 8.38716 26.5993 10.0029C27.7592 11.7254 28.3992 14.0345 28.3992 16.5015C28.3992 18.9685 27.7592 21.2777 26.5993 23.0001C25.5127 24.6159 24.1017 25.5049 22.6284 25.5049C21.1551 25.5049 19.7441 24.6159 18.6575 23.0001C17.4976 21.2777 16.8576 18.9685 16.8576 16.5015C16.8576 14.0345 17.4976 11.7254 18.6575 10.0029C19.7441 8.38716 21.1551 7.49816 22.6284 7.49816ZM22.6284 6.60916C18.9508 6.60916 15.9688 11.0386 15.9688 16.5015C15.9688 21.9644 18.9508 26.3939 22.6284 26.3939C26.306 26.3939 29.2881 21.9644 29.2881 16.5015C29.2881 11.0386 26.306 6.60916 22.6284 6.60916Z" fill="#B0BEC5"/>
|
||||||
|
<path d="M20.7339 15.5767C20.1339 15.0123 20.2672 13.8924 21.0361 13.0746C21.0761 13.0324 21.1161 12.9924 21.1583 12.9546C20.8161 12.7613 20.4406 12.6524 20.045 12.6524C18.4473 12.6524 17.1519 14.4146 17.1519 16.5856C17.1519 18.7566 18.4473 20.5187 20.045 20.5187C21.6427 20.5187 22.9382 18.7566 22.9382 16.5856C22.9382 16.1967 22.896 15.8189 22.8182 15.4634C22.1005 15.9812 21.2383 16.05 20.7339 15.5767Z" fill="url(#paint1_linear_2122_5062)"/>
|
||||||
|
<defs>
|
||||||
|
<linearGradient id="paint0_linear_2122_5062" x1="6.78232" y1="12.651" x2="6.78232" y2="20.5188" gradientUnits="userSpaceOnUse">
|
||||||
|
<stop stop-color="#424242"/>
|
||||||
|
<stop offset="1" stop-color="#212121"/>
|
||||||
|
</linearGradient>
|
||||||
|
<linearGradient id="paint1_linear_2122_5062" x1="20.0449" y1="12.6515" x2="20.0449" y2="20.5193" gradientUnits="userSpaceOnUse">
|
||||||
|
<stop stop-color="#424242"/>
|
||||||
|
<stop offset="1" stop-color="#212121"/>
|
||||||
|
</linearGradient>
|
||||||
|
</defs>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 3.3 KiB |
BIN
frontend/public/Images/notFound404.png
Normal file
|
After Width: | Height: | Size: 43 KiB |
BIN
frontend/public/Logos/cloudwatch.png
Normal file
|
After Width: | Height: | Size: 51 KiB |
1
frontend/public/Logos/cmd-terminal.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 103.53" style="enable-background:new 0 0 122.88 103.53" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;} .st0{fill:#1668dc;} .st1{fill:#FFFFFF;}</style><g><path class="st0" d="M5.47,0h111.93c3.01,0,5.47,2.46,5.47,5.47v92.58c0,3.01-2.46,5.47-5.47,5.47H5.47 c-3.01,0-5.47-2.46-5.47-5.47V5.47C0,2.46,2.46,0,5.47,0L5.47,0z M31.84,38.55l17.79,18.42l2.14,2.13l-2.12,2.16L31.68,80.31 l-5.07-5l15.85-16.15L26.81,43.6L31.84,38.55L31.84,38.55z M94.1,79.41H54.69v-6.84H94.1V79.41L94.1,79.41z M38.19,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S35,9.83,38.19,9.83L38.19,9.83z M18.95,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S15.75,9.83,18.95,9.83L18.95,9.83z M7.49,5.41 h107.91c1.15,0,2.09,0.94,2.09,2.09v18.32H5.4V7.5C5.4,6.35,6.34,5.41,7.49,5.41L7.49,5.41z"/></g></svg>
|
||||||
|
After Width: | Height: | Size: 1.0 KiB |
1
frontend/public/Logos/docker.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 88.17" style="enable-background:new 0 0 122.88 88.17" xml:space="preserve"><style type="text/css">.st0{fill:#0091E2;}</style><g><path class="st0" d="M121.68,33.34c-0.34-0.28-3.42-2.62-10.03-2.62c-1.71,0-3.48,0.17-5.19,0.46c-1.25-8.72-8.49-12.94-8.78-13.16 l-1.77-1.03l-1.14,1.65c-1.42,2.22-2.51,4.73-3.13,7.29c-1.2,4.96-0.46,9.63,2.05,13.62c-3.02,1.71-7.92,2.11-8.95,2.17l-80.93,0 c-2.11,0-3.82,1.71-3.82,3.82c-0.11,7.07,1.08,14.13,3.53,20.8c2.79,7.29,6.95,12.71,12.31,16.01c6.04,3.7,15.9,5.81,27.01,5.81 c5.01,0,10.03-0.46,14.99-1.37c6.9-1.25,13.51-3.65,19.6-7.12c5.02-2.91,9.52-6.61,13.34-10.94c6.44-7.24,10.26-15.33,13.05-22.51 c0.4,0,0.74,0,1.14,0c7.01,0,11.34-2.79,13.73-5.19c1.6-1.48,2.79-3.31,3.65-5.36l0.51-1.48L121.68,33.34L121.68,33.34z M71.59,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C70.68,38.98,71.08,39.38,71.59,39.38L71.59,39.38z M56.49,11.63h10.83c0.51,0,0.97-0.4,0.97-0.97V0.97c0-0.51-0.46-0.97-0.97-0.97 L56.49,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C55.52,11.17,55.97,11.63,56.49,11.63L56.49,11.63z M56.49,25.53h10.83 c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.46-0.97-0.97-0.97H56.49c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C55.52,25.08,55.97,25.53,56.49,25.53L56.49,25.53z M41.5,25.53h10.83c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97 l0,0H41.5c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C40.53,25.08,40.93,25.53,41.5,25.53L41.5,25.53z M26.28,25.53h10.83 c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0H26.28c-0.51,0-0.97,0.4-0.97,0.97v9.69 C25.37,25.08,25.77,25.53,26.28,25.53L26.28,25.53z M56.49,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97 l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C55.52,38.98,55.97,39.38,56.49,39.38L56.49,39.38L56.49,39.38z M41.5,39.38 
h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C40.53,38.98,40.93,39.38,41.5,39.38L41.5,39.38L41.5,39.38z M26.28,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69 c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97v9.69C25.37,38.98,25.77,39.38,26.28,39.38L26.28,39.38z M11.35,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0 v9.69C10.44,38.98,10.84,39.38,11.35,39.38L11.35,39.38L11.35,39.38z"/></g></svg>
|
||||||
|
After Width: | Height: | Size: 2.5 KiB |
BIN
frontend/public/Logos/dotnet.png
Normal file
|
After Width: | Height: | Size: 3.7 KiB |
18
frontend/public/Logos/ec2.svg
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<svg width="80px" height="80px" viewBox="0 0 80 80" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
|
||||||
|
<!-- Generator: Sketch 64 (93537) - https://sketch.com -->
|
||||||
|
<title>Icon-Architecture/64/Arch_Amazon-EC2_64</title>
|
||||||
|
<desc>Created with Sketch.</desc>
|
||||||
|
<defs>
|
||||||
|
<linearGradient x1="0%" y1="100%" x2="100%" y2="0%" id="linearGradient-1">
|
||||||
|
<stop stop-color="#C8511B" offset="0%"></stop>
|
||||||
|
<stop stop-color="#FF9900" offset="100%"></stop>
|
||||||
|
</linearGradient>
|
||||||
|
</defs>
|
||||||
|
<g id="Icon-Architecture/64/Arch_Amazon-EC2_64" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
|
||||||
|
<g id="Icon-Architecture-BG/64/Compute" fill="url(#linearGradient-1)">
|
||||||
|
<rect id="Rectangle" x="0" y="0" width="80" height="80"></rect>
|
||||||
|
</g>
|
||||||
|
<path d="M27,53 L52,53 L52,28 L27,28 L27,53 Z M54,28 L58,28 L58,30 L54,30 L54,34 L58,34 L58,36 L54,36 L54,39 L58,39 L58,41 L54,41 L54,45 L58,45 L58,47 L54,47 L54,51 L58,51 L58,53 L54,53 L54,53.136 C54,54.164 53.164,55 52.136,55 L52,55 L52,59 L50,59 L50,55 L46,55 L46,59 L44,59 L44,55 L41,55 L41,59 L39,59 L39,55 L35,55 L35,59 L33,59 L33,55 L29,55 L29,59 L27,59 L27,55 L26.864,55 C25.836,55 25,54.164 25,53.136 L25,53 L22,53 L22,51 L25,51 L25,47 L22,47 L22,45 L25,45 L25,41 L22,41 L22,39 L25,39 L25,36 L22,36 L22,34 L25,34 L25,30 L22,30 L22,28 L25,28 L25,27.864 C25,26.836 25.836,26 26.864,26 L27,26 L27,22 L29,22 L29,26 L33,26 L33,22 L35,22 L35,26 L39,26 L39,22 L41,22 L41,26 L44,26 L44,22 L46,22 L46,26 L50,26 L50,22 L52,22 L52,26 L52.136,26 C53.164,26 54,26.836 54,27.864 L54,28 Z M41,65.876 C41,65.944 40.944,66 40.876,66 L14.124,66 C14.056,66 14,65.944 14,65.876 L14,39.124 C14,39.056 14.056,39 14.124,39 L20,39 L20,37 L14.124,37 C12.953,37 12,37.953 12,39.124 L12,65.876 C12,67.047 12.953,68 14.124,68 L40.876,68 C42.047,68 43,67.047 43,65.876 L43,61 L41,61 L41,65.876 Z M68,14.124 L68,40.876 C68,42.047 67.047,43 65.876,43 L60,43 L60,41 L65.876,41 C65.944,41 66,40.944 66,40.876 L66,14.124 C66,14.056 65.944,14 65.876,14 L39.124,14 C39.056,14 39,14.056 39,14.124 L39,20 L37,20 L37,14.124 C37,12.953 37.953,12 39.124,12 L65.876,12 C67.047,12 68,12.953 68,14.124 L68,14.124 Z" id="Amazon-EC2_Icon_64_Squid" fill="#FFFFFF"></path>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 2.3 KiB |
18
frontend/public/Logos/ecs.svg
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<svg width="80px" height="80px" viewBox="0 0 80 80" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
|
||||||
|
<!-- Generator: Sketch 64 (93537) - https://sketch.com -->
|
||||||
|
<title>Icon-Architecture/64/Arch_Amazon-Elastic-Container-Service_64</title>
|
||||||
|
<desc>Created with Sketch.</desc>
|
||||||
|
<defs>
|
||||||
|
<linearGradient x1="0%" y1="100%" x2="100%" y2="0%" id="linearGradient-1">
|
||||||
|
<stop stop-color="#C8511B" offset="0%"></stop>
|
||||||
|
<stop stop-color="#FF9900" offset="100%"></stop>
|
||||||
|
</linearGradient>
|
||||||
|
</defs>
|
||||||
|
<g id="Icon-Architecture/64/Arch_Amazon-Elastic-Container-Service_64" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
|
||||||
|
<g id="Icon-Architecture-BG/64/Containers" fill="url(#linearGradient-1)">
|
||||||
|
<rect id="Rectangle" x="0" y="0" width="80" height="80"></rect>
|
||||||
|
</g>
|
||||||
|
<path d="M64,48.2340095 L56,43.4330117 L56,32.0000169 C56,31.6440171 55.812,31.3150172 55.504,31.1360173 L44,24.4260204 L44,14.7520248 L64,26.5710194 L64,48.2340095 Z M65.509,25.13902 L43.509,12.139026 C43.199,11.9560261 42.818,11.9540261 42.504,12.131026 C42.193,12.3090259 42,12.6410257 42,13.0000256 L42,25.0000201 C42,25.3550199 42.189,25.6840198 42.496,25.8640197 L54,32.5740166 L54,44.0000114 C54,44.3510113 54.185,44.6770111 54.486,44.857011 L64.486,50.8570083 C64.644,50.9520082 64.822,51 65,51 C65.17,51 65.34,50.9570082 65.493,50.8700083 C65.807,50.6930084 66,50.3600085 66,50 L66,26.0000196 C66,25.6460198 65.814,25.31902 65.509,25.13902 L65.509,25.13902 Z M40.445,66.863001 L17,54.3990067 L17,26.5710194 L37,14.7520248 L37,24.4510204 L26.463,31.1560173 C26.175,31.3400172 26,31.6580171 26,32.0000169 L26,49.0000091 C26,49.373009 26.208,49.7150088 26.538,49.8870087 L39.991,56.8870055 C40.28,57.0370055 40.624,57.0380055 40.912,56.8880055 L53.964,50.1440086 L61.996,54.9640064 L40.445,66.863001 Z M64.515,54.1420068 L54.515,48.1420095 C54.217,47.9640096 53.849,47.9520096 53.541,48.1120095 L40.455,54.8730065 L28,48.3930094 L28,32.5490167 L38.537,25.8440197 C38.825,25.6600198 39,25.3420199 39,25.0000201 L39,13.0000256 C39,12.6410257 38.808,12.3090259 38.496,12.131026 C38.184,11.9540261 37.802,11.9560261 37.491,12.139026 L15.491,25.13902 C15.187,25.31902 15,25.6460198 15,26.0000196 L15,55 C15,55.3690062 15.204,55.7090061 15.53,55.883006 L39.984,68.8830001 C40.131,68.961 40.292,69 40.453,69 C40.62,69 40.786,68.958 40.937,68.8750001 L64.484,55.875006 C64.797,55.7020061 64.993,55.3750062 65.0001416,55.0180064 C65.006,54.6600066 64.821,54.3260067 64.515,54.1420068 L64.515,54.1420068 Z" id="Amazon-Elastic-Container-Service_Icon_64_Squid" fill="#FFFFFF"></path>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 2.7 KiB |
2
frontend/public/Logos/eks.svg
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
|
||||||
|
<svg width="800px" height="800px" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg" fill="none"><path fill="url(#amazon-eks-color-16__paint0_linear_879_141)" fill-rule="evenodd" d="M6.381 10.148h.897V8.121l1.837 2.027h1.164L7.997 7.642l2.169-2.195H8.963L7.278 7.146V5.447h-.897v4.701z" clip-rule="evenodd"/><path fill="url(#amazon-eks-color-16__paint1_linear_879_141)" d="M8.532 3.803l3.186 1.81a.173.173 0 01.088.149v3.62c0 .06.033.118.088.149l2.842 1.615a.176.176 0 00.264-.15V3.947a.173.173 0 00-.088-.15L8.708.274a.176.176 0 00-.264.15v3.23c0 .062.034.119.088.15z"/><path fill="url(#amazon-eks-color-16__paint2_linear_879_141)" d="M11.273 10.288l-3.185 1.81a.178.178 0 01-.176 0l-3.63-2.062a.173.173 0 01-.088-.15V5.762c0-.062.034-.119.088-.15l3.186-1.81a.172.172 0 00.088-.15V.424a.176.176 0 00-.264-.15L1.088 3.798a.173.173 0 00-.088.15V11.7c0 .061.033.118.088.15l6.824 3.876c.054.03.122.03.176 0l6.204-3.524a.172.172 0 000-.3l-2.843-1.615a.178.178 0 00-.176 0z"/><defs><linearGradient id="amazon-eks-color-16__paint0_linear_879_141" x1="10.691" x2="8.521" y1="9.879" y2="4.634" gradientUnits="userSpaceOnUse"><stop stop-color="#426DDB"/><stop offset="1" stop-color="#3B4BDB"/></linearGradient><linearGradient id="amazon-eks-color-16__paint1_linear_879_141" x1="15.693" x2="9.546" y1="10.544" y2="-.213" gradientUnits="userSpaceOnUse"><stop stop-color="#426DDB"/><stop offset="1" stop-color="#3B4BDB"/></linearGradient><linearGradient id="amazon-eks-color-16__paint2_linear_879_141" x1="9.433" x2="2.732" y1="14.904" y2="2.88" gradientUnits="userSpaceOnUse"><stop stop-color="#2775FF"/><stop offset="1" stop-color="#188DFF"/></linearGradient></defs></svg>
|
||||||
|
After Width: | Height: | Size: 1.7 KiB |
BIN
frontend/public/Logos/fluent-bit.png
Normal file
|
After Width: | Height: | Size: 48 KiB |
BIN
frontend/public/Logos/fluentd.png
Normal file
|
After Width: | Height: | Size: 20 KiB |
BIN
frontend/public/Logos/heroku.png
Normal file
|
After Width: | Height: | Size: 957 B |
BIN
frontend/public/Logos/http.png
Normal file
|
After Width: | Height: | Size: 4.3 KiB |