Compare commits
614 Commits
v0.22.0
...
jest-githu
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ced74603c0 | ||
|
|
f59fb81109 | ||
|
|
507e68a0c1 | ||
|
|
4ad8a1f3ad | ||
|
|
19faf6a584 | ||
|
|
3978ada811 | ||
|
|
0a04fc04a5 | ||
|
|
7c9e333b84 | ||
|
|
dd78afb20f | ||
|
|
237d765376 | ||
|
|
85e865fb1b | ||
|
|
975e5daf03 | ||
|
|
8a532cca17 | ||
|
|
b9c908719f | ||
|
|
63c7b5e9e1 | ||
|
|
32eeb3d106 | ||
|
|
1a4ec2bf00 | ||
|
|
1d014ab4f7 | ||
|
|
418ab67d50 | ||
|
|
7efe907757 | ||
|
|
1d1154aa8c | ||
|
|
a16fca6376 | ||
|
|
9c1ea0cde9 | ||
|
|
ec500831ef | ||
|
|
fcbf82c2f3 | ||
|
|
a805eb7533 | ||
|
|
a8edc4fd95 | ||
|
|
c66c8c2823 | ||
|
|
c7b59d4405 | ||
|
|
f56b5cb971 | ||
|
|
29b1344557 | ||
|
|
55664872bd | ||
|
|
221861230a | ||
|
|
8b1a781f58 | ||
|
|
b557ca5519 | ||
|
|
e557ff273f | ||
|
|
3c284fc9ee | ||
|
|
bcebe050b1 | ||
|
|
9360c61dca | ||
|
|
fb1dbdc05e | ||
|
|
6170b2c5dc | ||
|
|
9826ab04b3 | ||
|
|
fd9566d471 | ||
|
|
3a1e8d523a | ||
|
|
6dd34a7f29 | ||
|
|
170e5e1686 | ||
|
|
16502feaad | ||
|
|
09d579311e | ||
|
|
8072fede85 | ||
|
|
112783d618 | ||
|
|
4644b1c200 | ||
|
|
bb09c84679 | ||
|
|
fc5f0fbf9e | ||
|
|
d6f0559adc | ||
|
|
0d7f7df76c | ||
|
|
7104d8e0f5 | ||
|
|
a20693fa9f | ||
|
|
0b991331d7 | ||
|
|
aad44a1037 | ||
|
|
3e29161fea | ||
|
|
b616dca52d | ||
|
|
be519666a3 | ||
|
|
a48edac13b | ||
|
|
0a77c7ab85 | ||
|
|
9fb32acf6d | ||
|
|
b2d6d75eef | ||
|
|
07d126c669 | ||
|
|
50d584cc89 | ||
|
|
1b6b3c2fdf | ||
|
|
1f0fdfd403 | ||
|
|
ae3b604cdc | ||
|
|
381f497b95 | ||
|
|
8045c4e5ae | ||
|
|
7451e885c3 | ||
|
|
01df53074c | ||
|
|
b6a79ab22c | ||
|
|
dae817640b | ||
|
|
16839eb7d3 | ||
|
|
780a863943 | ||
|
|
5e0b6366cc | ||
|
|
8eb2b9e3d0 | ||
|
|
97ed163002 | ||
|
|
e18bb7d5bc | ||
|
|
1e4cf2513c | ||
|
|
988ede7776 | ||
|
|
d1acad8ee4 | ||
|
|
f5b1d4146f | ||
|
|
feaac39e2a | ||
|
|
fc4cdea539 | ||
|
|
399d49b3c0 | ||
|
|
ec8a74d385 | ||
|
|
7c87310fa6 | ||
|
|
349c4020f5 | ||
|
|
92e2f1c467 | ||
|
|
e3a89be86b | ||
|
|
40090aaf12 | ||
|
|
4009ac83fe | ||
|
|
e7f9c3981b | ||
|
|
fe75f6347b | ||
|
|
bc72b5fcea | ||
|
|
a54cf38e21 | ||
|
|
94d99ee0a4 | ||
|
|
c109636889 | ||
|
|
d9950d9223 | ||
|
|
a578f9509a | ||
|
|
b1e4ee1d26 | ||
|
|
31b07cc02c | ||
|
|
d42bf50ddb | ||
|
|
93a11b2031 | ||
|
|
af71474bec | ||
|
|
bc942d218b | ||
|
|
f2e7f09a32 | ||
|
|
7e87df2d69 | ||
|
|
c0226ab584 | ||
|
|
84f2885533 | ||
|
|
e58ecff19b | ||
|
|
f4ecfb510a | ||
|
|
c4536f9069 | ||
|
|
2a55f3d680 | ||
|
|
5d6eea3045 | ||
|
|
12029a6d90 | ||
|
|
4083970289 | ||
|
|
b3c0681a85 | ||
|
|
36aced6d1a | ||
|
|
bad69abcc2 | ||
|
|
d091d90d66 | ||
|
|
29bfdb8909 | ||
|
|
31b5635339 | ||
|
|
73fc262f04 | ||
|
|
dc23368f6e | ||
|
|
75526c6de5 | ||
|
|
5b419cb668 | ||
|
|
d8a8430a5b | ||
|
|
dc7a55e871 | ||
|
|
9333fdcd0b | ||
|
|
58ccbdbec4 | ||
|
|
12819113c1 | ||
|
|
37f61ebe60 | ||
|
|
f2f89eb38b | ||
|
|
a99d7f09a1 | ||
|
|
2ae75e6196 | ||
|
|
f86fc03fd6 | ||
|
|
5a9f626da5 | ||
|
|
758013d7cd | ||
|
|
ddc3cc4911 | ||
|
|
6b2f857a12 | ||
|
|
30b0d42604 | ||
|
|
88aabb2060 | ||
|
|
f939d41acd | ||
|
|
d165f727ac | ||
|
|
e4ef137c72 | ||
|
|
dda01678e8 | ||
|
|
3e65543b5f | ||
|
|
050b866173 | ||
|
|
0906886e9a | ||
|
|
8371670512 | ||
|
|
123f2e7d52 | ||
|
|
0ab09c1c67 | ||
|
|
9f5039dbf3 | ||
|
|
5e349d8294 | ||
|
|
b5654c8bfa | ||
|
|
71e487dc0c | ||
|
|
2d60805b28 | ||
|
|
7603e0ebe0 | ||
|
|
1e8a8d19ea | ||
|
|
092d164d55 | ||
|
|
0400d5378b | ||
|
|
626da7533e | ||
|
|
bff7142a61 | ||
|
|
ed3017d247 | ||
|
|
ec3eba612c | ||
|
|
b958a06ba0 | ||
|
|
64f0ff05f9 | ||
|
|
f94a5f4481 | ||
|
|
27869f03bd | ||
|
|
9c21449239 | ||
|
|
991e39aad3 | ||
|
|
eddb607c9c | ||
|
|
3341cb7396 | ||
|
|
4ca1e34378 | ||
|
|
658a9cc11b | ||
|
|
4ef973ceb6 | ||
|
|
bbfaad15c2 | ||
|
|
45ead71359 | ||
|
|
79aef73767 | ||
|
|
fc49833c9f | ||
|
|
b34eafcab1 | ||
|
|
ed4ba1aa24 | ||
|
|
f427bac993 | ||
|
|
7de3cec477 | ||
|
|
856c04220f | ||
|
|
6a8096b8d7 | ||
|
|
9bad663c4f | ||
|
|
720a735338 | ||
|
|
1ad7ba0afd | ||
|
|
176d01544e | ||
|
|
c55be0e392 | ||
|
|
2c2775c766 | ||
|
|
f90ae99018 | ||
|
|
e12cf3e494 | ||
|
|
f12abfbe01 | ||
|
|
7faab85b4d | ||
|
|
5e0c068cb9 | ||
|
|
7a18bddce3 | ||
|
|
0c11b12744 | ||
|
|
ba05991222 | ||
|
|
1f17095e11 | ||
|
|
ab42700245 | ||
|
|
3f912edc98 | ||
|
|
63b503a9fb | ||
|
|
90f7ba191b | ||
|
|
53a78211ef | ||
|
|
838860da40 | ||
|
|
6b2427f1c2 | ||
|
|
e3d08a4275 | ||
|
|
814431e3a8 | ||
|
|
6e20fbb174 | ||
|
|
53dee57e17 | ||
|
|
5c5ee2cc70 | ||
|
|
e0b83bda62 | ||
|
|
f7fe64a8df | ||
|
|
377dbd8aec | ||
|
|
f8d3fa0fdb | ||
|
|
5b858f2963 | ||
|
|
3620cdb5d2 | ||
|
|
546d98ca9c | ||
|
|
cb155a1172 | ||
|
|
ad62106cad | ||
|
|
2d6c5f43a1 | ||
|
|
9a433891f2 | ||
|
|
3c63d66591 | ||
|
|
5b69559762 | ||
|
|
d7a5c6d65b | ||
|
|
1588d3a199 | ||
|
|
d5df9a1f7f | ||
|
|
2be3d35952 | ||
|
|
7fa50070ce | ||
|
|
2494b64ccd | ||
|
|
ca3283fcad | ||
|
|
a912731cc7 | ||
|
|
1a855582a7 | ||
|
|
f3c00e1a57 | ||
|
|
0d3cbb1db2 | ||
|
|
2c96512a8a | ||
|
|
a84a70df14 | ||
|
|
dcea79cef3 | ||
|
|
b12365ba07 | ||
|
|
718eb7b381 | ||
|
|
503417719c | ||
|
|
e7a5eb7b22 | ||
|
|
b14f800fee | ||
|
|
9e91375632 | ||
|
|
d7d4000240 | ||
|
|
e12aef136a | ||
|
|
0e04b779a9 | ||
|
|
587034f573 | ||
|
|
321cba2af5 | ||
|
|
abed60bdfa | ||
|
|
a306fb64cb | ||
|
|
0ad5d67140 | ||
|
|
11863040bb | ||
|
|
a67a3837c8 | ||
|
|
81b10d126a | ||
|
|
9f751688cc | ||
|
|
3d0fbd0065 | ||
|
|
05ea814c61 | ||
|
|
92ba46b2f5 | ||
|
|
4bbe1ea614 | ||
|
|
e3a251ef29 | ||
|
|
a4e0d9c7df | ||
|
|
4076cd9847 | ||
|
|
e3f4fc2967 | ||
|
|
bccefc6a10 | ||
|
|
821471f4ab | ||
|
|
1e242b6d06 | ||
|
|
4ca5176836 | ||
|
|
7f397d529b | ||
|
|
656f354fdc | ||
|
|
4cc3ce224c | ||
|
|
a4a285c074 | ||
|
|
a8f8580606 | ||
|
|
e24918044e | ||
|
|
28d346eafb | ||
|
|
cbd2f4c643 | ||
|
|
fcedc9e445 | ||
|
|
d2d3c4bb36 | ||
|
|
dc4acc0730 | ||
|
|
043e5ca880 | ||
|
|
5c437dd8f9 | ||
|
|
31b898b2c6 | ||
|
|
e186474414 | ||
|
|
8bfb0b5088 | ||
|
|
045a31ac92 | ||
|
|
c9654a6b52 | ||
|
|
30e0924bfb | ||
|
|
ccada08db5 | ||
|
|
6654dd2672 | ||
|
|
4b0a7cc4d3 | ||
|
|
04acc49154 | ||
|
|
3db8a25eb9 | ||
|
|
8324d010ae | ||
|
|
f0022cd13f | ||
|
|
655c92cfef | ||
|
|
735ab8e118 | ||
|
|
b0861f4fe0 | ||
|
|
61b6779a31 | ||
|
|
416a058eab | ||
|
|
4227faa6b5 | ||
|
|
9d3c4598ac | ||
|
|
6aba701cca | ||
|
|
cf1b0c2f24 | ||
|
|
231c2fd281 | ||
|
|
e3f17b5420 | ||
|
|
56f1f71461 | ||
|
|
72f4578152 | ||
|
|
9c8125ffc1 | ||
|
|
85d7752350 | ||
|
|
63ba9fb5e0 | ||
|
|
14a59a26b2 | ||
|
|
178c154263 | ||
|
|
122488c2c1 | ||
|
|
ad3fbd7599 | ||
|
|
1ad1ca5385 | ||
|
|
2fc82ffa59 | ||
|
|
ed809474d6 | ||
|
|
a8c4a91001 | ||
|
|
1ffb1b4a5d | ||
|
|
9a00998930 | ||
|
|
c60f612e0e | ||
|
|
7839134532 | ||
|
|
714a2ef4fd | ||
|
|
7f98a65022 | ||
|
|
616e3af18f | ||
|
|
17ae197bc3 | ||
|
|
27cda7a437 | ||
|
|
54a2309d8f | ||
|
|
748c78e9c3 | ||
|
|
7ae0326f61 | ||
|
|
c15240abab | ||
|
|
2fe9e53766 | ||
|
|
7209ac0007 | ||
|
|
96adc7f61c | ||
|
|
7b4fd55aeb | ||
|
|
86c34bd87d | ||
|
|
41f7a7993d | ||
|
|
dfd94f67bd | ||
|
|
052c32ce78 | ||
|
|
004f10e73b | ||
|
|
f17608fa10 | ||
|
|
8de8a8a86a | ||
|
|
0486721b35 | ||
|
|
14bbc609d8 | ||
|
|
f450d71a25 | ||
|
|
921fca5e67 | ||
|
|
1f7e70fa16 | ||
|
|
3ca048bc76 | ||
|
|
37e36626ab | ||
|
|
07808a8664 | ||
|
|
16d490fbe3 | ||
|
|
b1cee71621 | ||
|
|
e55a4da2bc | ||
|
|
0fa0e64697 | ||
|
|
c54fffb51d | ||
|
|
294b6966bf | ||
|
|
9aa72f847c | ||
|
|
32a55f3c4f | ||
|
|
8e6a7f13a1 | ||
|
|
89e8fb715c | ||
|
|
21de4bbd1b | ||
|
|
d4cc7c88c3 | ||
|
|
0e9e29e650 | ||
|
|
176725a4b4 | ||
|
|
88560e7c43 | ||
|
|
2538899544 | ||
|
|
ae0fc32fe1 | ||
|
|
859875667a | ||
|
|
674dee5a1b | ||
|
|
0318e53fdc | ||
|
|
b768840d36 | ||
|
|
ccc8be009c | ||
|
|
59549c36de | ||
|
|
aeee8b4cb2 | ||
|
|
03acc33888 | ||
|
|
b2a943769b | ||
|
|
1501ed0c5d | ||
|
|
218eb5379e | ||
|
|
e596dd77bd | ||
|
|
1138c6e41a | ||
|
|
15f328eb9e | ||
|
|
01312ec286 | ||
|
|
0574350e6e | ||
|
|
7e297dcb75 | ||
|
|
d184486978 | ||
|
|
337e33eb8a | ||
|
|
988dd1bcf0 | ||
|
|
25fc7b83ec | ||
|
|
893fcfa6ee | ||
|
|
23f94538c8 | ||
|
|
7586b50c5a | ||
|
|
0bee0a6d90 | ||
|
|
f89f3c0b14 | ||
|
|
598e71eb8e | ||
|
|
4d14416a08 | ||
|
|
2657276a80 | ||
|
|
48538e6b96 | ||
|
|
9337ff4b41 | ||
|
|
02cd069bb2 | ||
|
|
6a71d311b3 | ||
|
|
0b8367c817 | ||
|
|
b20fc39e08 | ||
|
|
1dd7bdb100 | ||
|
|
591ea96285 | ||
|
|
ee6b290a0c | ||
|
|
a37476a09b | ||
|
|
d41805a3b0 | ||
|
|
5d6bb18679 | ||
|
|
a393ea4d68 | ||
|
|
26b95f1b9f | ||
|
|
34be2953d3 | ||
|
|
e04d5fa7e8 | ||
|
|
2df5a9d72d | ||
|
|
b6e111b835 | ||
|
|
6a4aa7e5f5 | ||
|
|
962fc5e9ff | ||
|
|
4d5ee861ec | ||
|
|
2aef4578b0 | ||
|
|
f8bfd1abc4 | ||
|
|
a75f4f02d6 | ||
|
|
5ae4a59060 | ||
|
|
896836b57d | ||
|
|
06bedc92dc | ||
|
|
8a05b32a30 | ||
|
|
5bca66fedf | ||
|
|
6fb071cf37 | ||
|
|
9488ce8585 | ||
|
|
458cff32fd | ||
|
|
c8bad4fc79 | ||
|
|
5ac105beca | ||
|
|
865409d725 | ||
|
|
7554bce11c | ||
|
|
9a6fcb6b1d | ||
|
|
8ef4c0bcdd | ||
|
|
ded2c98167 | ||
|
|
22d4c53a43 | ||
|
|
7a4156a3b7 | ||
|
|
8844144c01 | ||
|
|
f8ec850670 | ||
|
|
ee76cf6294 | ||
|
|
2bf534b56f | ||
|
|
c37d6c3785 | ||
|
|
17c61a61ec | ||
|
|
ec7c99dd26 | ||
|
|
03220fcf11 | ||
|
|
233589b867 | ||
|
|
0946bcd5fc | ||
|
|
1fc0461c0f | ||
|
|
86b725757c | ||
|
|
b6004ce157 | ||
|
|
87c244ccfa | ||
|
|
557ebcec79 | ||
|
|
6363c71442 | ||
|
|
74c4a36e26 | ||
|
|
abb8b2b122 | ||
|
|
c35f2ec0aa | ||
|
|
3b22698e35 | ||
|
|
900752b6e2 | ||
|
|
f47c23032c | ||
|
|
7ad489ebb4 | ||
|
|
3d03ad52b1 | ||
|
|
37349786f1 | ||
|
|
c6ac8df707 | ||
|
|
ae3d4fece8 | ||
|
|
d63ae429bd | ||
|
|
171cea14e2 | ||
|
|
3d5dc05c08 | ||
|
|
d14bd7386e | ||
|
|
afd893b8b5 | ||
|
|
10141a207b | ||
|
|
daebea701d | ||
|
|
7fb29ad2ee | ||
|
|
e61b0a4d67 | ||
|
|
ff392ba883 | ||
|
|
9aa1fb366a | ||
|
|
49b456cc7f | ||
|
|
ed65c92fc7 | ||
|
|
558352e43b | ||
|
|
dd1e845095 | ||
|
|
db4101e795 | ||
|
|
b339f0509b | ||
|
|
668f0c6e2b | ||
|
|
d4024d1af9 | ||
|
|
355a297795 | ||
|
|
aada9060da | ||
|
|
5bf64053b7 | ||
|
|
f1fff4ca0c | ||
|
|
562621a117 | ||
|
|
68ab022836 | ||
|
|
2cdafa0564 | ||
|
|
fafa6f9960 | ||
|
|
54738432af | ||
|
|
23e6902921 | ||
|
|
348fa0ba5e | ||
|
|
1a3e46cecd | ||
|
|
4397c53494 | ||
|
|
fabdf87ed1 | ||
|
|
5f89e84eaf | ||
|
|
7493193e66 | ||
|
|
378fdb522e | ||
|
|
bc4a4edc7f | ||
|
|
5c83d133a2 | ||
|
|
5469cd34fa | ||
|
|
a2c03243cb | ||
|
|
4753868298 | ||
|
|
203eef8cde | ||
|
|
b915f9ef7b | ||
|
|
2c5c972801 | ||
|
|
872759f579 | ||
|
|
e86e045a28 | ||
|
|
7b85ece796 | ||
|
|
127ccdacb4 | ||
|
|
cac637ac88 | ||
|
|
b9409820cc | ||
|
|
e6fa1383f3 | ||
|
|
59deac01bd | ||
|
|
55f49c38c7 | ||
|
|
7e220a9f61 | ||
|
|
ace2d8a3b3 | ||
|
|
c4ce057d7a | ||
|
|
ef0e63c35b | ||
|
|
765153caa8 | ||
|
|
6d7081a4bd | ||
|
|
f1818235dc | ||
|
|
c321a1741f | ||
|
|
a235beae36 | ||
|
|
98a2ef4080 | ||
|
|
5a2a987a9b | ||
|
|
7822b4efee | ||
|
|
8f1451e154 | ||
|
|
07833b9859 | ||
|
|
0de40a889d | ||
|
|
b3a6deb71b | ||
|
|
b5af238374 | ||
|
|
49afc2549f | ||
|
|
0ed6594e48 | ||
|
|
50142321f7 | ||
|
|
29df1188c7 | ||
|
|
f3817d7335 | ||
|
|
fea8a71f51 | ||
|
|
5e89211f53 | ||
|
|
0beffb50ca | ||
|
|
6efa1011aa | ||
|
|
c68b611ad9 | ||
|
|
69828548b1 | ||
|
|
22d0aa951c | ||
|
|
7f9ba6c43a | ||
|
|
7a177e18e4 | ||
|
|
433f930956 | ||
|
|
206e8b8dc3 | ||
|
|
225b2248c8 | ||
|
|
783a54a8ee | ||
|
|
216499051d | ||
|
|
6b77165d09 | ||
|
|
d26022efb1 | ||
|
|
60c0836d3e | ||
|
|
7f162e5381 | ||
|
|
98745fc307 | ||
|
|
08d496e314 | ||
|
|
538261aa99 | ||
|
|
10ffbf7d81 | ||
|
|
8e20ca8405 | ||
|
|
7818f918a8 | ||
|
|
5bfcc1db70 | ||
|
|
39c6410bbe | ||
|
|
149fdebfaa | ||
|
|
915738e1f7 | ||
|
|
340f14be3d | ||
|
|
a6e6c171c3 | ||
|
|
1d1ddbef40 | ||
|
|
d7f5e5d6ac | ||
|
|
c656803162 | ||
|
|
e746bae8db | ||
|
|
54899930b0 | ||
|
|
ac40e08474 | ||
|
|
c51a15f1e8 | ||
|
|
5346cdd283 | ||
|
|
cc62d2cf71 | ||
|
|
44416df5dc | ||
|
|
a1a5e8bf9b | ||
|
|
652bd52ea7 | ||
|
|
ed200e50c8 | ||
|
|
f0f93c64d2 | ||
|
|
c0d10f0d88 | ||
|
|
e3e0787459 | ||
|
|
c72729f8bc | ||
|
|
2b3934b845 | ||
|
|
2e85bd0264 | ||
|
|
e3d26d3f10 | ||
|
|
5042c56b4c | ||
|
|
84c81b054c | ||
|
|
532ebdc856 | ||
|
|
f18b073810 | ||
|
|
857e505323 | ||
|
|
1295e179b2 | ||
|
|
193dca3a2b | ||
|
|
360029f350 | ||
|
|
eb2a955323 | ||
|
|
1d00ac9ded | ||
|
|
d4b95b4848 | ||
|
|
0750231b4b | ||
|
|
c1664dde6a | ||
|
|
197ccca30f | ||
|
|
76ba364317 | ||
|
|
e97609ce23 | ||
|
|
720edb162e |
8
.github/CODEOWNERS
vendored
@@ -3,11 +3,9 @@
|
|||||||
# that they own.
|
# that they own.
|
||||||
* @ankitnayan
|
* @ankitnayan
|
||||||
|
|
||||||
/frontend/ @palashgdev
|
/frontend/ @palashgdev @YounixM
|
||||||
|
/frontend/src/container/MetricsApplication @srikanthccv
|
||||||
|
/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
|
||||||
/deploy/ @prashant-shahi
|
/deploy/ @prashant-shahi
|
||||||
/sample-apps/ @prashant-shahi
|
/sample-apps/ @prashant-shahi
|
||||||
**/query-service/ @srikanthccv
|
|
||||||
Makefile @srikanthccv
|
|
||||||
go.* @srikanthccv
|
|
||||||
.git* @srikanthccv
|
|
||||||
.github @prashant-shahi
|
.github @prashant-shahi
|
||||||
|
|||||||
17
.github/pull_request_template.md
vendored
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
### Summary
|
||||||
|
|
||||||
|
<!-- ✍️ A clear and concise description...-->
|
||||||
|
|
||||||
|
#### Related Issues / PR's
|
||||||
|
|
||||||
|
<!-- ✍️ Add the issues being resolved here and related PR's where applicable -->
|
||||||
|
|
||||||
|
#### Screenshots
|
||||||
|
|
||||||
|
NA
|
||||||
|
|
||||||
|
<!-- ✍️ Add screenshots of before and after changes where applicable-->
|
||||||
|
|
||||||
|
#### Affected Areas and Manually Tested Areas
|
||||||
|
|
||||||
|
<!-- ✍️ Add details of blast radius and dev testing areas where applicable-->
|
||||||
38
.github/workflows/build.yaml
vendored
@@ -12,7 +12,31 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Install dependencies
|
||||||
|
run: cd frontend && yarn install
|
||||||
|
- name: Run ESLint
|
||||||
|
run: cd frontend && npm run lint
|
||||||
|
- name: Run Jest
|
||||||
|
run: cd frontend && npm run jest
|
||||||
|
- name: TSC
|
||||||
|
run: yarn tsc
|
||||||
|
working-directory: ./frontend
|
||||||
|
- name: Build frontend docker image
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
make build-frontend-amd64
|
||||||
|
|
||||||
|
build-frontend-ee:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Create .env file
|
||||||
|
run: |
|
||||||
|
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
|
||||||
|
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
|
||||||
|
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: cd frontend && yarn install
|
run: cd frontend && yarn install
|
||||||
- name: Run ESLint
|
- name: Run ESLint
|
||||||
@@ -31,7 +55,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup golang
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
@@ -45,7 +73,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup golang
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
- name: Build EE query-service image
|
- name: Build EE query-service image
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
2
.github/workflows/codeql.yaml
vendored
@@ -39,7 +39,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
# Initializes the CodeQL tools for scanning.
|
# Initializes the CodeQL tools for scanning.
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
|
|||||||
2
.github/workflows/commitlint.yml
vendored
@@ -7,7 +7,7 @@ jobs:
|
|||||||
lint-commits:
|
lint-commits:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- uses: wagoid/commitlint-github-action@v5
|
- uses: wagoid/commitlint-github-action@v5
|
||||||
|
|||||||
@@ -12,11 +12,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout Codebase
|
- name: Checkout Codebase
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: signoz/gh-bot
|
repository: signoz/gh-bot
|
||||||
- name: Use Node v16
|
- name: Use Node v16
|
||||||
uses: actions/setup-node@v3
|
uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: 16
|
node-version: 16
|
||||||
- name: Setup Cache & Install Dependencies
|
- name: Setup Cache & Install Dependencies
|
||||||
|
|||||||
2
.github/workflows/dependency-review.yml
vendored
@@ -15,7 +15,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: 'Checkout Repository'
|
- name: 'Checkout Repository'
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: 'Dependency Review'
|
- name: 'Dependency Review'
|
||||||
with:
|
with:
|
||||||
fail-on-severity: high
|
fail-on-severity: high
|
||||||
|
|||||||
4
.github/workflows/e2e-k3s.yaml
vendored
@@ -13,7 +13,7 @@ jobs:
|
|||||||
DOCKER_TAG: pull-${{ github.event.number }}
|
DOCKER_TAG: pull-${{ github.event.number }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Build query-service image
|
- name: Build query-service image
|
||||||
env:
|
env:
|
||||||
@@ -37,7 +37,7 @@ jobs:
|
|||||||
kubectl create ns sample-application
|
kubectl create ns sample-application
|
||||||
|
|
||||||
# apply hotrod k8s manifest file
|
# apply hotrod k8s manifest file
|
||||||
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
|
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
|
||||||
|
|
||||||
# wait for all deployments in sample-application namespace to be READY
|
# wait for all deployments in sample-application namespace to be READY
|
||||||
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
|
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
|
||||||
|
|||||||
32
.github/workflows/jest-code-coverage.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
name: Code Coverage
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- develop
|
||||||
|
- main
|
||||||
|
- release/v*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- develop
|
||||||
|
- main
|
||||||
|
- release/v*
|
||||||
|
jobs:
|
||||||
|
coverage:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
checks: write
|
||||||
|
pull-requests: write
|
||||||
|
contents: write
|
||||||
|
steps:
|
||||||
|
- name: Checkout Repository
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
- uses: jwalton/gh-find-current-pr@v1
|
||||||
|
id: findPr
|
||||||
|
- uses: ArtiomTr/jest-coverage-report-action@v2
|
||||||
|
with:
|
||||||
|
package-manager: yarn
|
||||||
|
working-directory: frontend
|
||||||
|
test-script: yarn jest:coverage
|
||||||
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
output: comment
|
||||||
|
prnumber: ${{ steps.findPr.outputs.number }}
|
||||||
4
.github/workflows/playwright.yaml
vendored
@@ -9,8 +9,8 @@ jobs:
|
|||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-node@v3
|
- uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: "16.x"
|
node-version: "16.x"
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
|
|||||||
79
.github/workflows/push.yaml
vendored
@@ -14,7 +14,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup golang
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v2
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
@@ -30,7 +34,7 @@ jobs:
|
|||||||
id: short-sha
|
id: short-sha
|
||||||
- name: Get branch name
|
- name: Get branch name
|
||||||
id: branch-name
|
id: branch-name
|
||||||
uses: tj-actions/branch-names@v5.1
|
uses: tj-actions/branch-names@v7.0.7
|
||||||
- name: Set docker tag environment
|
- name: Set docker tag environment
|
||||||
run: |
|
run: |
|
||||||
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
||||||
@@ -42,6 +46,11 @@ jobs:
|
|||||||
else
|
else
|
||||||
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
|
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
|
||||||
fi
|
fi
|
||||||
|
- name: Install cross-compilation tools
|
||||||
|
run: |
|
||||||
|
set -ex
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
|
||||||
- name: Build and push docker image
|
- name: Build and push docker image
|
||||||
run: make build-push-query-service
|
run: make build-push-query-service
|
||||||
|
|
||||||
@@ -49,7 +58,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup golang
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v2
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
@@ -65,7 +78,7 @@ jobs:
|
|||||||
id: short-sha
|
id: short-sha
|
||||||
- name: Get branch name
|
- name: Get branch name
|
||||||
id: branch-name
|
id: branch-name
|
||||||
uses: tj-actions/branch-names@v5.1
|
uses: tj-actions/branch-names@v7.0.7
|
||||||
- name: Set docker tag environment
|
- name: Set docker tag environment
|
||||||
run: |
|
run: |
|
||||||
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
||||||
@@ -77,6 +90,11 @@ jobs:
|
|||||||
else
|
else
|
||||||
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
|
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
|
||||||
fi
|
fi
|
||||||
|
- name: Install cross-compilation tools
|
||||||
|
run: |
|
||||||
|
set -ex
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
|
||||||
- name: Build and push docker image
|
- name: Build and push docker image
|
||||||
run: make build-push-ee-query-service
|
run: make build-push-ee-query-service
|
||||||
|
|
||||||
@@ -84,7 +102,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
working-directory: frontend
|
working-directory: frontend
|
||||||
run: yarn install
|
run: yarn install
|
||||||
@@ -109,7 +127,7 @@ jobs:
|
|||||||
id: short-sha
|
id: short-sha
|
||||||
- name: Get branch name
|
- name: Get branch name
|
||||||
id: branch-name
|
id: branch-name
|
||||||
uses: tj-actions/branch-names@v5.1
|
uses: tj-actions/branch-names@v7.0.7
|
||||||
- name: Set docker tag environment
|
- name: Set docker tag environment
|
||||||
run: |
|
run: |
|
||||||
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
||||||
@@ -123,3 +141,52 @@ jobs:
|
|||||||
fi
|
fi
|
||||||
- name: Build and push docker image
|
- name: Build and push docker image
|
||||||
run: make build-push-frontend
|
run: make build-push-frontend
|
||||||
|
|
||||||
|
image-build-and-push-frontend-ee:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Create .env file
|
||||||
|
run: |
|
||||||
|
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
|
||||||
|
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
|
||||||
|
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
|
||||||
|
- name: Install dependencies
|
||||||
|
working-directory: frontend
|
||||||
|
run: yarn install
|
||||||
|
- name: Run Prettier
|
||||||
|
working-directory: frontend
|
||||||
|
run: npm run prettify
|
||||||
|
continue-on-error: true
|
||||||
|
- name: Run ESLint
|
||||||
|
working-directory: frontend
|
||||||
|
run: npm run lint
|
||||||
|
continue-on-error: true
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v2
|
||||||
|
with:
|
||||||
|
version: latest
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v2
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
- uses: benjlevesque/short-sha@v2.2
|
||||||
|
id: short-sha
|
||||||
|
- name: Get branch name
|
||||||
|
id: branch-name
|
||||||
|
uses: tj-actions/branch-names@v7.0.7
|
||||||
|
- name: Set docker tag environment
|
||||||
|
run: |
|
||||||
|
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
|
||||||
|
tag="${{ steps.branch-name.outputs.tag }}"
|
||||||
|
tag="${tag:1}"
|
||||||
|
echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
|
||||||
|
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
|
||||||
|
echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
|
||||||
|
else
|
||||||
|
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
- name: Build and push docker image
|
||||||
|
run: make build-push-frontend
|
||||||
|
|||||||
2
.github/workflows/sonar.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Sonar analysis
|
- name: Sonar analysis
|
||||||
|
|||||||
2
.github/workflows/staging-deployment.yaml
vendored
@@ -26,8 +26,10 @@ jobs:
|
|||||||
echo "GITHUB_SHA: ${GITHUB_SHA}"
|
echo "GITHUB_SHA: ${GITHUB_SHA}"
|
||||||
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
|
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
|
||||||
export OTELCOL_TAG="main"
|
export OTELCOL_TAG="main"
|
||||||
|
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
|
||||||
docker system prune --force
|
docker system prune --force
|
||||||
docker pull signoz/signoz-otel-collector:main
|
docker pull signoz/signoz-otel-collector:main
|
||||||
|
docker pull signoz/signoz-schema-migrator:main
|
||||||
cd ~/signoz
|
cd ~/signoz
|
||||||
git status
|
git status
|
||||||
git add .
|
git add .
|
||||||
|
|||||||
1
.github/workflows/testing-deployment.yaml
vendored
@@ -26,6 +26,7 @@ jobs:
|
|||||||
echo "GITHUB_SHA: ${GITHUB_SHA}"
|
echo "GITHUB_SHA: ${GITHUB_SHA}"
|
||||||
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
|
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
|
||||||
export DEV_BUILD="1"
|
export DEV_BUILD="1"
|
||||||
|
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
|
||||||
docker system prune --force
|
docker system prune --force
|
||||||
cd ~/signoz
|
cd ~/signoz
|
||||||
git status
|
git status
|
||||||
|
|||||||
11
.gitignore
vendored
@@ -37,7 +37,7 @@ frontend/src/constants/env.ts
|
|||||||
**/locust-scripts/__pycache__/
|
**/locust-scripts/__pycache__/
|
||||||
**/__debug_bin
|
**/__debug_bin
|
||||||
|
|
||||||
frontend/*.env
|
.env
|
||||||
pkg/query-service/signoz.db
|
pkg/query-service/signoz.db
|
||||||
|
|
||||||
pkg/query-service/tests/test-deploy/data/
|
pkg/query-service/tests/test-deploy/data/
|
||||||
@@ -53,3 +53,12 @@ ee/query-service/tests/test-deploy/data/
|
|||||||
bin/
|
bin/
|
||||||
|
|
||||||
*/query-service/queries.active
|
*/query-service/queries.active
|
||||||
|
|
||||||
|
# e2e
|
||||||
|
|
||||||
|
e2e/node_modules/
|
||||||
|
e2e/test-results/
|
||||||
|
e2e/playwright-report/
|
||||||
|
e2e/blob-report/
|
||||||
|
e2e/playwright/.cache/
|
||||||
|
e2e/.auth
|
||||||
@@ -338,7 +338,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
|
|||||||
**5.1.1 To install the HotROD sample app:**
|
**5.1.1 To install the HotROD sample app:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
|
curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
|
||||||
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
|
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -361,7 +361,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
|
|||||||
**5.1.4 To delete the HotROD sample app:**
|
**5.1.4 To delete the HotROD sample app:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
|
curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
|
||||||
| HOTROD_NAMESPACE=sample-application bash
|
| HOTROD_NAMESPACE=sample-application bash
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
86
Makefile
@@ -8,6 +8,7 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
|
|||||||
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||||
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||||
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
|
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
|
||||||
|
DEV_BUILD ?= "" # set to any non-empty value to enable dev build
|
||||||
|
|
||||||
# Internal variables or constants.
|
# Internal variables or constants.
|
||||||
FRONTEND_DIRECTORY ?= frontend
|
FRONTEND_DIRECTORY ?= frontend
|
||||||
@@ -15,15 +16,15 @@ QUERY_SERVICE_DIRECTORY ?= pkg/query-service
|
|||||||
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
|
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
|
||||||
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
|
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
|
||||||
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
|
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
|
||||||
LOCAL_GOOS ?= $(shell go env GOOS)
|
|
||||||
LOCAL_GOARCH ?= $(shell go env GOARCH)
|
GOOS ?= $(shell go env GOOS)
|
||||||
|
GOARCH ?= $(shell go env GOARCH)
|
||||||
|
GOPATH ?= $(shell go env GOPATH)
|
||||||
|
|
||||||
REPONAME ?= signoz
|
REPONAME ?= signoz
|
||||||
DOCKER_TAG ?= latest
|
DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
|
||||||
|
|
||||||
FRONTEND_DOCKER_IMAGE ?= frontend
|
FRONTEND_DOCKER_IMAGE ?= frontend
|
||||||
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
|
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
|
||||||
DEV_BUILD ?= ""
|
|
||||||
|
|
||||||
# Build-time Go variables
|
# Build-time Go variables
|
||||||
PACKAGE?=go.signoz.io/signoz
|
PACKAGE?=go.signoz.io/signoz
|
||||||
@@ -37,10 +38,22 @@ LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildV
|
|||||||
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
|
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
|
||||||
|
|
||||||
all: build-push-frontend build-push-query-service
|
all: build-push-frontend build-push-query-service
|
||||||
|
|
||||||
|
# Steps to build static files of frontend
|
||||||
|
build-frontend-static:
|
||||||
|
@echo "------------------"
|
||||||
|
@echo "--> Building frontend static files"
|
||||||
|
@echo "------------------"
|
||||||
|
@cd $(FRONTEND_DIRECTORY) && \
|
||||||
|
rm -rf build && \
|
||||||
|
CI=1 yarn install && \
|
||||||
|
yarn build && \
|
||||||
|
ls -l build
|
||||||
|
|
||||||
# Steps to build and push docker image of frontend
|
# Steps to build and push docker image of frontend
|
||||||
.PHONY: build-frontend-amd64 build-push-frontend
|
.PHONY: build-frontend-amd64 build-push-frontend
|
||||||
# Step to build docker image of frontend in amd64 (used in build pipeline)
|
# Step to build docker image of frontend in amd64 (used in build pipeline)
|
||||||
build-frontend-amd64:
|
build-frontend-amd64: build-frontend-static
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building frontend docker image for amd64"
|
@echo "--> Building frontend docker image for amd64"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@@ -49,7 +62,7 @@ build-frontend-amd64:
|
|||||||
--build-arg TARGETPLATFORM="linux/amd64" .
|
--build-arg TARGETPLATFORM="linux/amd64" .
|
||||||
|
|
||||||
# Step to build and push docker image of frontend(used in push pipeline)
|
# Step to build and push docker image of frontend(used in push pipeline)
|
||||||
build-push-frontend:
|
build-push-frontend: build-frontend-static
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building and pushing frontend docker image"
|
@echo "--> Building and pushing frontend docker image"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@@ -57,24 +70,52 @@ build-push-frontend:
|
|||||||
docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
|
docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
|
||||||
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
|
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||||
|
|
||||||
|
# Steps to build static binary of query service
|
||||||
|
.PHONY: build-query-service-static
|
||||||
|
build-query-service-static:
|
||||||
|
@echo "------------------"
|
||||||
|
@echo "--> Building query-service static binary"
|
||||||
|
@echo "------------------"
|
||||||
|
@if [ $(DEV_BUILD) != "" ]; then \
|
||||||
|
cd $(QUERY_SERVICE_DIRECTORY) && \
|
||||||
|
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
|
||||||
|
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
|
||||||
|
else \
|
||||||
|
cd $(QUERY_SERVICE_DIRECTORY) && \
|
||||||
|
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
|
||||||
|
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: build-query-service-static-amd64
|
||||||
|
build-query-service-static-amd64:
|
||||||
|
make GOARCH=amd64 build-query-service-static
|
||||||
|
|
||||||
|
.PHONY: build-query-service-static-arm64
|
||||||
|
build-query-service-static-arm64:
|
||||||
|
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
|
||||||
|
|
||||||
|
# Steps to build static binary of query service for all platforms
|
||||||
|
.PHONY: build-query-service-static-all
|
||||||
|
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
|
||||||
|
|
||||||
# Steps to build and push docker image of query service
|
# Steps to build and push docker image of query service
|
||||||
.PHONY: build-query-service-amd64 build-push-query-service
|
.PHONY: build-query-service-amd64 build-push-query-service
|
||||||
# Step to build docker image of query service in amd64 (used in build pipeline)
|
# Step to build docker image of query service in amd64 (used in build pipeline)
|
||||||
build-query-service-amd64:
|
build-query-service-amd64: build-query-service-static-amd64
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building query-service docker image for amd64"
|
@echo "--> Building query-service docker image for amd64"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||||
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
|
--build-arg TARGETPLATFORM="linux/amd64" .
|
||||||
|
|
||||||
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
|
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
|
||||||
build-push-query-service:
|
build-push-query-service: build-query-service-static-all
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building and pushing query-service docker image"
|
@echo "--> Building and pushing query-service docker image"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
|
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
|
||||||
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
|
--push --platform linux/arm64,linux/amd64 \
|
||||||
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||||
|
|
||||||
# Step to build EE docker image of query service in amd64 (used in build pipeline)
|
# Step to build EE docker image of query service in amd64 (used in build pipeline)
|
||||||
@@ -82,24 +123,14 @@ build-ee-query-service-amd64:
|
|||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building query-service docker image for amd64"
|
@echo "--> Building query-service docker image for amd64"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@if [ $(DEV_BUILD) != "" ]; then \
|
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64
|
||||||
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
|
||||||
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
|
||||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
|
|
||||||
else \
|
|
||||||
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
|
||||||
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
|
||||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
|
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
|
||||||
build-push-ee-query-service:
|
build-push-ee-query-service:
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@echo "--> Building and pushing query-service docker image"
|
@echo "--> Building and pushing query-service docker image"
|
||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service
|
||||||
--progress plain --push --platform linux/arm64,linux/amd64 \
|
|
||||||
--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
|
||||||
|
|
||||||
dev-setup:
|
dev-setup:
|
||||||
mkdir -p /var/lib/signoz
|
mkdir -p /var/lib/signoz
|
||||||
@@ -110,7 +141,7 @@ dev-setup:
|
|||||||
@echo "------------------"
|
@echo "------------------"
|
||||||
|
|
||||||
run-local:
|
run-local:
|
||||||
@LOCAL_GOOS=$(LOCAL_GOOS) LOCAL_GOARCH=$(LOCAL_GOARCH) docker-compose -f \
|
@docker-compose -f \
|
||||||
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
|
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
|
||||||
up --build -d
|
up --build -d
|
||||||
|
|
||||||
@@ -151,3 +182,6 @@ test:
|
|||||||
go test ./pkg/query-service/app/querier/...
|
go test ./pkg/query-service/app/querier/...
|
||||||
go test ./pkg/query-service/converter/...
|
go test ./pkg/query-service/converter/...
|
||||||
go test ./pkg/query-service/formatter/...
|
go test ./pkg/query-service/formatter/...
|
||||||
|
go test ./pkg/query-service/tests/integration/...
|
||||||
|
go test ./pkg/query-service/rules/...
|
||||||
|
go test ./pkg/query-service/collectorsimulator/...
|
||||||
|
|||||||
164
README.de-de.md
@@ -1,40 +1,75 @@
|
|||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
|
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
|
||||||
|
|
||||||
<p align="center">Überwache deine Anwendungen und behebe Probleme in deinen bereitgestellten Anwendungen. SigNoz ist eine Open Source Alternative zu DataDog, New Relic, etc.</p>
|
<p align="center">Überwache deine Anwendungen und behebe Probleme in deinen bereitgestellten Anwendungen. SigNoz ist eine Open Source Alternative zu DataDog, New Relic, etc.</p>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
|
||||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||||
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
|
||||||
<h3 align="center">
|
<h3 align="center">
|
||||||
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
|
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
|
||||||
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> •
|
||||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
|
||||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
|
||||||
<a href="https://signoz.io/slack"><b>Slack Community</b></a> •
|
<a href="https://signoz.io/slack"><b>Slack Community</b></a> •
|
||||||
<a href="https://twitter.com/SigNozHQ"><b>Twitter</b></a>
|
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
|
||||||
</h3>
|
</h3>
|
||||||
|
|
||||||
##
|
##
|
||||||
|
|
||||||
SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. SigNoz benutzt verteilte Einzelschritt-Fehlersuchen, um Einblick in deinen Software-Stack zu bekommen.
|
SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. Mit SigNoz können Sie Folgendes tun:
|
||||||
|
|
||||||
👉 Du kannst Werte wie die P99-Latenz und die Fehler Häufigkeit von deinen Services, externen API Aufrufen und einzelnen Endpunkten sehen.
|
👉 Visualisieren Sie Metriken, Traces und Logs in einer einzigen Oberfläche.
|
||||||
|
|
||||||
👉 Du kannst die Ursache des Problems finden, indem du zu dem Einzelschritt gehst, der das Problem verursacht und dir detaillierte Flamegraphs von einzelnen Abfragefehlersuchen anzeigen lassen.
|
👉 Sie können Metriken wie die p99-Latenz, Fehlerquoten für Ihre Dienste, externe API-Aufrufe und individuelle Endpunkte anzeigen.
|
||||||
|
|
||||||
👉 Erstelle Aggregate auf Basis von Fehlersuche Daten, um geschäftsrelevante Metriken zu erhalten.
|
👉 Sie können die Ursache des Problems ermitteln, indem Sie zu den genauen Traces gehen, die das Problem verursachen, und detaillierte Flammenbilder einzelner Anfragetraces anzeigen.
|
||||||
|
|
||||||
|
👉 Führen Sie Aggregationen auf Trace-Daten durch, um geschäftsrelevante Metriken zu erhalten.
|
||||||
|
|
||||||
|
👉 Filtern und Abfragen von Logs, Erstellen von Dashboards und Benachrichtigungen basierend auf Attributen in den Logs.
|
||||||
|
|
||||||
|
👉 Automatische Aufzeichnung von Ausnahmen in Python, Java, Ruby und Javascript.
|
||||||
|
|
||||||
|
👉 Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
|
||||||
|
|
||||||
|
##
|
||||||
|
### Anwendung Metriken
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
### Verteiltes Tracing
|
||||||
|
<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
|
||||||
|
|
||||||
|
<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
|
||||||
|
|
||||||
|
### Log Verwaltung
|
||||||
|
|
||||||
|
<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
|
||||||
|
|
||||||
|
### Infrastruktur Überwachung
|
||||||
|
|
||||||
|
<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
|
||||||
|
|
||||||
|
### Exceptions Monitoring
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
### Alarme
|
||||||
|
|
||||||
|
<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
|
|
||||||
|
|
||||||
## Werde Teil unserer Slack Community
|
## Werde Teil unserer Slack Community
|
||||||
|
|
||||||
@@ -42,20 +77,22 @@ Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋
|
|||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
|
|
||||||
|
|
||||||
## Funktionen:
|
## Funktionen:
|
||||||
|
|
||||||
- Übersichtsmetriken deiner Anwendung wie RPS, 50tes/90tes/99tes Quantil Latenzen und Fehler Häufigkeiten.
|
- Einheitliche Benutzeroberfläche für Metriken, Traces und Logs. Keine Notwendigkeit, zwischen Prometheus und Jaeger zu wechseln, um Probleme zu debuggen oder ein separates Log-Tool wie Elastic neben Ihrer Metriken- und Traces-Stack zu verwenden.
|
||||||
- Übersicht der langsamsten Endpunkte deiner Anwendung.
|
- Überblick über Anwendungsmetriken wie RPS, Latenzzeiten des 50tes/90tes/99tes Perzentils und Fehlerquoten.
|
||||||
- Sieh dir die genaue Einzelschritt-Fehlersuche deiner Abfrage an, um Fehler in nachgelagerten Diensten, langsamen Datenbank Abfragen und Aufrufen von Drittanbieter Diensten wie Zahlungsportalen, etc. zu finden.
|
- Langsamste Endpunkte in Ihrer Anwendung.
|
||||||
- Filtere Einzelschritt-Fehlersuchen nach Dienstname, Latenz, Fehler, Stichworten/ Anmerkungen.
|
- Zeigen Sie genaue Anfragetraces an, um Probleme in nachgelagerten Diensten, langsamen Datenbankabfragen oder Aufrufen von Drittanbieterdiensten wie Zahlungsgateways zu identifizieren.
|
||||||
- Führe Aggregate auf Basis von Einzelschritt-Fehlersuche Daten (Ereignisse/Abstände) aus, um geschäftsrelevante Metriken zu erhalten. Du kannst dir z. B. die Fehlerrate und 99tes Quantil Latenz von `customer_type: gold`, `deployment_version: v2` oder `external_call: paypal` ausgeben lassen.
|
- Filtern Sie Traces nach Dienstname, Operation, Latenz, Fehler, Tags/Annotationen.
|
||||||
- Einheitliche Benutzeroberfläche für Metriken und Einzelschritt-Fehlersuchen. Du musst nicht zwischen Prometheus und Jaeger hin und her wechseln, um Fehler zu beheben.
|
- Führen Sie Aggregationen auf Trace-Daten (Ereignisse/Spans) durch, um geschäftsrelevante Metriken zu erhalten. Beispielsweise können Sie die Fehlerquote und die 99tes Perzentillatenz für `customer_type: gold` oder `deployment_version: v2` oder `external_call: paypal` erhalten.
|
||||||
|
- Native Unterstützung für OpenTelemetry-Logs, erweiterten Log-Abfrage-Builder und automatische Log-Sammlung aus dem Kubernetes-Cluster.
|
||||||
|
- Blitzschnelle Log-Analytik ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
|
||||||
|
- End-to-End-Sichtbarkeit der Infrastrukturleistung, Aufnahme von Metriken aus allen Arten von Host-Umgebungen.
|
||||||
|
- Einfache Einrichtung von Benachrichtigungen mit dem selbst erstellbaren Abfrage-Builder.
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
|
|
||||||
|
|
||||||
## Wieso SigNoz?
|
## Wieso SigNoz?
|
||||||
|
|
||||||
@@ -65,24 +102,28 @@ Wir wollten eine selbst gehostete, Open Source Variante von Lösungen wie DataDo
|
|||||||
|
|
||||||
Open Source gibt dir außerdem die totale Kontrolle über deine Konfiguration, Stichprobenentnahme und Betriebszeit. Du kannst des Weiteren neue Module auf Basis von SigNoz bauen, die erweiterte, geschäftsspezifische Funktionen anbieten.
|
Open Source gibt dir außerdem die totale Kontrolle über deine Konfiguration, Stichprobenentnahme und Betriebszeit. Du kannst des Weiteren neue Module auf Basis von SigNoz bauen, die erweiterte, geschäftsspezifische Funktionen anbieten.
|
||||||
|
|
||||||
### Unterstützte Programmiersprachen:
|
### Languages supported:
|
||||||
|
|
||||||
Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als die Software Library, die du nutzen kannst um deine Anwendungen auszuführen. Jedes Framework und jede Sprache die von OpenTelemetry unterstützt wird, wird auch von SigNoz unterstützt. Einige der unterstützten, größeren Programmiersprachen sind:
|
Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als Bibliothek, mit der Sie Ihre Anwendungen instrumentieren können. Daher wird jedes von OpenTelemetry unterstützte Framework und jede Sprache auch von SignNoz unterstützt. Einige der wichtigsten unterstützten Sprachen sind:
|
||||||
|
|
||||||
- Java
|
- Java
|
||||||
- Python
|
- Python
|
||||||
- NodeJS
|
- NodeJS
|
||||||
- Go
|
- Go
|
||||||
|
- PHP
|
||||||
|
- .NET
|
||||||
|
- Ruby
|
||||||
|
- Elixir
|
||||||
|
- Rust
|
||||||
|
|
||||||
|
|
||||||
Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/
|
Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
|
|
||||||
|
|
||||||
## Erste Schritte mit SigNoz
|
## Erste Schritte mit SigNoz
|
||||||
|
|
||||||
|
|
||||||
### Bereitstellung mit Docker
|
### Bereitstellung mit Docker
|
||||||
|
|
||||||
Bitte folge den [hier](https://signoz.io/docs/install/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.
|
Bitte folge den [hier](https://signoz.io/docs/install/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.
|
||||||
@@ -90,20 +131,17 @@ Bitte folge den [hier](https://signoz.io/docs/install/docker/) aufgelisteten Sch
|
|||||||
Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/install/troubleshooting/) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.
|
Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/install/troubleshooting/) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.
|
||||||
|
|
||||||
<p>  </p>
|
<p>  </p>
|
||||||
|
|
||||||
|
### Deploy in Kubernetes using Helm
|
||||||
### Bereitstellung mit Kubernetes und Helm
|
|
||||||
|
|
||||||
Bitte folge den [hier](https://signoz.io/docs/deployment/helm_chart) aufgelisteten Schritten, um deine Anwendung mit Helm Charts bereitzustellen.
|
Bitte folge den [hier](https://signoz.io/docs/deployment/helm_chart) aufgelisteten Schritten, um deine Anwendung mit Helm Charts bereitzustellen.
|
||||||
|
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
|
|
||||||
|
|
||||||
## Vergleiche mit anderen Lösungen
|
## Vergleiche mit bekannten Tools
|
||||||
|
|
||||||
### SigNoz vs. Prometheus
|
### SigNoz vs Prometheus
|
||||||
|
|
||||||
Prometheus ist gut, falls du dich nur für Metriken interessierst. Wenn du eine nahtlose Integration von Metriken und Einzelschritt-Fehlersuchen haben möchtest, ist die Kombination aus Prometheus und Jaeger nicht das Richtige für dich.
|
Prometheus ist gut, falls du dich nur für Metriken interessierst. Wenn du eine nahtlose Integration von Metriken und Einzelschritt-Fehlersuchen haben möchtest, ist die Kombination aus Prometheus und Jaeger nicht das Richtige für dich.
|
||||||
|
|
||||||
@@ -111,49 +149,79 @@ Unser Ziel ist es, eine integrierte Benutzeroberfläche aus Metriken und Einzels
|
|||||||
|
|
||||||
<p>  </p>
|
<p>  </p>
|
||||||
|
|
||||||
### SigNoz vs. Jaeger
|
### SigNoz vs Jaeger
|
||||||
|
|
||||||
Jaeger kümmert sich nur um verteilte Einzelschritt-Fehlersuche. SigNoz erstellt sowohl Metriken als auch Einzelschritt-Fehlersuche, daneben haben wir auch Protokoll Verwaltung auf unserem Plan.
|
Jaeger kümmert sich nur um verteilte Einzelschritt-Fehlersuche. SigNoz erstellt sowohl Metriken als auch Einzelschritt-Fehlersuche, daneben haben wir auch Protokoll Verwaltung auf unserem Plan.
|
||||||
|
|
||||||
Außerdem hat SigNoz noch mehr spezielle Funktionen im Vergleich zu Jaeger:
|
Außerdem hat SigNoz noch mehr spezielle Funktionen im Vergleich zu Jaeger:
|
||||||
|
|
||||||
- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an
|
- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an.
|
||||||
- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag - customer_type='premium', was hingegen mit SigNoz leicht umsetzbar ist.
|
- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag `customer_type=premium`, was hingegen mit SigNoz leicht umsetzbar ist.
|
||||||
|
|
||||||
|
<p>  </p>
|
||||||
|
|
||||||
|
### SigNoz vs Elastic
|
||||||
|
|
||||||
|
- Die Verwaltung von SigNoz-Protokollen basiert auf 'ClickHouse', einem spaltenbasierten OLAP-Datenspeicher, der aggregierte Protokollanalyseabfragen wesentlich effizienter macht.
|
||||||
|
- 50 % geringerer Ressourcenbedarf im Vergleich zu Elastic während der Aufnahme.
|
||||||
|
|
||||||
|
Wir haben Benchmarks veröffentlicht, die Elastic mit SignNoz vergleichen. Schauen Sie es sich [hier](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
|
||||||
|
|
||||||
|
<p>  </p>
|
||||||
|
|
||||||
|
### SigNoz vs Loki
|
||||||
|
|
||||||
|
- SigNoz unterstützt Aggregationen von Daten mit hoher Kardinalität über ein großes Volumen, Loki hingegen nicht.
|
||||||
|
- SigNoz unterstützt Indizes über Daten mit hoher Kardinalität und hat keine Beschränkungen hinsichtlich der Anzahl der Indizes, während Loki maximale Streams erreicht, wenn ein paar Indizes hinzugefügt werden.
|
||||||
|
- Das Durchsuchen großer Datenmengen ist in Loki im Vergleich zu SigNoz schwierig und langsam.
|
||||||
|
|
||||||
|
Wir haben Benchmarks veröffentlicht, die Loki mit SigNoz vergleichen. Schauen Sie es sich [hier](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
|
|
||||||
|
|
||||||
## Zum Projekt beitragen
|
## Zum Projekt beitragen
|
||||||
|
|
||||||
|
Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md), durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
|
||||||
|
Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem #contributing Kanal in unserer [slack community](https://signoz.io/slack)
|
||||||
|
|
||||||
Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md) durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
|
### Unsere Projektbetreuer
|
||||||
|
|
||||||
Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://signoz.io/slack).
|
#### Backend
|
||||||
|
|
||||||
|
- [Ankit Nayan](https://github.com/ankitnayan)
|
||||||
|
- [Nityananda Gohain](https://github.com/nityanandagohain)
|
||||||
|
- [Srikanth Chekuri](https://github.com/srikanthccv)
|
||||||
|
- [Vishal Sharma](https://github.com/makeavish)
|
||||||
|
|
||||||
|
#### Frontend
|
||||||
|
|
||||||
|
- [Palash Gupta](https://github.com/palashgdev)
|
||||||
|
|
||||||
|
#### DevOps
|
||||||
|
|
||||||
|
- [Prashant Shahi](https://github.com/prashant-shahi)
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
|
|
||||||
|
|
||||||
## Dokumentation
|
## Dokumentation
|
||||||
|
|
||||||
Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.
|
Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
|
|
||||||
|
|
||||||
## Community
|
## Gemeinschaft
|
||||||
|
|
||||||
Werde Teil der [Slack Community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
|
Werde Teil der [slack community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
|
||||||
|
|
||||||
Falls du irgendwelche Ideen, Fragen oder Feedback hast, kannst du sie gerne über unsere [Github Discussions](https://github.com/SigNoz/signoz/discussions) mit uns teilen.
|
Falls du irgendwelche Ideen, Fragen oder Feedback hast, kannst du sie gerne über unsere [Github Discussions](https://github.com/SigNoz/signoz/discussions) mit uns teilen.
|
||||||
|
|
||||||
Wie immer, danke an unsere großartigen Unterstützer!
|
Wie immer, Dank an unsere großartigen Mitwirkenden!
|
||||||
|
|
||||||
<a href="https://github.com/signoz/signoz/graphs/contributors">
|
<a href="https://github.com/signoz/signoz/graphs/contributors">
|
||||||
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
|
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
|
||||||
</a>
|
</a>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
|
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
|
||||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||||
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
||||||
|
|||||||
203
README.zh-cn.md
@@ -1,170 +1,225 @@
|
|||||||
<p align="center">
|
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
|
||||||
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
|
|
||||||
|
|
||||||
<p align="center">监视你的应用,并可排查已部署应用中的问题,这是一个开源的可替代DataDog、NewRelic的方案</p>
|
<p align="center">监控你的应用,并且可排查已部署应用的问题,这是一个可替代 DataDog、NewRelic 的开源方案</p>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
|
||||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||||
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
##
|
<h3 align="center">
|
||||||
|
<a href="https://signoz.io/docs"><b>文档</b></a> •
|
||||||
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>中文ReadMe</b></a> •
|
||||||
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>德文ReadMe</b></a> •
|
||||||
|
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>葡萄牙语ReadMe</b></a> •
|
||||||
|
<a href="https://signoz.io/slack"><b>Slack 社区</b></a> •
|
||||||
|
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
|
||||||
|
</h3>
|
||||||
|
|
||||||
SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNoz使用分布式追踪来增加软件技术栈的可见性。
|
##
|
||||||
|
|
||||||
👉 你能看到一些性能指标,服务、外部api调用、每个终端(endpoint)的p99延迟和错误率。
|
SigNoz 帮助开发人员监控应用并排查已部署应用的问题。你可以使用 SigNoz 实现如下能力:
|
||||||
|
|
||||||
👉 通过准确的追踪来确定是什么引起了问题,并且可以看到每个独立请求的帧图(framegraph),这样你就能找到根本原因。
|
👉 在同一块面板上,可视化 Metrics, Traces 和 Logs 内容。
|
||||||
|
|
||||||
👉 聚合trace数据来获得业务相关指标。
|
👉 你可以关注服务的 p99 延迟和错误率, 包括外部 API 调用和个别的端点。
|
||||||
|
|
||||||

|
👉 你可以找到问题的根因,通过提取相关问题的 traces 日志、单独查看请求 traces 的火焰图详情。
|
||||||
<br />
|
|
||||||

|
👉 执行 trace 数据聚合,以获取业务相关的 metrics
|
||||||
<br />
|
|
||||||

|
👉 对日志过滤和查询,通过日志的属性建立看板和告警
|
||||||
|
|
||||||
|
👉 通过 Python、Java、Ruby 和 JavaScript 自动记录异常
|
||||||
|
|
||||||
|
👉 轻松的自定义查询和设置告警
|
||||||
|
|
||||||
|
### 应用 Metrics 展示
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### 分布式追踪
|
||||||
|
|
||||||
|
<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
|
||||||
|
|
||||||
|
<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
|
||||||
|
|
||||||
|
### 日志管理
|
||||||
|
|
||||||
|
<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
|
||||||
|
|
||||||
|
### 基础设施监控
|
||||||
|
|
||||||
|
<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
|
||||||
|
|
||||||
|
### 异常监控
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### 告警
|
||||||
|
|
||||||
|
<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
|
## 加入我们 Slack 社区
|
||||||
|
|
||||||
## 加入我们的Slack社区
|
来 [Slack](https://signoz.io/slack) 和我们打招呼吧 👋
|
||||||
|
|
||||||
来[Slack](https://signoz.io/slack) 跟我们打声招呼👋
|
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
|
## 特性:
|
||||||
|
|
||||||
## 功能:
|
- 为 metrics, traces 和 logs 提供统一的 UI。无需从 Prometheus 切换到 Jaeger 去查找问题，也无需使用像 Elastic 这样的日志工具将你的 metrics 和 traces 分开管理
|
||||||
|
|
||||||
- 应用概览指标(metrics),如RPS, p50/p90/p99延迟率分位值,错误率等。
|
- 默认统计应用的 metrics 数据,像 RPS (每秒请求数), 50th/90th/99th 的分位数延迟数据,还有相关的错误率
|
||||||
- 应用中最慢的终端(endpoint)
|
|
||||||
- 查看特定请求的trace数据来分析下游服务问题、慢数据库查询问题 及调用第三方服务如支付网关的问题
|
- 找到应用中最慢的端点
|
||||||
- 通过服务名称、操作、延迟、错误、标签来过滤traces。
|
|
||||||
- 聚合trace数据(events/spans)来得到业务相关指标。比如,你可以通过过滤条件`customer_type: gold` or `deployment_version: v2` or `external_call: paypal` 来获取指定业务的错误率和p99延迟
|
- 查看准确的请求跟踪数据,找到下游服务的问题了,比如 DB 慢查询,或者调用第三方的支付网关等
|
||||||
- 为metrics和trace提供统一的UI。排查问题不需要在Prometheus和Jaeger之间切换。
|
|
||||||
|
- 通过 服务名、操作方式、延迟、错误、标签/注释 过滤 traces 数据
|
||||||
|
|
||||||
|
- 通过聚合 trace 数据而获得业务相关的 metrics。 比如你可以通过 `customer_type: gold` 或者 `deployment_version: v2` 或者 `external_call: paypal` 获取错误率和 P99 延迟数据
|
||||||
|
|
||||||
|
- 原生支持 OpenTelemetry 日志,高级日志查询,自动收集 k8s 相关日志
|
||||||
|
|
||||||
|
- 快如闪电的日志分析 ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
|
||||||
|
|
||||||
|
- 可视化点到点的基础设施性能,提取有所有类型机器的 metrics 数据
|
||||||
|
|
||||||
|
- 轻易自定义告警查询
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
|
## 为什么使用 SigNoz?
|
||||||
|
|
||||||
## 为何选择SigNoz?
|
作为开发者, 我们发现 SaaS 厂商对一些大家想要的小功能都是闭源的,这种行为真的让人有点恼火。 闭源厂商还会在月底给你一张没有明细的巨额账单。
|
||||||
|
|
||||||
作为开发人员,我们发现依赖闭源的SaaS厂商提供的每个小功能有些麻烦,闭源厂商通常会给你一份巨额月付账单,但不提供足够的透明度,你不知道你为哪些功能付费。
|
我们想做一个自托管并且可开源的工具,像 DataDog 和 NewRelic 那样, 为那些担心数据隐私和安全的公司提供第三方服务。
|
||||||
|
|
||||||
我们想做一个自服务的开源版本的工具,类似于DataDog和NewRelic,用于那些对客户数据流入第三方有隐私和安全担忧的厂商。
|
作为开源的项目,你完全可以自己掌控你的配置、样本和更新。你同样可以基于 SigNoz 拓展特定的业务模块。
|
||||||
|
|
||||||
开源也让你对配置、采样和正常运行时间有完整的控制,你可以在SigNoz基础上构建模块来满足特定的商业需求。
|
### 支持的编程语言:
|
||||||
|
|
||||||
### 语言支持
|
我们支持 [OpenTelemetry](https://opentelemetry.io)。作为一个观测你应用的库文件。所以任何 OpenTelemetry 支持的框架和语言,对于 SigNoz 也同样支持。 一些主要支持的语言如下:
|
||||||
|
|
||||||
我们支持[OpenTelemetry](https://opentelemetry.io)库,你可以使用它来装备应用。也就是说SigNoz支持任何支持OpenTelemetry库的框架和语言。 主要支持语言包括:
|
|
||||||
|
|
||||||
- Java
|
- Java
|
||||||
- Python
|
- Python
|
||||||
- NodeJS
|
- NodeJS
|
||||||
- Go
|
- Go
|
||||||
|
- PHP
|
||||||
|
- .NET
|
||||||
|
- Ruby
|
||||||
|
- Elixir
|
||||||
|
- Rust
|
||||||
|
|
||||||
你可以在这个文档里找到完整的语言列表 - https://opentelemetry.io/docs/
|
你可以在这里找到全部支持的语言列表 - https://opentelemetry.io/docs/
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
|
## 让我们开始吧
|
||||||
|
|
||||||
## 入门
|
### 使用 Docker 部署
|
||||||
|
|
||||||
|
请一步步跟随 [这里](https://signoz.io/docs/install/docker/) 通过 docker 来安装。
|
||||||
|
|
||||||
### 使用Docker部署
|
这个 [排障说明书](https://signoz.io/docs/install/troubleshooting/) 可以帮助你解决碰到的问题。
|
||||||
|
|
||||||
请按照[这里](https://signoz.io/docs/install/docker/)列出的步骤使用Docker来安装
|
|
||||||
|
|
||||||
如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/install/troubleshooting/)会对你有帮助。
|
|
||||||
|
|
||||||
<p>  </p>
|
<p>  </p>
|
||||||
|
|
||||||
|
### 使用 Helm 在 Kubernetes 部署
|
||||||
|
|
||||||
### 使用Helm在Kubernetes上部署
|
请一步步跟随 [这里](https://signoz.io/docs/deployment/helm_chart) 通过 helm 来安装
|
||||||
|
|
||||||
请跟着[这里](https://signoz.io/docs/deployment/helm_chart)的步骤使用helm charts安装
|
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
|
## 比较相似的工具
|
||||||
|
|
||||||
## 与其他方案的比较
|
|
||||||
|
|
||||||
### SigNoz vs Prometheus
|
### SigNoz vs Prometheus
|
||||||
|
|
||||||
如果你只是需要监控指标(metrics),那Prometheus是不错的,但如果你要无缝的在metrics和traces之间切换,那目前把Prometheus & Jaeger串起来的体验并不好。
|
Prometheus 是一个针对 metrics 监控的强大工具。但是如果你想无缝的切换 metrics 和 traces 查询,你当前大概率需要在 Prometheus 和 Jaeger 之间切换。
|
||||||
|
|
||||||
我们的目标是为metrics和traces提供统一的UI - 类似于Datadog这样的Saas厂提供的方案。并且能够对trace进行过滤和聚合,这是目前Jaeger缺失的功能。
|
我们的目标是提供一个供用户观测 metrics 和 traces 的整合 UI。就像 SaaS 供应商 DataDog 那样，并提供很多 Jaeger 缺失的功能，比如针对 traces 的过滤功能和聚合功能。
|
||||||
|
|
||||||
<p>  </p>
|
<p>  </p>
|
||||||
|
|
||||||
### SigNoz vs Jaeger
|
### SigNoz vs Jaeger
|
||||||
|
|
||||||
Jaeger只做分布式追踪(distributed tracing),SigNoz则支持metrics,traces,logs ,即可视化的三大支柱。
|
Jaeger 仅仅是一个分布式追踪系统。 但是 SigNoz 可以提供 metrics, traces 和 logs 所有的观测。
|
||||||
|
|
||||||
并且SigNoz有一些Jaeger没有的高级功能:
|
而且, SigNoz 相较于 Jaeger 拥有更多的高级功能:
|
||||||
|
|
||||||
- Jaegar UI无法在traces或过滤的traces上展示metrics。
|
- Jaegar UI 不能提供任何基于 traces 的 metrics 查询和过滤。
|
||||||
- Jaeger不能对过滤的traces做聚合操作。例如,拥有tag为customer_type='premium'的所有请求的p99延迟。而这个功能在SigNoz这儿是很容易实现。
|
|
||||||
|
- Jaeger 不能针对过滤的 traces 做聚合。 比如, p99 延迟的请求有个标签是 customer_type='premium'。 而这些在 SigNoz 可以轻松做到。
|
||||||
|
|
||||||
|
<p>  </p>
|
||||||
|
|
||||||
|
### SigNoz vs Elastic
|
||||||
|
|
||||||
|
- SigNoz 的日志管理是基于 ClickHouse 实现的,可以使日志的聚合更加高效,因为它是基于 OLAP 的数据仓储。
|
||||||
|
|
||||||
|
- 与 Elastic 相比,可以节省 50% 的资源成本
|
||||||
|
|
||||||
|
我们已经公布了 Elastic 和 SigNoz 的性能对比。 请点击 [这里](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
|
||||||
|
|
||||||
|
<p>  </p>
|
||||||
|
|
||||||
|
### SigNoz vs Loki
|
||||||
|
|
||||||
|
- SigNoz 支持大容量高基数的聚合,但是 loki 是不支持的。
|
||||||
|
|
||||||
|
- SigNoz 支持索引的高基数查询,并且对索引没有数量限制,而 Loki 会在添加部分索引后到达最大上限。
|
||||||
|
|
||||||
|
- 相较于 SigNoz,Loki 在搜索大量数据下既困难又缓慢。
|
||||||
|
|
||||||
|
我们已经发布了基准测试对比 Loki 和 SigNoz 性能。请点击 [这里](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
|
|
||||||
|
|
||||||
## 贡献
|
## 贡献
|
||||||
|
|
||||||
|
我们 ❤️ 你的贡献,无论大小。 请先阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 再开始给 SigNoz 做贡献。
|
||||||
|
|
||||||
我们 ❤️ 任何贡献无论大小。 请阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 然后开始给Signoz做贡献。
|
如果你不知道如何开始? 只需要在 [slack 社区](https://signoz.io/slack) 通过 `#contributing` 频道联系我们。
|
||||||
|
|
||||||
还不清楚怎么开始? 只需在[slack社区](https://signoz.io/slack)的`#contributing`频道里ping我们。
|
### 项目维护人员
|
||||||
|
|
||||||
### Project maintainers
|
#### 后端
|
||||||
|
|
||||||
#### Backend
|
|
||||||
|
|
||||||
- [Ankit Nayan](https://github.com/ankitnayan)
|
- [Ankit Nayan](https://github.com/ankitnayan)
|
||||||
- [Nityananda Gohain](https://github.com/nityanandagohain)
|
- [Nityananda Gohain](https://github.com/nityanandagohain)
|
||||||
- [Srikanth Chekuri](https://github.com/srikanthccv)
|
- [Srikanth Chekuri](https://github.com/srikanthccv)
|
||||||
- [Vishal Sharma](https://github.com/makeavish)
|
- [Vishal Sharma](https://github.com/makeavish)
|
||||||
|
|
||||||
#### Frontend
|
#### 前端
|
||||||
|
|
||||||
- [Palash Gupta](https://github.com/palashgdev)
|
- [Palash Gupta](https://github.com/palashgdev)
|
||||||
|
|
||||||
#### DevOps
|
#### 运维开发
|
||||||
|
|
||||||
- [Prashant Shahi](https://github.com/prashant-shahi)
|
- [Prashant Shahi](https://github.com/prashant-shahi)
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
|
|
||||||
|
|
||||||
## 文档
|
## 文档
|
||||||
|
|
||||||
文档在这里:https://signoz.io/docs/. 如果你觉得有任何不清楚或者有文档缺失,请在Github里发一个问题,并使用标签 `documentation` 或者在社区stack频道里告诉我们。
|
你可以通过 https://signoz.io/docs/ 找到相关文档。如果你觉得有任何表述不清或内容缺失的地方, 请提交带有 `documentation` 标签的 Github issue，或者通过 slack 社区频道告诉我们。
|
||||||
|
|
||||||
<br /><br />
|
<br /><br />
|
||||||
|
|
||||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
|
|
||||||
|
|
||||||
## 社区
|
## 社区
|
||||||
|
|
||||||
加入[slack community](https://signoz.io/slack),了解更多关于分布式跟踪、可观察性(observability),以及SigNoz。同时与其他用户和贡献者一起交流。
|
加入 [slack 社区](https://signoz.io/slack) 去了解更多关于分布式追踪、可观测性系统 。或者与 SigNoz 其他用户和贡献者交流。
|
||||||
|
|
||||||
如果你有任何想法、问题或者反馈,请在[Github Discussions](https://github.com/SigNoz/signoz/discussions)分享给我们。
|
如果你有任何想法、问题、或者任何反馈, 请通过 [Github Discussions](https://github.com/SigNoz/signoz/discussions) 分享。
|
||||||
|
|
||||||
最后,感谢我们这些优秀的贡献者们。
|
不管怎么样,感谢这个项目的所有贡献者!
|
||||||
|
|
||||||
<a href="https://github.com/signoz/signoz/graphs/contributors">
|
<a href="https://github.com/signoz/signoz/graphs/contributors">
|
||||||
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
|
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
|
||||||
</a>
|
</a>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in Si
|
|||||||
```sh
|
```sh
|
||||||
kubectl create ns sample-application
|
kubectl create ns sample-application
|
||||||
|
|
||||||
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
|
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
To generate load:
|
To generate load:
|
||||||
@@ -66,7 +66,7 @@ To generate load:
|
|||||||
```sh
|
```sh
|
||||||
kubectl -n sample-application run strzal --image=djbingham/curl \
|
kubectl -n sample-application run strzal --image=djbingham/curl \
|
||||||
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
|
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
|
||||||
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
|
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
|
||||||
```
|
```
|
||||||
|
|
||||||
To stop load:
|
To stop load:
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
version: "3.9"
|
version: "3.9"
|
||||||
|
|
||||||
x-clickhouse-defaults: &clickhouse-defaults
|
x-clickhouse-defaults: &clickhouse-defaults
|
||||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
image: clickhouse/clickhouse-server:23.11.1-alpine
|
||||||
tty: true
|
tty: true
|
||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
@@ -16,7 +16,14 @@ x-clickhouse-defaults: &clickhouse-defaults
|
|||||||
max-file: "3"
|
max-file: "3"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
test:
|
||||||
|
[
|
||||||
|
"CMD",
|
||||||
|
"wget",
|
||||||
|
"--spider",
|
||||||
|
"-q",
|
||||||
|
"localhost:8123/ping"
|
||||||
|
]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 3
|
retries: 3
|
||||||
@@ -26,12 +33,14 @@ x-clickhouse-defaults: &clickhouse-defaults
|
|||||||
soft: 262144
|
soft: 262144
|
||||||
hard: 262144
|
hard: 262144
|
||||||
|
|
||||||
x-clickhouse-depend: &clickhouse-depend
|
x-db-depend: &db-depend
|
||||||
depends_on:
|
depends_on:
|
||||||
- clickhouse
|
- clickhouse
|
||||||
|
- otel-collector-migrator
|
||||||
# - clickhouse-2
|
# - clickhouse-2
|
||||||
# - clickhouse-3
|
# - clickhouse-3
|
||||||
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
zookeeper-1:
|
zookeeper-1:
|
||||||
image: bitnami/zookeeper:3.7.1
|
image: bitnami/zookeeper:3.7.1
|
||||||
@@ -124,7 +133,7 @@ services:
|
|||||||
# - ./data/clickhouse-3/:/var/lib/clickhouse/
|
# - ./data/clickhouse-3/:/var/lib/clickhouse/
|
||||||
|
|
||||||
alertmanager:
|
alertmanager:
|
||||||
image: signoz/alertmanager:0.23.1
|
image: signoz/alertmanager:0.23.4
|
||||||
volumes:
|
volumes:
|
||||||
- ./data/alertmanager:/data
|
- ./data/alertmanager:/data
|
||||||
command:
|
command:
|
||||||
@@ -137,8 +146,12 @@ services:
|
|||||||
condition: on-failure
|
condition: on-failure
|
||||||
|
|
||||||
query-service:
|
query-service:
|
||||||
image: signoz/query-service:0.22.0
|
image: signoz/query-service:0.35.1
|
||||||
command: ["-config=/root/config/prometheus.yml"]
|
command:
|
||||||
|
[
|
||||||
|
"-config=/root/config/prometheus.yml",
|
||||||
|
"--prefer-delta=true"
|
||||||
|
]
|
||||||
# ports:
|
# ports:
|
||||||
# - "6060:6060" # pprof port
|
# - "6060:6060" # pprof port
|
||||||
# - "8080:8080" # query-service port
|
# - "8080:8080" # query-service port
|
||||||
@@ -156,17 +169,24 @@ services:
|
|||||||
- TELEMETRY_ENABLED=true
|
- TELEMETRY_ENABLED=true
|
||||||
- DEPLOYMENT_TYPE=docker-swarm
|
- DEPLOYMENT_TYPE=docker-swarm
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
|
test:
|
||||||
|
[
|
||||||
|
"CMD",
|
||||||
|
"wget",
|
||||||
|
"--spider",
|
||||||
|
"-q",
|
||||||
|
"localhost:8080/api/v1/health"
|
||||||
|
]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 3
|
retries: 3
|
||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
<<: *clickhouse-depend
|
<<: *db-depend
|
||||||
|
|
||||||
frontend:
|
frontend:
|
||||||
image: signoz/frontend:0.22.0
|
image: signoz/frontend:0.35.1
|
||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
@@ -179,11 +199,17 @@ services:
|
|||||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||||
|
|
||||||
otel-collector:
|
otel-collector:
|
||||||
image: signoz/signoz-otel-collector:0.79.1
|
image: signoz/signoz-otel-collector:0.88.3
|
||||||
command: ["--config=/etc/otel-collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
|
command:
|
||||||
|
[
|
||||||
|
"--config=/etc/otel-collector-config.yaml",
|
||||||
|
"--manager-config=/etc/manager-config.yaml",
|
||||||
|
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||||
|
]
|
||||||
user: root # required for reading docker container logs
|
user: root # required for reading docker container logs
|
||||||
volumes:
|
volumes:
|
||||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||||
|
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||||
environment:
|
environment:
|
||||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
|
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
|
||||||
@@ -191,8 +217,8 @@ services:
|
|||||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||||
ports:
|
ports:
|
||||||
# - "1777:1777" # pprof extension
|
# - "1777:1777" # pprof extension
|
||||||
- "4317:4317" # OTLP gRPC receiver
|
- "4317:4317" # OTLP gRPC receiver
|
||||||
- "4318:4318" # OTLP HTTP receiver
|
- "4318:4318" # OTLP HTTP receiver
|
||||||
# - "8888:8888" # OtelCollector internal metrics
|
# - "8888:8888" # OtelCollector internal metrics
|
||||||
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
||||||
# - "9411:9411" # Zipkin port
|
# - "9411:9411" # Zipkin port
|
||||||
@@ -205,11 +231,31 @@ services:
|
|||||||
mode: global
|
mode: global
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
<<: *clickhouse-depend
|
depends_on:
|
||||||
|
- clickhouse
|
||||||
|
- otel-collector-migrator
|
||||||
|
- query-service
|
||||||
|
|
||||||
|
otel-collector-migrator:
|
||||||
|
image: signoz/signoz-schema-migrator:0.88.3
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
delay: 5s
|
||||||
|
command:
|
||||||
|
- "--dsn=tcp://clickhouse:9000"
|
||||||
|
depends_on:
|
||||||
|
- clickhouse
|
||||||
|
# - clickhouse-2
|
||||||
|
# - clickhouse-3
|
||||||
|
|
||||||
otel-collector-metrics:
|
otel-collector-metrics:
|
||||||
image: signoz/signoz-otel-collector:0.79.1
|
image: signoz/signoz-otel-collector:0.88.3
|
||||||
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
|
command:
|
||||||
|
[
|
||||||
|
"--config=/etc/otel-collector-metrics-config.yaml",
|
||||||
|
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||||
|
]
|
||||||
volumes:
|
volumes:
|
||||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||||
# ports:
|
# ports:
|
||||||
@@ -220,11 +266,24 @@ services:
|
|||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
<<: *clickhouse-depend
|
<<: *db-depend
|
||||||
|
|
||||||
|
logspout:
|
||||||
|
image: "gliderlabs/logspout:v3.2.14"
|
||||||
|
volumes:
|
||||||
|
- /etc/hostname:/etc/host_hostname:ro
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
command: syslog+tcp://otel-collector:2255
|
||||||
|
depends_on:
|
||||||
|
- otel-collector
|
||||||
|
deploy:
|
||||||
|
mode: global
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
hotrod:
|
hotrod:
|
||||||
image: jaegertracing/example-hotrod:1.30
|
image: jaegertracing/example-hotrod:1.30
|
||||||
command: ["all"]
|
command: [ "all" ]
|
||||||
environment:
|
environment:
|
||||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||||
logging:
|
logging:
|
||||||
|
|||||||
@@ -1,29 +1,21 @@
|
|||||||
receivers:
|
receivers:
|
||||||
filelog/dockercontainers:
|
tcplog/docker:
|
||||||
include: [ "/var/lib/docker/containers/*/*.log" ]
|
listen_address: "0.0.0.0:2255"
|
||||||
start_at: end
|
|
||||||
include_file_path: true
|
|
||||||
include_file_name: false
|
|
||||||
operators:
|
operators:
|
||||||
- type: json_parser
|
- type: regex_parser
|
||||||
id: parser-docker
|
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
|
||||||
output: extract_metadata_from_filepath
|
timestamp:
|
||||||
timestamp:
|
parse_from: attributes.timestamp
|
||||||
parse_from: attributes.time
|
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
||||||
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
- type: move
|
||||||
- type: regex_parser
|
from: attributes["body"]
|
||||||
id: extract_metadata_from_filepath
|
to: body
|
||||||
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
|
- type: remove
|
||||||
parse_from: attributes["log.file.path"]
|
field: attributes.timestamp
|
||||||
output: parse_body
|
# please remove names from below if you want to collect logs from them
|
||||||
- type: move
|
- type: filter
|
||||||
id: parse_body
|
id: signoz_logs_filter
|
||||||
from: attributes.log
|
expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
|
||||||
to: body
|
|
||||||
output: time
|
|
||||||
- type: remove
|
|
||||||
id: time
|
|
||||||
field: attributes.time
|
|
||||||
opencensus:
|
opencensus:
|
||||||
endpoint: 0.0.0.0:55678
|
endpoint: 0.0.0.0:55678
|
||||||
otlp/spanmetrics:
|
otlp/spanmetrics:
|
||||||
@@ -166,6 +158,6 @@ service:
|
|||||||
receivers: [otlp/spanmetrics]
|
receivers: [otlp/spanmetrics]
|
||||||
exporters: [prometheus]
|
exporters: [prometheus]
|
||||||
logs:
|
logs:
|
||||||
receivers: [otlp, filelog/dockercontainers]
|
receivers: [otlp, tcplog/docker]
|
||||||
processors: [batch]
|
processors: [batch]
|
||||||
exporters: [clickhouselogsexporter]
|
exporters: [clickhouselogsexporter]
|
||||||
|
|||||||
@@ -0,0 +1 @@
|
|||||||
|
server_endpoint: ws://query-service:4320/v1/opamp
|
||||||
@@ -24,8 +24,16 @@ server {
|
|||||||
try_files $uri $uri/ /index.html;
|
try_files $uri $uri/ /index.html;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /api/alertmanager {
|
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
|
||||||
proxy_pass http://alertmanager:9093/api/v2;
|
proxy_pass http://query-service:8080;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
|
||||||
|
# connection will be closed if no data is read for 600s between successive read operations
|
||||||
|
proxy_read_timeout 600s;
|
||||||
|
|
||||||
|
# dont buffer the data send it directly to client.
|
||||||
|
proxy_buffering off;
|
||||||
|
proxy_cache off;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /api {
|
location /api {
|
||||||
|
|||||||
@@ -1,9 +1,26 @@
|
|||||||
version: "2.4"
|
version: "2.4"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
|
zookeeper-1:
|
||||||
|
image: bitnami/zookeeper:3.7.1
|
||||||
|
container_name: signoz-zookeeper-1
|
||||||
|
hostname: zookeeper-1
|
||||||
|
user: root
|
||||||
|
ports:
|
||||||
|
- "2181:2181"
|
||||||
|
- "2888:2888"
|
||||||
|
- "3888:3888"
|
||||||
|
volumes:
|
||||||
|
- ./data/zookeeper-1:/bitnami/zookeeper
|
||||||
|
environment:
|
||||||
|
- ZOO_SERVER_ID=1
|
||||||
|
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
|
||||||
|
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||||
|
- ZOO_AUTOPURGE_INTERVAL=1
|
||||||
|
|
||||||
clickhouse:
|
clickhouse:
|
||||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
image: clickhouse/clickhouse-server:23.7.3-alpine
|
||||||
container_name: clickhouse
|
container_name: signoz-clickhouse
|
||||||
# ports:
|
# ports:
|
||||||
# - "9000:9000"
|
# - "9000:9000"
|
||||||
# - "8123:8123"
|
# - "8123:8123"
|
||||||
@@ -11,8 +28,11 @@ services:
|
|||||||
volumes:
|
volumes:
|
||||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||||
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||||
|
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||||
|
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||||
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||||
|
- ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
logging:
|
logging:
|
||||||
options:
|
options:
|
||||||
@@ -20,14 +40,21 @@ services:
|
|||||||
max-file: "3"
|
max-file: "3"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
test:
|
||||||
|
[
|
||||||
|
"CMD",
|
||||||
|
"wget",
|
||||||
|
"--spider",
|
||||||
|
"-q",
|
||||||
|
"localhost:8123/ping"
|
||||||
|
]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 3
|
retries: 3
|
||||||
|
|
||||||
alertmanager:
|
alertmanager:
|
||||||
container_name: alertmanager
|
container_name: signoz-alertmanager
|
||||||
image: signoz/alertmanager:0.23.1
|
image: signoz/alertmanager:0.23.4
|
||||||
volumes:
|
volumes:
|
||||||
- ./data/alertmanager:/data
|
- ./data/alertmanager:/data
|
||||||
depends_on:
|
depends_on:
|
||||||
@@ -38,20 +65,40 @@ services:
|
|||||||
- --queryService.url=http://query-service:8085
|
- --queryService.url=http://query-service:8085
|
||||||
- --storage.path=/data
|
- --storage.path=/data
|
||||||
|
|
||||||
|
otel-collector-migrator:
|
||||||
|
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.3}
|
||||||
|
container_name: otel-migrator
|
||||||
|
command:
|
||||||
|
- "--dsn=tcp://clickhouse:9000"
|
||||||
|
depends_on:
|
||||||
|
clickhouse:
|
||||||
|
condition: service_healthy
|
||||||
|
# clickhouse-2:
|
||||||
|
# condition: service_healthy
|
||||||
|
# clickhouse-3:
|
||||||
|
# condition: service_healthy
|
||||||
|
|
||||||
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
||||||
otel-collector:
|
otel-collector:
|
||||||
container_name: otel-collector
|
container_name: signoz-otel-collector
|
||||||
image: signoz/signoz-otel-collector:0.79.1
|
image: signoz/signoz-otel-collector:0.88.3
|
||||||
command: ["--config=/etc/otel-collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
|
command:
|
||||||
|
[
|
||||||
|
"--config=/etc/otel-collector-config.yaml",
|
||||||
|
"--manager-config=/etc/manager-config.yaml",
|
||||||
|
"--copy-path=/var/tmp/collector-config.yaml",
|
||||||
|
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||||
|
]
|
||||||
# user: root # required for reading docker container logs
|
# user: root # required for reading docker container logs
|
||||||
volumes:
|
volumes:
|
||||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||||
|
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||||
environment:
|
environment:
|
||||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||||
ports:
|
ports:
|
||||||
# - "1777:1777" # pprof extension
|
# - "1777:1777" # pprof extension
|
||||||
- "4317:4317" # OTLP gRPC receiver
|
- "4317:4317" # OTLP gRPC receiver
|
||||||
- "4318:4318" # OTLP HTTP receiver
|
- "4318:4318" # OTLP HTTP receiver
|
||||||
# - "8888:8888" # OtelCollector internal metrics
|
# - "8888:8888" # OtelCollector internal metrics
|
||||||
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
||||||
# - "9411:9411" # Zipkin port
|
# - "9411:9411" # Zipkin port
|
||||||
@@ -64,11 +111,19 @@ services:
|
|||||||
depends_on:
|
depends_on:
|
||||||
clickhouse:
|
clickhouse:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
otel-collector-migrator:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
query-service:
|
||||||
|
condition: service_healthy
|
||||||
|
|
||||||
otel-collector-metrics:
|
otel-collector-metrics:
|
||||||
container_name: otel-collector-metrics
|
container_name: signoz-otel-collector-metrics
|
||||||
image: signoz/signoz-otel-collector:0.79.1
|
image: signoz/signoz-otel-collector:0.88.3
|
||||||
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
|
command:
|
||||||
|
[
|
||||||
|
"--config=/etc/otel-collector-metrics-config.yaml",
|
||||||
|
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||||
|
]
|
||||||
volumes:
|
volumes:
|
||||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||||
# ports:
|
# ports:
|
||||||
@@ -80,6 +135,19 @@ services:
|
|||||||
depends_on:
|
depends_on:
|
||||||
clickhouse:
|
clickhouse:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
otel-collector-migrator:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
|
||||||
|
logspout:
|
||||||
|
image: "gliderlabs/logspout:v3.2.14"
|
||||||
|
container_name: signoz-logspout
|
||||||
|
volumes:
|
||||||
|
- /etc/hostname:/etc/host_hostname:ro
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
command: syslog+tcp://otel-collector:2255
|
||||||
|
depends_on:
|
||||||
|
- otel-collector
|
||||||
|
restart: on-failure
|
||||||
|
|
||||||
hotrod:
|
hotrod:
|
||||||
image: jaegertracing/example-hotrod:1.30
|
image: jaegertracing/example-hotrod:1.30
|
||||||
@@ -88,7 +156,7 @@ services:
|
|||||||
options:
|
options:
|
||||||
max-size: 50m
|
max-size: 50m
|
||||||
max-file: "3"
|
max-file: "3"
|
||||||
command: ["all"]
|
command: [ "all" ]
|
||||||
environment:
|
environment:
|
||||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||||
|
|
||||||
|
|||||||
@@ -4,12 +4,12 @@ services:
|
|||||||
query-service:
|
query-service:
|
||||||
hostname: query-service
|
hostname: query-service
|
||||||
build:
|
build:
|
||||||
context: "../../../pkg/query-service"
|
context: "../../../"
|
||||||
dockerfile: "./Dockerfile"
|
dockerfile: "./pkg/query-service/Dockerfile"
|
||||||
args:
|
args:
|
||||||
LDFLAGS: ""
|
LDFLAGS: ""
|
||||||
TARGETPLATFORM: "${LOCAL_GOOS}/${LOCAL_GOARCH}"
|
TARGETPLATFORM: "${GOOS}/${GOARCH}"
|
||||||
container_name: query-service
|
container_name: signoz-query-service
|
||||||
environment:
|
environment:
|
||||||
- ClickHouseUrl=tcp://clickhouse:9000
|
- ClickHouseUrl=tcp://clickhouse:9000
|
||||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||||
@@ -22,13 +22,24 @@ services:
|
|||||||
- ./prometheus.yml:/root/config/prometheus.yml
|
- ./prometheus.yml:/root/config/prometheus.yml
|
||||||
- ../dashboards:/root/config/dashboards
|
- ../dashboards:/root/config/dashboards
|
||||||
- ./data/signoz/:/var/lib/signoz/
|
- ./data/signoz/:/var/lib/signoz/
|
||||||
command: ["-config=/root/config/prometheus.yml"]
|
command:
|
||||||
|
[
|
||||||
|
"-config=/root/config/prometheus.yml",
|
||||||
|
"--prefer-delta=true"
|
||||||
|
]
|
||||||
ports:
|
ports:
|
||||||
- "6060:6060"
|
- "6060:6060"
|
||||||
- "8080:8080"
|
- "8080:8080"
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
|
test:
|
||||||
|
[
|
||||||
|
"CMD",
|
||||||
|
"wget",
|
||||||
|
"--spider",
|
||||||
|
"-q",
|
||||||
|
"localhost:8080/api/v1/health"
|
||||||
|
]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 3
|
retries: 3
|
||||||
@@ -41,9 +52,9 @@ services:
|
|||||||
context: "../../../frontend"
|
context: "../../../frontend"
|
||||||
dockerfile: "./Dockerfile"
|
dockerfile: "./Dockerfile"
|
||||||
args:
|
args:
|
||||||
TARGETOS: "${LOCAL_GOOS}"
|
TARGETOS: "${GOOS}"
|
||||||
TARGETPLATFORM: "${LOCAL_GOARCH}"
|
TARGETPLATFORM: "${GOARCH}"
|
||||||
container_name: frontend
|
container_name: signoz-frontend
|
||||||
environment:
|
environment:
|
||||||
- FRONTEND_API_ENDPOINT=http://query-service:8080
|
- FRONTEND_API_ENDPOINT=http://query-service:8080
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
|
|||||||
@@ -2,7 +2,8 @@ version: "2.4"
|
|||||||
|
|
||||||
x-clickhouse-defaults: &clickhouse-defaults
|
x-clickhouse-defaults: &clickhouse-defaults
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
# addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
|
||||||
|
image: clickhouse/clickhouse-server:23.11.1-alpine
|
||||||
tty: true
|
tty: true
|
||||||
depends_on:
|
depends_on:
|
||||||
- zookeeper-1
|
- zookeeper-1
|
||||||
@@ -14,7 +15,14 @@ x-clickhouse-defaults: &clickhouse-defaults
|
|||||||
max-file: "3"
|
max-file: "3"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
test:
|
||||||
|
[
|
||||||
|
"CMD",
|
||||||
|
"wget",
|
||||||
|
"--spider",
|
||||||
|
"-q",
|
||||||
|
"localhost:8123/ping"
|
||||||
|
]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 3
|
retries: 3
|
||||||
@@ -24,10 +32,12 @@ x-clickhouse-defaults: &clickhouse-defaults
|
|||||||
soft: 262144
|
soft: 262144
|
||||||
hard: 262144
|
hard: 262144
|
||||||
|
|
||||||
x-clickhouse-depend: &clickhouse-depend
|
x-db-depend: &db-depend
|
||||||
depends_on:
|
depends_on:
|
||||||
clickhouse:
|
clickhouse:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
otel-collector-migrator:
|
||||||
|
condition: service_completed_successfully
|
||||||
# clickhouse-2:
|
# clickhouse-2:
|
||||||
# condition: service_healthy
|
# condition: service_healthy
|
||||||
# clickhouse-3:
|
# clickhouse-3:
|
||||||
@@ -37,7 +47,7 @@ services:
|
|||||||
|
|
||||||
zookeeper-1:
|
zookeeper-1:
|
||||||
image: bitnami/zookeeper:3.7.1
|
image: bitnami/zookeeper:3.7.1
|
||||||
container_name: zookeeper-1
|
container_name: signoz-zookeeper-1
|
||||||
hostname: zookeeper-1
|
hostname: zookeeper-1
|
||||||
user: root
|
user: root
|
||||||
ports:
|
ports:
|
||||||
@@ -54,7 +64,7 @@ services:
|
|||||||
|
|
||||||
# zookeeper-2:
|
# zookeeper-2:
|
||||||
# image: bitnami/zookeeper:3.7.0
|
# image: bitnami/zookeeper:3.7.0
|
||||||
# container_name: zookeeper-2
|
# container_name: signoz-zookeeper-2
|
||||||
# hostname: zookeeper-2
|
# hostname: zookeeper-2
|
||||||
# user: root
|
# user: root
|
||||||
# ports:
|
# ports:
|
||||||
@@ -71,7 +81,7 @@ services:
|
|||||||
|
|
||||||
# zookeeper-3:
|
# zookeeper-3:
|
||||||
# image: bitnami/zookeeper:3.7.0
|
# image: bitnami/zookeeper:3.7.0
|
||||||
# container_name: zookeeper-3
|
# container_name: signoz-zookeeper-3
|
||||||
# hostname: zookeeper-3
|
# hostname: zookeeper-3
|
||||||
# user: root
|
# user: root
|
||||||
# ports:
|
# ports:
|
||||||
@@ -88,7 +98,7 @@ services:
|
|||||||
|
|
||||||
clickhouse:
|
clickhouse:
|
||||||
<<: *clickhouse-defaults
|
<<: *clickhouse-defaults
|
||||||
container_name: clickhouse
|
container_name: signoz-clickhouse
|
||||||
hostname: clickhouse
|
hostname: clickhouse
|
||||||
ports:
|
ports:
|
||||||
- "9000:9000"
|
- "9000:9000"
|
||||||
@@ -105,7 +115,7 @@ services:
|
|||||||
|
|
||||||
# clickhouse-2:
|
# clickhouse-2:
|
||||||
# <<: *clickhouse-defaults
|
# <<: *clickhouse-defaults
|
||||||
# container_name: clickhouse-2
|
# container_name: signoz-clickhouse-2
|
||||||
# hostname: clickhouse-2
|
# hostname: clickhouse-2
|
||||||
# ports:
|
# ports:
|
||||||
# - "9001:9000"
|
# - "9001:9000"
|
||||||
@@ -120,10 +130,10 @@ services:
|
|||||||
# - ./data/clickhouse-2/:/var/lib/clickhouse/
|
# - ./data/clickhouse-2/:/var/lib/clickhouse/
|
||||||
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||||
|
|
||||||
|
|
||||||
# clickhouse-3:
|
# clickhouse-3:
|
||||||
# <<: *clickhouse-defaults
|
# <<: *clickhouse-defaults
|
||||||
# container_name: clickhouse-3
|
# container_name: signoz-clickhouse-3
|
||||||
# hostname: clickhouse-3
|
# hostname: clickhouse-3
|
||||||
# ports:
|
# ports:
|
||||||
# - "9002:9000"
|
# - "9002:9000"
|
||||||
@@ -139,7 +149,8 @@ services:
|
|||||||
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||||
|
|
||||||
alertmanager:
|
alertmanager:
|
||||||
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.1}
|
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.4}
|
||||||
|
container_name: signoz-alertmanager
|
||||||
volumes:
|
volumes:
|
||||||
- ./data/alertmanager:/data
|
- ./data/alertmanager:/data
|
||||||
depends_on:
|
depends_on:
|
||||||
@@ -153,9 +164,13 @@ services:
|
|||||||
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
||||||
|
|
||||||
query-service:
|
query-service:
|
||||||
image: signoz/query-service:${DOCKER_TAG:-0.22.0}
|
image: signoz/query-service:${DOCKER_TAG:-0.35.1}
|
||||||
container_name: query-service
|
container_name: signoz-query-service
|
||||||
command: ["-config=/root/config/prometheus.yml"]
|
command:
|
||||||
|
[
|
||||||
|
"-config=/root/config/prometheus.yml",
|
||||||
|
"--prefer-delta=true"
|
||||||
|
]
|
||||||
# ports:
|
# ports:
|
||||||
# - "6060:6060" # pprof port
|
# - "6060:6060" # pprof port
|
||||||
# - "8080:8080" # query-service port
|
# - "8080:8080" # query-service port
|
||||||
@@ -174,15 +189,22 @@ services:
|
|||||||
- DEPLOYMENT_TYPE=docker-standalone-amd
|
- DEPLOYMENT_TYPE=docker-standalone-amd
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
|
test:
|
||||||
|
[
|
||||||
|
"CMD",
|
||||||
|
"wget",
|
||||||
|
"--spider",
|
||||||
|
"-q",
|
||||||
|
"localhost:8080/api/v1/health"
|
||||||
|
]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 3
|
retries: 3
|
||||||
<<: *clickhouse-depend
|
<<: *db-depend
|
||||||
|
|
||||||
frontend:
|
frontend:
|
||||||
image: signoz/frontend:${DOCKER_TAG:-0.22.0}
|
image: signoz/frontend:${DOCKER_TAG:-0.35.1}
|
||||||
container_name: frontend
|
container_name: signoz-frontend
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
depends_on:
|
depends_on:
|
||||||
- alertmanager
|
- alertmanager
|
||||||
@@ -192,12 +214,34 @@ services:
|
|||||||
volumes:
|
volumes:
|
||||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||||
|
|
||||||
|
otel-collector-migrator:
|
||||||
|
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.3}
|
||||||
|
container_name: otel-migrator
|
||||||
|
command:
|
||||||
|
- "--dsn=tcp://clickhouse:9000"
|
||||||
|
depends_on:
|
||||||
|
clickhouse:
|
||||||
|
condition: service_healthy
|
||||||
|
# clickhouse-2:
|
||||||
|
# condition: service_healthy
|
||||||
|
# clickhouse-3:
|
||||||
|
# condition: service_healthy
|
||||||
|
|
||||||
|
|
||||||
otel-collector:
|
otel-collector:
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.1}
|
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.3}
|
||||||
command: ["--config=/etc/otel-collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
|
container_name: signoz-otel-collector
|
||||||
|
command:
|
||||||
|
[
|
||||||
|
"--config=/etc/otel-collector-config.yaml",
|
||||||
|
"--manager-config=/etc/manager-config.yaml",
|
||||||
|
"--copy-path=/var/tmp/collector-config.yaml",
|
||||||
|
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||||
|
]
|
||||||
user: root # required for reading docker container logs
|
user: root # required for reading docker container logs
|
||||||
volumes:
|
volumes:
|
||||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||||
|
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||||
environment:
|
environment:
|
||||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||||
@@ -205,8 +249,8 @@ services:
|
|||||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||||
ports:
|
ports:
|
||||||
# - "1777:1777" # pprof extension
|
# - "1777:1777" # pprof extension
|
||||||
- "4317:4317" # OTLP gRPC receiver
|
- "4317:4317" # OTLP gRPC receiver
|
||||||
- "4318:4318" # OTLP HTTP receiver
|
- "4318:4318" # OTLP HTTP receiver
|
||||||
# - "8888:8888" # OtelCollector internal metrics
|
# - "8888:8888" # OtelCollector internal metrics
|
||||||
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
||||||
# - "9411:9411" # Zipkin port
|
# - "9411:9411" # Zipkin port
|
||||||
@@ -216,11 +260,22 @@ services:
|
|||||||
# - "55678:55678" # OpenCensus receiver
|
# - "55678:55678" # OpenCensus receiver
|
||||||
# - "55679:55679" # zPages extension
|
# - "55679:55679" # zPages extension
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
<<: *clickhouse-depend
|
depends_on:
|
||||||
|
clickhouse:
|
||||||
|
condition: service_healthy
|
||||||
|
otel-collector-migrator:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
query-service:
|
||||||
|
condition: service_healthy
|
||||||
|
|
||||||
otel-collector-metrics:
|
otel-collector-metrics:
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.1}
|
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.3}
|
||||||
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
|
container_name: signoz-otel-collector-metrics
|
||||||
|
command:
|
||||||
|
[
|
||||||
|
"--config=/etc/otel-collector-metrics-config.yaml",
|
||||||
|
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
|
||||||
|
]
|
||||||
volumes:
|
volumes:
|
||||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||||
# ports:
|
# ports:
|
||||||
@@ -229,7 +284,18 @@ services:
|
|||||||
# - "13133:13133" # Health check extension
|
# - "13133:13133" # Health check extension
|
||||||
# - "55679:55679" # zPages extension
|
# - "55679:55679" # zPages extension
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
<<: *clickhouse-depend
|
<<: *db-depend
|
||||||
|
|
||||||
|
logspout:
|
||||||
|
image: "gliderlabs/logspout:v3.2.14"
|
||||||
|
container_name: signoz-logspout
|
||||||
|
volumes:
|
||||||
|
- /etc/hostname:/etc/host_hostname:ro
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
command: syslog+tcp://otel-collector:2255
|
||||||
|
depends_on:
|
||||||
|
- otel-collector
|
||||||
|
restart: on-failure
|
||||||
|
|
||||||
hotrod:
|
hotrod:
|
||||||
image: jaegertracing/example-hotrod:1.30
|
image: jaegertracing/example-hotrod:1.30
|
||||||
@@ -238,7 +304,7 @@ services:
|
|||||||
options:
|
options:
|
||||||
max-size: 50m
|
max-size: 50m
|
||||||
max-file: "3"
|
max-file: "3"
|
||||||
command: ["all"]
|
command: [ "all" ]
|
||||||
environment:
|
environment:
|
||||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||||
|
|
||||||
|
|||||||
@@ -1,29 +1,21 @@
|
|||||||
receivers:
|
receivers:
|
||||||
filelog/dockercontainers:
|
tcplog/docker:
|
||||||
include: [ "/var/lib/docker/containers/*/*.log" ]
|
listen_address: "0.0.0.0:2255"
|
||||||
start_at: end
|
|
||||||
include_file_path: true
|
|
||||||
include_file_name: false
|
|
||||||
operators:
|
operators:
|
||||||
- type: json_parser
|
- type: regex_parser
|
||||||
id: parser-docker
|
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
|
||||||
output: extract_metadata_from_filepath
|
timestamp:
|
||||||
timestamp:
|
parse_from: attributes.timestamp
|
||||||
parse_from: attributes.time
|
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
||||||
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
- type: move
|
||||||
- type: regex_parser
|
from: attributes["body"]
|
||||||
id: extract_metadata_from_filepath
|
to: body
|
||||||
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
|
- type: remove
|
||||||
parse_from: attributes["log.file.path"]
|
field: attributes.timestamp
|
||||||
output: parse_body
|
# please remove names from below if you want to collect logs from them
|
||||||
- type: move
|
- type: filter
|
||||||
id: parse_body
|
id: signoz_logs_filter
|
||||||
from: attributes.log
|
expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|otel-collector-metrics|clickhouse|zookeeper)"'
|
||||||
to: body
|
|
||||||
output: time
|
|
||||||
- type: remove
|
|
||||||
id: time
|
|
||||||
field: attributes.time
|
|
||||||
opencensus:
|
opencensus:
|
||||||
endpoint: 0.0.0.0:55678
|
endpoint: 0.0.0.0:55678
|
||||||
otlp/spanmetrics:
|
otlp/spanmetrics:
|
||||||
@@ -70,40 +62,6 @@ receivers:
|
|||||||
|
|
||||||
|
|
||||||
processors:
|
processors:
|
||||||
logstransform/internal:
|
|
||||||
operators:
|
|
||||||
- type: trace_parser
|
|
||||||
if: '"trace_id" in attributes or "span_id" in attributes'
|
|
||||||
trace_id:
|
|
||||||
parse_from: attributes.trace_id
|
|
||||||
span_id:
|
|
||||||
parse_from: attributes.span_id
|
|
||||||
output: remove_trace_id
|
|
||||||
- type: trace_parser
|
|
||||||
if: '"traceId" in attributes or "spanId" in attributes'
|
|
||||||
trace_id:
|
|
||||||
parse_from: attributes.traceId
|
|
||||||
span_id:
|
|
||||||
parse_from: attributes.spanId
|
|
||||||
output: remove_traceId
|
|
||||||
- id: remove_traceId
|
|
||||||
type: remove
|
|
||||||
if: '"traceId" in attributes'
|
|
||||||
field: attributes.traceId
|
|
||||||
output: remove_spanId
|
|
||||||
- id: remove_spanId
|
|
||||||
type: remove
|
|
||||||
if: '"spanId" in attributes'
|
|
||||||
field: attributes.spanId
|
|
||||||
- id: remove_trace_id
|
|
||||||
type: remove
|
|
||||||
if: '"trace_id" in attributes'
|
|
||||||
field: attributes.trace_id
|
|
||||||
output: remove_span_id
|
|
||||||
- id: remove_span_id
|
|
||||||
type: remove
|
|
||||||
if: '"span_id" in attributes'
|
|
||||||
field: attributes.span_id
|
|
||||||
batch:
|
batch:
|
||||||
send_batch_size: 10000
|
send_batch_size: 10000
|
||||||
send_batch_max_size: 11000
|
send_batch_max_size: 11000
|
||||||
@@ -205,6 +163,6 @@ service:
|
|||||||
receivers: [otlp/spanmetrics]
|
receivers: [otlp/spanmetrics]
|
||||||
exporters: [prometheus]
|
exporters: [prometheus]
|
||||||
logs:
|
logs:
|
||||||
receivers: [otlp, filelog/dockercontainers]
|
receivers: [otlp, tcplog/docker]
|
||||||
processors: [logstransform/internal, batch]
|
processors: [batch]
|
||||||
exporters: [clickhouselogsexporter]
|
exporters: [clickhouselogsexporter]
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
server_endpoint: ws://query-service:4320/v1/opamp
|
||||||
@@ -24,8 +24,16 @@ server {
|
|||||||
try_files $uri $uri/ /index.html;
|
try_files $uri $uri/ /index.html;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /api/alertmanager {
|
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
|
||||||
proxy_pass http://alertmanager:9093/api/v2;
|
proxy_pass http://query-service:8080;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
|
||||||
|
# connection will be closed if no data is read for 600s between successive read operations
|
||||||
|
proxy_read_timeout 600s;
|
||||||
|
|
||||||
|
# dont buffer the data send it directly to client.
|
||||||
|
proxy_buffering off;
|
||||||
|
proxy_cache off;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /api {
|
location /api {
|
||||||
|
|||||||
@@ -36,9 +36,9 @@ is_mac() {
|
|||||||
[[ $OSTYPE == darwin* ]]
|
[[ $OSTYPE == darwin* ]]
|
||||||
}
|
}
|
||||||
|
|
||||||
# is_arm64(){
|
is_arm64(){
|
||||||
# [[ `uname -m` == 'arm64' ]]
|
[[ `uname -m` == 'arm64' || `uname -m` == 'aarch64' ]]
|
||||||
# }
|
}
|
||||||
|
|
||||||
check_os() {
|
check_os() {
|
||||||
if is_mac; then
|
if is_mac; then
|
||||||
@@ -48,6 +48,16 @@ check_os() {
|
|||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if is_arm64; then
|
||||||
|
arch="arm64"
|
||||||
|
arch_official="aarch64"
|
||||||
|
else
|
||||||
|
arch="amd64"
|
||||||
|
arch_official="x86_64"
|
||||||
|
fi
|
||||||
|
|
||||||
|
platform=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||||
|
|
||||||
os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"
|
os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"
|
||||||
|
|
||||||
case "$os_name" in
|
case "$os_name" in
|
||||||
@@ -143,7 +153,7 @@ install_docker() {
|
|||||||
$apt_cmd install software-properties-common gnupg-agent
|
$apt_cmd install software-properties-common gnupg-agent
|
||||||
curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
|
curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
|
||||||
$sudo_cmd add-apt-repository \
|
$sudo_cmd add-apt-repository \
|
||||||
"deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
|
"deb [arch=$arch] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
|
||||||
$apt_cmd update
|
$apt_cmd update
|
||||||
echo "Installing docker"
|
echo "Installing docker"
|
||||||
$apt_cmd install docker-ce docker-ce-cli containerd.io
|
$apt_cmd install docker-ce docker-ce-cli containerd.io
|
||||||
@@ -178,12 +188,20 @@ install_docker() {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
compose_version () {
|
||||||
|
local compose_version
|
||||||
|
compose_version="$(curl -s https://api.github.com/repos/docker/compose/releases/latest | grep 'tag_name' | cut -d\" -f4)"
|
||||||
|
echo "${compose_version:-v2.18.1}"
|
||||||
|
}
|
||||||
|
|
||||||
install_docker_compose() {
|
install_docker_compose() {
|
||||||
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
|
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
|
||||||
if [[ ! -f /usr/bin/docker-compose ]];then
|
if [[ ! -f /usr/bin/docker-compose ]];then
|
||||||
echo "++++++++++++++++++++++++"
|
echo "++++++++++++++++++++++++"
|
||||||
echo "Installing docker-compose"
|
echo "Installing docker-compose"
|
||||||
$sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
compose_url="https://github.com/docker/compose/releases/download/$(compose_version)/docker-compose-$platform-$arch_official"
|
||||||
|
echo "Downloading docker-compose from $compose_url"
|
||||||
|
$sudo_cmd curl -L "$compose_url" -o /usr/local/bin/docker-compose
|
||||||
$sudo_cmd chmod +x /usr/local/bin/docker-compose
|
$sudo_cmd chmod +x /usr/local/bin/docker-compose
|
||||||
$sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
|
$sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
|
||||||
echo "docker-compose installed!"
|
echo "docker-compose installed!"
|
||||||
@@ -516,7 +534,7 @@ else
|
|||||||
echo ""
|
echo ""
|
||||||
echo -e "🟢 Your frontend is running on http://localhost:3301"
|
echo -e "🟢 Your frontend is running on http://localhost:3301"
|
||||||
echo ""
|
echo ""
|
||||||
echo "ℹ️ By default, retention period is set to 7 days for logs and traces, and 30 days for metrics."
|
echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
|
||||||
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
|
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
|
||||||
|
|
||||||
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
||||||
|
|||||||
14
e2e/package.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"name": "e2e",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"main": "index.js",
|
||||||
|
"license": "MIT",
|
||||||
|
"devDependencies": {
|
||||||
|
"@playwright/test": "^1.22.0",
|
||||||
|
"@types/node": "^20.9.2"
|
||||||
|
},
|
||||||
|
"scripts": {},
|
||||||
|
"dependencies": {
|
||||||
|
"dotenv": "8.2.0"
|
||||||
|
}
|
||||||
|
}
|
||||||
46
e2e/playwright.config.ts
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
import { defineConfig, devices } from "@playwright/test";
|
||||||
|
import dotenv from "dotenv";
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
export default defineConfig({
|
||||||
|
testDir: "./tests",
|
||||||
|
|
||||||
|
fullyParallel: true,
|
||||||
|
|
||||||
|
forbidOnly: !!process.env.CI,
|
||||||
|
|
||||||
|
name: "Signoz E2E",
|
||||||
|
|
||||||
|
retries: process.env.CI ? 2 : 0,
|
||||||
|
|
||||||
|
reporter: process.env.CI ? "github" : "list",
|
||||||
|
|
||||||
|
preserveOutput: "always",
|
||||||
|
|
||||||
|
updateSnapshots: "all",
|
||||||
|
|
||||||
|
quiet: false,
|
||||||
|
|
||||||
|
testMatch: ["**/*.spec.ts"],
|
||||||
|
|
||||||
|
use: {
|
||||||
|
trace: "on-first-retry",
|
||||||
|
|
||||||
|
baseURL:
|
||||||
|
process.env.PLAYWRIGHT_TEST_BASE_URL || "https://stagingapp.signoz.io/",
|
||||||
|
},
|
||||||
|
|
||||||
|
projects: [
|
||||||
|
{ name: "setup", testMatch: /.*\.setup\.ts/ },
|
||||||
|
{
|
||||||
|
name: "chromium",
|
||||||
|
use: {
|
||||||
|
...devices["Desktop Chrome"],
|
||||||
|
// Use prepared auth state.
|
||||||
|
storageState: ".auth/user.json",
|
||||||
|
},
|
||||||
|
dependencies: ["setup"],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
||||||
37
e2e/tests/auth.setup.ts
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
import { test, expect } from "@playwright/test";
|
||||||
|
import ROUTES from "../../frontend/src/constants/routes";
|
||||||
|
import dotenv from "dotenv";
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
const authFile = ".auth/user.json";
|
||||||
|
|
||||||
|
test("E2E Login Test", async ({ page }) => {
|
||||||
|
await Promise.all([page.goto("/"), page.waitForRequest("**/version")]);
|
||||||
|
|
||||||
|
const signup = "Monitor your applications. Find what is causing issues.";
|
||||||
|
|
||||||
|
const el = await page.locator(`text=${signup}`);
|
||||||
|
|
||||||
|
expect(el).toBeVisible();
|
||||||
|
|
||||||
|
await page
|
||||||
|
.locator("id=loginEmail")
|
||||||
|
.type(
|
||||||
|
process.env.PLAYWRIGHT_USERNAME ? process.env.PLAYWRIGHT_USERNAME : ""
|
||||||
|
);
|
||||||
|
|
||||||
|
await page.getByText("Next").click();
|
||||||
|
|
||||||
|
await page
|
||||||
|
.locator('input[id="currentPassword"]')
|
||||||
|
.fill(
|
||||||
|
process.env.PLAYWRIGHT_PASSWORD ? process.env.PLAYWRIGHT_PASSWORD : ""
|
||||||
|
);
|
||||||
|
|
||||||
|
await page.locator('button[data-attr="signup"]').click();
|
||||||
|
|
||||||
|
await expect(page).toHaveURL(ROUTES.APPLICATION);
|
||||||
|
|
||||||
|
await page.context().storageState({ path: authFile });
|
||||||
|
});
|
||||||
10
e2e/tests/contants.ts
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
export const SERVICE_TABLE_HEADERS = {
|
||||||
|
APPLICATION: "Applicaton",
|
||||||
|
P99LATENCY: "P99 latency (in ms)",
|
||||||
|
ERROR_RATE: "Error Rate (% of total)",
|
||||||
|
OPS_PER_SECOND: "Operations Per Second",
|
||||||
|
};
|
||||||
|
|
||||||
|
export const DATA_TEST_IDS = {
|
||||||
|
NEW_DASHBOARD_BTN: "create-new-dashboard",
|
||||||
|
};
|
||||||
40
e2e/tests/navigation.spec.ts
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
import { test, expect } from "@playwright/test";
|
||||||
|
import ROUTES from "../../frontend/src/constants/routes";
|
||||||
|
import { DATA_TEST_IDS, SERVICE_TABLE_HEADERS } from "./contants";
|
||||||
|
|
||||||
|
test("Basic Navigation Check across different resources", async ({ page }) => {
|
||||||
|
// route to services page and check if the page renders fine with BE contract
|
||||||
|
await Promise.all([
|
||||||
|
page.goto(ROUTES.APPLICATION),
|
||||||
|
page.waitForRequest("**/v1/services"),
|
||||||
|
]);
|
||||||
|
|
||||||
|
const p99Latency = page.locator(
|
||||||
|
`th:has-text("${SERVICE_TABLE_HEADERS.P99LATENCY}")`
|
||||||
|
);
|
||||||
|
|
||||||
|
await expect(p99Latency).toBeVisible();
|
||||||
|
|
||||||
|
// route to the new trace explorer page and check if the page renders fine
|
||||||
|
await page.goto(ROUTES.TRACES_EXPLORER);
|
||||||
|
|
||||||
|
await page.waitForLoadState("networkidle");
|
||||||
|
|
||||||
|
const listViewTable = await page
|
||||||
|
.locator('div[role="presentation"]')
|
||||||
|
.isVisible();
|
||||||
|
|
||||||
|
expect(listViewTable).toBeTruthy();
|
||||||
|
|
||||||
|
// route to the dashboards page and check if the page renders fine
|
||||||
|
await Promise.all([
|
||||||
|
page.goto(ROUTES.ALL_DASHBOARD),
|
||||||
|
page.waitForRequest("**/v1/dashboards"),
|
||||||
|
]);
|
||||||
|
|
||||||
|
const newDashboardBtn = await page
|
||||||
|
.locator(`data-testid=${DATA_TEST_IDS.NEW_DASHBOARD_BTN}`)
|
||||||
|
.isVisible();
|
||||||
|
|
||||||
|
expect(newDashboardBtn).toBeTruthy();
|
||||||
|
});
|
||||||
46
e2e/yarn.lock
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
|
||||||
|
# yarn lockfile v1
|
||||||
|
|
||||||
|
|
||||||
|
"@playwright/test@^1.22.0":
|
||||||
|
version "1.40.0"
|
||||||
|
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.40.0.tgz#d06c506977dd7863aa16e07f2136351ecc1be6ed"
|
||||||
|
integrity sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==
|
||||||
|
dependencies:
|
||||||
|
playwright "1.40.0"
|
||||||
|
|
||||||
|
"@types/node@^20.9.2":
|
||||||
|
version "20.9.2"
|
||||||
|
resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.2.tgz#002815c8e87fe0c9369121c78b52e800fadc0ac6"
|
||||||
|
integrity sha512-WHZXKFCEyIUJzAwh3NyyTHYSR35SevJ6mZ1nWwJafKtiQbqRTIKSRcw3Ma3acqgsent3RRDqeVwpHntMk+9irg==
|
||||||
|
dependencies:
|
||||||
|
undici-types "~5.26.4"
|
||||||
|
|
||||||
|
dotenv@8.2.0:
|
||||||
|
version "8.2.0"
|
||||||
|
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a"
|
||||||
|
integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw==
|
||||||
|
|
||||||
|
fsevents@2.3.2:
|
||||||
|
version "2.3.2"
|
||||||
|
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
|
||||||
|
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
|
||||||
|
|
||||||
|
playwright-core@1.40.0:
|
||||||
|
version "1.40.0"
|
||||||
|
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.40.0.tgz#82f61e5504cb3097803b6f8bbd98190dd34bdf14"
|
||||||
|
integrity sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==
|
||||||
|
|
||||||
|
playwright@1.40.0:
|
||||||
|
version "1.40.0"
|
||||||
|
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.40.0.tgz#2a1824b9fe5c4fe52ed53db9ea68003543a99df0"
|
||||||
|
integrity sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==
|
||||||
|
dependencies:
|
||||||
|
playwright-core "1.40.0"
|
||||||
|
optionalDependencies:
|
||||||
|
fsevents "2.3.2"
|
||||||
|
|
||||||
|
undici-types@~5.26.4:
|
||||||
|
version "5.26.5"
|
||||||
|
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
|
||||||
|
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
|
||||||
@@ -1,48 +1,30 @@
|
|||||||
FROM golang:1.18-buster AS builder
|
|
||||||
|
|
||||||
# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
|
|
||||||
ARG LD_FLAGS
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
|
|
||||||
ENV CGO_ENABLED=1
|
|
||||||
ENV GOPATH=/go
|
|
||||||
|
|
||||||
RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
|
|
||||||
export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2)
|
|
||||||
|
|
||||||
# Prepare and enter src directory
|
|
||||||
WORKDIR /go/src/github.com/signoz/signoz
|
|
||||||
|
|
||||||
# Add the sources and proceed with build
|
|
||||||
ADD . .
|
|
||||||
RUN cd ee/query-service \
|
|
||||||
&& go build -tags timetzdata -a -o ./bin/query-service \
|
|
||||||
-ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" \
|
|
||||||
&& chmod +x ./bin/query-service
|
|
||||||
|
|
||||||
|
|
||||||
# use a minimal alpine image
|
# use a minimal alpine image
|
||||||
FROM alpine:3.7
|
FROM alpine:3.18.5
|
||||||
|
|
||||||
# Add Maintainer Info
|
# Add Maintainer Info
|
||||||
LABEL maintainer="signoz"
|
LABEL maintainer="signoz"
|
||||||
|
|
||||||
|
# define arguments that can be passed during build time
|
||||||
|
ARG TARGETOS TARGETARCH
|
||||||
|
|
||||||
# add ca-certificates in case you need them
|
# add ca-certificates in case you need them
|
||||||
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
|
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
|
||||||
|
|
||||||
# set working directory
|
# set working directory
|
||||||
WORKDIR /root
|
WORKDIR /root
|
||||||
|
|
||||||
# copy the binary from builder
|
# copy the query-service binary
|
||||||
COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-service .
|
COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service
|
||||||
|
|
||||||
# copy prometheus YAML config
|
# copy prometheus YAML config
|
||||||
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
|
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
|
||||||
|
|
||||||
|
# Make query-service executable for non-root users
|
||||||
|
RUN chmod 755 /root /root/query-service
|
||||||
|
|
||||||
# run the binary
|
# run the binary
|
||||||
ENTRYPOINT ["./query-service"]
|
ENTRYPOINT ["./query-service"]
|
||||||
|
|
||||||
CMD ["-config", "../config/prometheus.yml"]
|
CMD ["-config", "/root/config/prometheus.yml"]
|
||||||
# CMD ["./query-service -config /root/config/prometheus.yml"]
|
|
||||||
|
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
|
|||||||
@@ -2,12 +2,16 @@ package api
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"go.signoz.io/signoz/ee/query-service/dao"
|
"go.signoz.io/signoz/ee/query-service/dao"
|
||||||
"go.signoz.io/signoz/ee/query-service/interfaces"
|
"go.signoz.io/signoz/ee/query-service/interfaces"
|
||||||
"go.signoz.io/signoz/ee/query-service/license"
|
"go.signoz.io/signoz/ee/query-service/license"
|
||||||
|
"go.signoz.io/signoz/ee/query-service/usage"
|
||||||
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/cache"
|
||||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
rules "go.signoz.io/signoz/pkg/query-service/rules"
|
rules "go.signoz.io/signoz/pkg/query-service/rules"
|
||||||
@@ -15,12 +19,22 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type APIHandlerOptions struct {
|
type APIHandlerOptions struct {
|
||||||
DataConnector interfaces.DataConnector
|
DataConnector interfaces.DataConnector
|
||||||
SkipConfig *basemodel.SkipConfig
|
SkipConfig *basemodel.SkipConfig
|
||||||
AppDao dao.ModelDao
|
PreferDelta bool
|
||||||
RulesManager *rules.Manager
|
PreferSpanMetrics bool
|
||||||
FeatureFlags baseint.FeatureLookup
|
MaxIdleConns int
|
||||||
LicenseManager *license.Manager
|
MaxOpenConns int
|
||||||
|
DialTimeout time.Duration
|
||||||
|
AppDao dao.ModelDao
|
||||||
|
RulesManager *rules.Manager
|
||||||
|
UsageManager *usage.Manager
|
||||||
|
FeatureFlags baseint.FeatureLookup
|
||||||
|
LicenseManager *license.Manager
|
||||||
|
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||||
|
Cache cache.Cache
|
||||||
|
// Querier Influx Interval
|
||||||
|
FluxInterval time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
type APIHandler struct {
|
type APIHandler struct {
|
||||||
@@ -32,11 +46,20 @@ type APIHandler struct {
|
|||||||
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
|
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
|
||||||
|
|
||||||
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
|
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
|
||||||
Reader: opts.DataConnector,
|
Reader: opts.DataConnector,
|
||||||
SkipConfig: opts.SkipConfig,
|
SkipConfig: opts.SkipConfig,
|
||||||
AppDao: opts.AppDao,
|
PerferDelta: opts.PreferDelta,
|
||||||
RuleManager: opts.RulesManager,
|
PreferSpanMetrics: opts.PreferSpanMetrics,
|
||||||
FeatureFlags: opts.FeatureFlags})
|
MaxIdleConns: opts.MaxIdleConns,
|
||||||
|
MaxOpenConns: opts.MaxOpenConns,
|
||||||
|
DialTimeout: opts.DialTimeout,
|
||||||
|
AppDao: opts.AppDao,
|
||||||
|
RuleManager: opts.RulesManager,
|
||||||
|
FeatureFlags: opts.FeatureFlags,
|
||||||
|
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
||||||
|
Cache: opts.Cache,
|
||||||
|
FluxInterval: opts.FluxInterval,
|
||||||
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -61,6 +84,10 @@ func (ah *APIHandler) LM() *license.Manager {
|
|||||||
return ah.opts.LicenseManager
|
return ah.opts.LicenseManager
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) UM() *usage.Manager {
|
||||||
|
return ah.opts.UsageManager
|
||||||
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) AppDao() dao.ModelDao {
|
func (ah *APIHandler) AppDao() dao.ModelDao {
|
||||||
return ah.opts.AppDao
|
return ah.opts.AppDao
|
||||||
}
|
}
|
||||||
@@ -129,6 +156,17 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
|
|||||||
router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.getPATs)).Methods(http.MethodGet)
|
router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.getPATs)).Methods(http.MethodGet)
|
||||||
router.HandleFunc("/api/v1/pat/{id}", am.OpenAccess(ah.deletePAT)).Methods(http.MethodDelete)
|
router.HandleFunc("/api/v1/pat/{id}", am.OpenAccess(ah.deletePAT)).Methods(http.MethodDelete)
|
||||||
|
|
||||||
|
router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.checkout)).Methods(http.MethodPost)
|
||||||
|
router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
|
||||||
|
router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.portalSession)).Methods(http.MethodPost)
|
||||||
|
|
||||||
|
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
|
||||||
|
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
|
||||||
|
|
||||||
|
router.HandleFunc("/api/v2/licenses",
|
||||||
|
am.ViewAccess(ah.listLicensesV2)).
|
||||||
|
Methods(http.MethodGet)
|
||||||
|
|
||||||
ah.APIHandler.RegisterRoutes(router, am)
|
ah.APIHandler.RegisterRoutes(router, am)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,22 +5,23 @@ import (
|
|||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
|
||||||
"go.signoz.io/signoz/ee/query-service/constants"
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func parseRequest(r *http.Request, req interface{}) error {
|
func parseRequest(r *http.Request, req interface{}) error {
|
||||||
defer r.Body.Close()
|
defer r.Body.Close()
|
||||||
requestBody, err := ioutil.ReadAll(r.Body)
|
requestBody, err := io.ReadAll(r.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -71,7 +72,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
|
|||||||
var req *baseauth.RegisterRequest
|
var req *baseauth.RegisterRequest
|
||||||
|
|
||||||
defer r.Body.Close()
|
defer r.Body.Close()
|
||||||
requestBody, err := ioutil.ReadAll(r.Body)
|
requestBody, err := io.ReadAll(r.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("received no input in api\n", err)
|
zap.S().Errorf("received no input in api\n", err)
|
||||||
RespondError(w, model.BadRequest(err), nil)
|
RespondError(w, model.BadRequest(err), nil)
|
||||||
@@ -107,13 +108,13 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
|
|||||||
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
|
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
precheckResp := &model.PrecheckResponse{
|
precheckResp := &basemodel.PrecheckResponse{
|
||||||
SSO: false,
|
SSO: false,
|
||||||
IsUser: false,
|
IsUser: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
if domain != nil && domain.SsoEnabled {
|
if domain != nil && domain.SsoEnabled {
|
||||||
// so is enabled, create user and respond precheck data
|
// sso is enabled, create user and respond precheck data
|
||||||
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
|
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
|
||||||
if apierr != nil {
|
if apierr != nil {
|
||||||
RespondError(w, apierr, nil)
|
RespondError(w, apierr, nil)
|
||||||
|
|||||||
51
ee/query-service/app/api/dashboard.go
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/common"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/model"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ah.lockUnlockDashboard(w, r, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) unlockDashboard(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ah.lockUnlockDashboard(w, r, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request, lock bool) {
|
||||||
|
// Locking can only be done by the owner of the dashboard
|
||||||
|
// or an admin
|
||||||
|
|
||||||
|
// - Fetch the dashboard
|
||||||
|
// - Check if the user is the owner or an admin
|
||||||
|
// - If yes, lock/unlock the dashboard
|
||||||
|
// - If no, return 403
|
||||||
|
|
||||||
|
// Get the dashboard UUID from the request
|
||||||
|
uuid := mux.Vars(r)["uuid"]
|
||||||
|
dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
user := common.GetUserFromContext(r.Context())
|
||||||
|
if !auth.IsAdmin(user) && (dashboard.CreateBy != nil && *dashboard.CreateBy != user.Email) {
|
||||||
|
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: err}, "You are not authorized to lock/unlock this dashboard")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lock/Unlock the dashboard
|
||||||
|
err = dashboards.LockUnlockDashboard(r.Context(), uuid, lock)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ah.Respond(w, "Dashboard updated successfully")
|
||||||
|
}
|
||||||
@@ -2,6 +2,8 @@ package api
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
|
||||||
@@ -10,5 +12,13 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
|
|||||||
ah.HandleError(w, err, http.StatusInternalServerError)
|
ah.HandleError(w, err, http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if ah.opts.PreferSpanMetrics {
|
||||||
|
for idx := range featureSet {
|
||||||
|
feature := &featureSet[idx]
|
||||||
|
if feature.Name == basemodel.UseSpanMetrics {
|
||||||
|
featureSet[idx].Active = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
ah.Respond(w, featureSet)
|
ah.Respond(w, featureSet)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,10 +4,45 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type tierBreakdown struct {
|
||||||
|
UnitPrice float64 `json:"unitPrice"`
|
||||||
|
Quantity float64 `json:"quantity"`
|
||||||
|
TierStart int64 `json:"tierStart"`
|
||||||
|
TierEnd int64 `json:"tierEnd"`
|
||||||
|
TierCost float64 `json:"tierCost"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type usageResponse struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Unit string `json:"unit"`
|
||||||
|
Tiers []tierBreakdown `json:"tiers"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type details struct {
|
||||||
|
Total float64 `json:"total"`
|
||||||
|
Breakdown []usageResponse `json:"breakdown"`
|
||||||
|
BaseFee float64 `json:"baseFee"`
|
||||||
|
BillTotal float64 `json:"billTotal"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type billingDetails struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data struct {
|
||||||
|
BillingPeriodStart int64 `json:"billingPeriodStart"`
|
||||||
|
BillingPeriodEnd int64 `json:"billingPeriodEnd"`
|
||||||
|
Details details `json:"details"`
|
||||||
|
Discount float64 `json:"discount"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
|
||||||
licenses, apiError := ah.LM().GetLicenses(context.Background())
|
licenses, apiError := ah.LM().GetLicenses(context.Background())
|
||||||
if apiError != nil {
|
if apiError != nil {
|
||||||
@@ -17,7 +52,6 @@ func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.Background()
|
|
||||||
var l model.License
|
var l model.License
|
||||||
|
|
||||||
if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
|
||||||
@@ -29,8 +63,7 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
|||||||
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
|
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
license, apiError := ah.LM().Activate(r.Context(), l.Key)
|
||||||
license, apiError := ah.LM().Activate(ctx, l.Key)
|
|
||||||
if apiError != nil {
|
if apiError != nil {
|
||||||
RespondError(w, apiError, nil)
|
RespondError(w, apiError, nil)
|
||||||
return
|
return
|
||||||
@@ -38,3 +71,186 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
ah.Respond(w, license)
|
ah.Respond(w, license)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
|
type checkoutResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data struct {
|
||||||
|
RedirectURL string `json:"redirectURL"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
hClient := &http.Client{}
|
||||||
|
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
|
||||||
|
licenseResp, err := hClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode response body
|
||||||
|
var resp checkoutResponse
|
||||||
|
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ah.Respond(w, resp.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
|
||||||
|
licenseKey := r.URL.Query().Get("licenseKey")
|
||||||
|
|
||||||
|
if licenseKey == "" {
|
||||||
|
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
billingURL := fmt.Sprintf("%s/usage?licenseKey=%s", constants.LicenseSignozIo, licenseKey)
|
||||||
|
|
||||||
|
hClient := &http.Client{}
|
||||||
|
req, err := http.NewRequest("GET", billingURL, nil)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
|
||||||
|
billingResp, err := hClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode response body
|
||||||
|
var billingResponse billingDetails
|
||||||
|
if err := json.NewDecoder(billingResp.Body).Decode(&billingResponse); err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(srikanthccv):Fetch the current day usage and add it to the response
|
||||||
|
ah.Respond(w, billingResponse.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
|
licenses, apiError := ah.LM().GetLicenses(context.Background())
|
||||||
|
if apiError != nil {
|
||||||
|
RespondError(w, apiError, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp := model.Licenses{
|
||||||
|
TrialStart: -1,
|
||||||
|
TrialEnd: -1,
|
||||||
|
OnTrial: false,
|
||||||
|
WorkSpaceBlock: false,
|
||||||
|
TrialConvertedToSubscription: false,
|
||||||
|
GracePeriodEnd: -1,
|
||||||
|
Licenses: licenses,
|
||||||
|
}
|
||||||
|
|
||||||
|
var currentActiveLicenseKey string
|
||||||
|
|
||||||
|
for _, license := range licenses {
|
||||||
|
if license.IsCurrent {
|
||||||
|
currentActiveLicenseKey = license.Key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// For the case when no license is applied i.e community edition
|
||||||
|
// There will be no trial details or license details
|
||||||
|
if currentActiveLicenseKey == "" {
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch trial details
|
||||||
|
hClient := &http.Client{}
|
||||||
|
url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
|
||||||
|
req, err := http.NewRequest("GET", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
zap.S().Error("Error while creating request for trial details", err)
|
||||||
|
// If there is an error in fetching trial details, we will still return the license details
|
||||||
|
// to avoid blocking the UI
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
|
||||||
|
trialResp, err := hClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
zap.S().Error("Error while fetching trial details", err)
|
||||||
|
// If there is an error in fetching trial details, we will still return the license details
|
||||||
|
// to avoid incorrectly blocking the UI
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer trialResp.Body.Close()
|
||||||
|
|
||||||
|
trialRespBody, err := io.ReadAll(trialResp.Body)
|
||||||
|
|
||||||
|
if err != nil || trialResp.StatusCode != http.StatusOK {
|
||||||
|
zap.S().Error("Error while fetching trial details", err)
|
||||||
|
// If there is an error in fetching trial details, we will still return the license details
|
||||||
|
// to avoid incorrectly blocking the UI
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode response body
|
||||||
|
var trialRespData model.SubscriptionServerResp
|
||||||
|
|
||||||
|
if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
|
||||||
|
zap.S().Error("Error while decoding trial details", err)
|
||||||
|
// If there is an error in fetching trial details, we will still return the license details
|
||||||
|
// to avoid incorrectly blocking the UI
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp.TrialStart = trialRespData.Data.TrialStart
|
||||||
|
resp.TrialEnd = trialRespData.Data.TrialEnd
|
||||||
|
resp.OnTrial = trialRespData.Data.OnTrial
|
||||||
|
resp.WorkSpaceBlock = trialRespData.Data.WorkSpaceBlock
|
||||||
|
resp.TrialConvertedToSubscription = trialRespData.Data.TrialConvertedToSubscription
|
||||||
|
resp.GracePeriodEnd = trialRespData.Data.GracePeriodEnd
|
||||||
|
|
||||||
|
ah.Respond(w, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
|
type checkoutResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data struct {
|
||||||
|
RedirectURL string `json:"redirectURL"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
hClient := &http.Client{}
|
||||||
|
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
|
||||||
|
licenseResp, err := hClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode response body
|
||||||
|
var resp checkoutResponse
|
||||||
|
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
|
||||||
|
RespondError(w, model.InternalError(err), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ah.Respond(w, resp.Data)
|
||||||
|
}
|
||||||
|
|||||||
@@ -137,8 +137,8 @@ func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request
|
|||||||
var s basemodel.Series
|
var s basemodel.Series
|
||||||
s.QueryName = name
|
s.QueryName = name
|
||||||
s.Labels = v.Metric.Copy().Map()
|
s.Labels = v.Metric.Copy().Map()
|
||||||
for _, p := range v.Points {
|
for _, p := range v.Floats {
|
||||||
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.V})
|
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.F})
|
||||||
}
|
}
|
||||||
seriesList = append(seriesList, &s)
|
seriesList = append(seriesList, &s)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -47,8 +48,18 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
|
|||||||
req.CreatedAt = time.Now().Unix()
|
req.CreatedAt = time.Now().Unix()
|
||||||
req.Token = generatePATToken()
|
req.Token = generatePATToken()
|
||||||
|
|
||||||
|
// default expiry is 30 days
|
||||||
|
if req.ExpiresAt == 0 {
|
||||||
|
req.ExpiresAt = time.Now().AddDate(0, 0, 30).Unix()
|
||||||
|
}
|
||||||
|
// max expiry is 1 year
|
||||||
|
if req.ExpiresAt > time.Now().AddDate(1, 0, 0).Unix() {
|
||||||
|
req.ExpiresAt = time.Now().AddDate(1, 0, 0).Unix()
|
||||||
|
}
|
||||||
|
|
||||||
zap.S().Debugf("Got PAT request: %+v", req)
|
zap.S().Debugf("Got PAT request: %+v", req)
|
||||||
if apierr := ah.AppDao().CreatePAT(ctx, &req); apierr != nil {
|
var apierr basemodel.BaseApiError
|
||||||
|
if req, apierr = ah.AppDao().CreatePAT(ctx, req); apierr != nil {
|
||||||
RespondError(w, apierr, nil)
|
RespondError(w, apierr, nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
package db
|
package db
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ClickHouse/clickhouse-go/v2"
|
"github.com/ClickHouse/clickhouse-go/v2"
|
||||||
|
|
||||||
"github.com/jmoiron/sqlx"
|
"github.com/jmoiron/sqlx"
|
||||||
@@ -15,8 +17,16 @@ type ClickhouseReader struct {
|
|||||||
*basechr.ClickHouseReader
|
*basechr.ClickHouseReader
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDataConnector(localDB *sqlx.DB, promConfigPath string, lm interfaces.FeatureLookup) *ClickhouseReader {
|
func NewDataConnector(
|
||||||
ch := basechr.NewReader(localDB, promConfigPath, lm)
|
localDB *sqlx.DB,
|
||||||
|
promConfigPath string,
|
||||||
|
lm interfaces.FeatureLookup,
|
||||||
|
maxIdleConns int,
|
||||||
|
maxOpenConns int,
|
||||||
|
dialTimeout time.Duration,
|
||||||
|
cluster string,
|
||||||
|
) *ClickhouseReader {
|
||||||
|
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
|
||||||
return &ClickhouseReader{
|
return &ClickhouseReader{
|
||||||
conn: ch.GetConn(),
|
conn: ch.GetConn(),
|
||||||
appdb: localDB,
|
appdb: localDB,
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
_ "net/http/pprof" // http profiler
|
_ "net/http/pprof" // http profiler
|
||||||
@@ -20,9 +20,12 @@ import (
|
|||||||
"github.com/soheilhy/cmux"
|
"github.com/soheilhy/cmux"
|
||||||
"go.signoz.io/signoz/ee/query-service/app/api"
|
"go.signoz.io/signoz/ee/query-service/app/api"
|
||||||
"go.signoz.io/signoz/ee/query-service/app/db"
|
"go.signoz.io/signoz/ee/query-service/app/db"
|
||||||
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
"go.signoz.io/signoz/ee/query-service/dao"
|
"go.signoz.io/signoz/ee/query-service/dao"
|
||||||
"go.signoz.io/signoz/ee/query-service/interfaces"
|
"go.signoz.io/signoz/ee/query-service/interfaces"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
baseInterface "go.signoz.io/signoz/pkg/query-service/interfaces"
|
baseInterface "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||||
|
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
|
||||||
|
|
||||||
licensepkg "go.signoz.io/signoz/ee/query-service/license"
|
licensepkg "go.signoz.io/signoz/ee/query-service/license"
|
||||||
"go.signoz.io/signoz/ee/query-service/usage"
|
"go.signoz.io/signoz/ee/query-service/usage"
|
||||||
@@ -31,9 +34,11 @@ import (
|
|||||||
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
||||||
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
||||||
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
|
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
|
||||||
"go.signoz.io/signoz/pkg/query-service/app/opamp"
|
"go.signoz.io/signoz/pkg/query-service/app/opamp"
|
||||||
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
|
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
|
||||||
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/cache"
|
||||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||||
"go.signoz.io/signoz/pkg/query-service/healthcheck"
|
"go.signoz.io/signoz/pkg/query-service/healthcheck"
|
||||||
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
|
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
|
||||||
@@ -54,8 +59,16 @@ type ServerOptions struct {
|
|||||||
HTTPHostPort string
|
HTTPHostPort string
|
||||||
PrivateHostPort string
|
PrivateHostPort string
|
||||||
// alert specific params
|
// alert specific params
|
||||||
DisableRules bool
|
DisableRules bool
|
||||||
RuleRepoURL string
|
RuleRepoURL string
|
||||||
|
PreferDelta bool
|
||||||
|
PreferSpanMetrics bool
|
||||||
|
MaxIdleConns int
|
||||||
|
MaxOpenConns int
|
||||||
|
DialTimeout time.Duration
|
||||||
|
CacheConfigPath string
|
||||||
|
FluxInterval string
|
||||||
|
Cluster string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Server runs HTTP api service
|
// Server runs HTTP api service
|
||||||
@@ -76,6 +89,11 @@ type Server struct {
|
|||||||
// feature flags
|
// feature flags
|
||||||
featureLookup baseint.FeatureLookup
|
featureLookup baseint.FeatureLookup
|
||||||
|
|
||||||
|
// Usage manager
|
||||||
|
usageManager *usage.Manager
|
||||||
|
|
||||||
|
opampServer *opamp.Server
|
||||||
|
|
||||||
unavailableChannel chan healthcheck.Status
|
unavailableChannel chan healthcheck.Status
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -116,7 +134,15 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
storage := os.Getenv("STORAGE")
|
storage := os.Getenv("STORAGE")
|
||||||
if storage == "clickhouse" {
|
if storage == "clickhouse" {
|
||||||
zap.S().Info("Using ClickHouse as datastore ...")
|
zap.S().Info("Using ClickHouse as datastore ...")
|
||||||
qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath, lm)
|
qb := db.NewDataConnector(
|
||||||
|
localDB,
|
||||||
|
serverOptions.PromConfigPath,
|
||||||
|
lm,
|
||||||
|
serverOptions.MaxIdleConns,
|
||||||
|
serverOptions.MaxOpenConns,
|
||||||
|
serverOptions.DialTimeout,
|
||||||
|
serverOptions.Cluster,
|
||||||
|
)
|
||||||
go qb.Start(readerReady)
|
go qb.Start(readerReady)
|
||||||
reader = qb
|
reader = qb
|
||||||
} else {
|
} else {
|
||||||
@@ -150,8 +176,19 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ingestion pipelines manager
|
||||||
|
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(localDB, "sqlite")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
// initiate agent config handler
|
// initiate agent config handler
|
||||||
if err := agentConf.Initiate(localDB, AppDbEngine); err != nil {
|
agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
|
||||||
|
DB: localDB,
|
||||||
|
DBEngine: AppDbEngine,
|
||||||
|
AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -166,14 +203,39 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
telemetry.GetInstance().SetReader(reader)
|
telemetry.GetInstance().SetReader(reader)
|
||||||
|
telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
|
||||||
|
|
||||||
|
var c cache.Cache
|
||||||
|
if serverOptions.CacheConfigPath != "" {
|
||||||
|
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
c = cache.NewCache(cacheOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
apiOpts := api.APIHandlerOptions{
|
apiOpts := api.APIHandlerOptions{
|
||||||
DataConnector: reader,
|
DataConnector: reader,
|
||||||
SkipConfig: skipConfig,
|
SkipConfig: skipConfig,
|
||||||
AppDao: modelDao,
|
PreferDelta: serverOptions.PreferDelta,
|
||||||
RulesManager: rm,
|
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
|
||||||
FeatureFlags: lm,
|
MaxIdleConns: serverOptions.MaxIdleConns,
|
||||||
LicenseManager: lm,
|
MaxOpenConns: serverOptions.MaxOpenConns,
|
||||||
|
DialTimeout: serverOptions.DialTimeout,
|
||||||
|
AppDao: modelDao,
|
||||||
|
RulesManager: rm,
|
||||||
|
UsageManager: usageManager,
|
||||||
|
FeatureFlags: lm,
|
||||||
|
LicenseManager: lm,
|
||||||
|
LogsParsingPipelineController: logParsingPipelineController,
|
||||||
|
Cache: c,
|
||||||
|
FluxInterval: fluxInterval,
|
||||||
}
|
}
|
||||||
|
|
||||||
apiHandler, err := api.NewAPIHandler(apiOpts)
|
apiHandler, err := api.NewAPIHandler(apiOpts)
|
||||||
@@ -187,6 +249,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
ruleManager: rm,
|
ruleManager: rm,
|
||||||
serverOptions: serverOptions,
|
serverOptions: serverOptions,
|
||||||
unavailableChannel: make(chan healthcheck.Status),
|
unavailableChannel: make(chan healthcheck.Status),
|
||||||
|
usageManager: usageManager,
|
||||||
}
|
}
|
||||||
|
|
||||||
httpServer, err := s.createPublicServer(apiHandler)
|
httpServer, err := s.createPublicServer(apiHandler)
|
||||||
@@ -204,6 +267,10 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
|||||||
|
|
||||||
s.privateHTTP = privateServer
|
s.privateHTTP = privateServer
|
||||||
|
|
||||||
|
s.opampServer = opamp.InitializeServer(
|
||||||
|
&opAmpModel.AllAgents, agentConfMgr,
|
||||||
|
)
|
||||||
|
|
||||||
return s, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -324,20 +391,20 @@ func (lrw *loggingResponseWriter) Flush() {
|
|||||||
lrw.ResponseWriter.(http.Flusher).Flush()
|
lrw.ResponseWriter.(http.Flusher).Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
|
func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface{}, bool) {
|
||||||
pathToExtractBodyFrom := "/api/v2/metrics/query_range"
|
pathToExtractBodyFrom := "/api/v3/query_range"
|
||||||
|
|
||||||
data := map[string]interface{}{}
|
data := map[string]interface{}{}
|
||||||
var postData *basemodel.QueryRangeParamsV2
|
var postData *v3.QueryRangeParamsV3
|
||||||
|
|
||||||
if path == pathToExtractBodyFrom && (r.Method == "POST") {
|
if path == pathToExtractBodyFrom && (r.Method == "POST") {
|
||||||
if r.Body != nil {
|
if r.Body != nil {
|
||||||
bodyBytes, err := ioutil.ReadAll(r.Body)
|
bodyBytes, err := io.ReadAll(r.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
r.Body.Close() // must close
|
r.Body.Close() // must close
|
||||||
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
|
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
|
||||||
json.Unmarshal(bodyBytes, &postData)
|
json.Unmarshal(bodyBytes, &postData)
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
@@ -348,24 +415,34 @@ func extractDashboardMetaData(path string, r *http.Request) (map[string]interfac
|
|||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
|
|
||||||
signozMetricNotFound := false
|
signozMetricsUsed := false
|
||||||
|
signozLogsUsed := false
|
||||||
|
dataSources := []string{}
|
||||||
if postData != nil {
|
if postData != nil {
|
||||||
signozMetricNotFound = telemetry.GetInstance().CheckSigNozMetricsV2(postData.CompositeMetricQuery)
|
|
||||||
|
|
||||||
if postData.CompositeMetricQuery != nil {
|
if postData.CompositeQuery != nil {
|
||||||
data["queryType"] = postData.CompositeMetricQuery.QueryType
|
data["queryType"] = postData.CompositeQuery.QueryType
|
||||||
data["panelType"] = postData.CompositeMetricQuery.PanelType
|
data["panelType"] = postData.CompositeQuery.PanelType
|
||||||
|
|
||||||
|
signozLogsUsed, signozMetricsUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
|
||||||
}
|
}
|
||||||
|
|
||||||
data["datasource"] = postData.DataSource
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if signozMetricNotFound {
|
if signozMetricsUsed || signozLogsUsed {
|
||||||
telemetry.GetInstance().AddActiveMetricsUser()
|
if signozMetricsUsed {
|
||||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
|
dataSources = append(dataSources, "metrics")
|
||||||
|
telemetry.GetInstance().AddActiveMetricsUser()
|
||||||
|
}
|
||||||
|
if signozLogsUsed {
|
||||||
|
dataSources = append(dataSources, "logs")
|
||||||
|
telemetry.GetInstance().AddActiveLogsUser()
|
||||||
|
}
|
||||||
|
data["dataSources"] = dataSources
|
||||||
|
userEmail, err := auth.GetEmailFromJwt(r.Context())
|
||||||
|
if err == nil {
|
||||||
|
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_V3, data, userEmail, true)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return data, true
|
return data, true
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -385,10 +462,12 @@ func getActiveLogs(path string, r *http.Request) {
|
|||||||
|
|
||||||
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
|
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := auth.AttachJwtToContext(r.Context(), r)
|
||||||
|
r = r.WithContext(ctx)
|
||||||
route := mux.CurrentRoute(r)
|
route := mux.CurrentRoute(r)
|
||||||
path, _ := route.GetPathTemplate()
|
path, _ := route.GetPathTemplate()
|
||||||
|
|
||||||
dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
|
queryRangeV3data, metadataExists := extractQueryRangeV3Data(path, r)
|
||||||
getActiveLogs(path, r)
|
getActiveLogs(path, r)
|
||||||
|
|
||||||
lrw := NewLoggingResponseWriter(w)
|
lrw := NewLoggingResponseWriter(w)
|
||||||
@@ -396,13 +475,16 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
|
|||||||
|
|
||||||
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
|
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
|
||||||
if metadataExists {
|
if metadataExists {
|
||||||
for key, value := range dashboardMetadata {
|
for key, value := range queryRangeV3data {
|
||||||
data[key] = value
|
data[key] = value
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
|
if _, ok := telemetry.EnabledPaths()[path]; ok {
|
||||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
|
userEmail, err := auth.GetEmailFromJwt(r.Context())
|
||||||
|
if err == nil {
|
||||||
|
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
})
|
})
|
||||||
@@ -519,7 +601,7 @@ func (s *Server) Start() error {
|
|||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
|
zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
|
||||||
err := opamp.InitalizeServer(baseconst.OpAmpWsEndpoint, &opAmpModel.AllAgents)
|
err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Info("opamp ws server failed to start", err)
|
zap.S().Info("opamp ws server failed to start", err)
|
||||||
s.unavailableChannel <- healthcheck.Unavailable
|
s.unavailableChannel <- healthcheck.Unavailable
|
||||||
@@ -542,12 +624,15 @@ func (s *Server) Stop() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
opamp.StopServer()
|
s.opampServer.Stop()
|
||||||
|
|
||||||
if s.ruleManager != nil {
|
if s.ruleManager != nil {
|
||||||
s.ruleManager.Stop()
|
s.ruleManager.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// stop usage manager
|
||||||
|
s.usageManager.Stop()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -9,7 +9,8 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var LicenseSignozIo = "https://license.signoz.io/api/v1"
|
var LicenseSignozIo = "https://license.signoz.io/api/v1"
|
||||||
|
var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
|
||||||
|
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
|
||||||
var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
|
var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
|
||||||
|
|
||||||
func GetOrDefaultEnv(key string, fallback string) string {
|
func GetOrDefaultEnv(key string, fallback string) string {
|
||||||
|
|||||||
@@ -21,7 +21,6 @@ type ModelDao interface {
|
|||||||
DB() *sqlx.DB
|
DB() *sqlx.DB
|
||||||
|
|
||||||
// auth methods
|
// auth methods
|
||||||
PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
|
|
||||||
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
|
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
|
||||||
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
|
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
|
||||||
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
|
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
|
||||||
@@ -34,7 +33,7 @@ type ModelDao interface {
|
|||||||
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
|
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
|
||||||
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
|
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
|
||||||
|
|
||||||
CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError
|
CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError)
|
||||||
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
|
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
|
||||||
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
|
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
|
||||||
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
|
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
|
||||||
|
|||||||
@@ -5,16 +5,61 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
"go.signoz.io/signoz/ee/query-service/constants"
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
|
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
"go.signoz.io/signoz/pkg/query-service/utils"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PrepareSsoRedirect prepares redirect page link after SSO response
|
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
|
||||||
|
// get auth domain from email domain
|
||||||
|
domain, apierr := m.GetDomainByEmail(ctx, email)
|
||||||
|
|
||||||
|
if apierr != nil {
|
||||||
|
zap.S().Errorf("failed to get domain from email", apierr)
|
||||||
|
return nil, model.InternalErrorStr("failed to get domain from email")
|
||||||
|
}
|
||||||
|
|
||||||
|
hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
|
||||||
|
if err != nil {
|
||||||
|
zap.S().Errorf("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
|
||||||
|
return nil, model.InternalErrorStr("failed to generate password hash")
|
||||||
|
}
|
||||||
|
|
||||||
|
group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
|
||||||
|
if apiErr != nil {
|
||||||
|
zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
|
||||||
|
return nil, apiErr
|
||||||
|
}
|
||||||
|
|
||||||
|
user := &basemodel.User{
|
||||||
|
Id: uuid.NewString(),
|
||||||
|
Name: "",
|
||||||
|
Email: email,
|
||||||
|
Password: hash,
|
||||||
|
CreatedAt: time.Now().Unix(),
|
||||||
|
ProfilePictureURL: "", // Currently unused
|
||||||
|
GroupId: group.Id,
|
||||||
|
OrgId: domain.OrgId,
|
||||||
|
}
|
||||||
|
|
||||||
|
user, apiErr = m.CreateUser(ctx, user, false)
|
||||||
|
if apiErr != nil {
|
||||||
|
zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err)
|
||||||
|
return nil, apiErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return user, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrepareSsoRedirect prepares redirect page link after SSO response
|
||||||
// is successfully parsed (i.e. valid email is available)
|
// is successfully parsed (i.e. valid email is available)
|
||||||
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
|
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
|
||||||
|
|
||||||
@@ -24,7 +69,20 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
|
|||||||
return "", model.BadRequestStr("invalid user email received from the auth provider")
|
return "", model.BadRequestStr("invalid user email received from the auth provider")
|
||||||
}
|
}
|
||||||
|
|
||||||
tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
|
user := &basemodel.User{}
|
||||||
|
|
||||||
|
if userPayload == nil {
|
||||||
|
newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
|
||||||
|
user = newUser
|
||||||
|
if apiErr != nil {
|
||||||
|
zap.S().Errorf("failed to create user with email received from auth provider: %v", apierr.Error())
|
||||||
|
return "", apiErr
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
user = &userPayload.User
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenStore, err := baseauth.GenerateJWTForUser(user)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to generate token for SSO login user", err)
|
zap.S().Errorf("failed to generate token for SSO login user", err)
|
||||||
return "", model.InternalErrorStr("failed to generate token for the user")
|
return "", model.InternalErrorStr("failed to generate token for the user")
|
||||||
@@ -33,7 +91,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
|
|||||||
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
|
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
|
||||||
redirectUri,
|
redirectUri,
|
||||||
tokenStore.AccessJwt,
|
tokenStore.AccessJwt,
|
||||||
userPayload.User.Id,
|
user.Id,
|
||||||
tokenStore.RefreshJwt), nil
|
tokenStore.RefreshJwt), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -62,10 +120,10 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base
|
|||||||
|
|
||||||
// PrecheckLogin is called when the login or signup page is loaded
|
// PrecheckLogin is called when the login or signup page is loaded
|
||||||
// to check sso login is to be prompted
|
// to check sso login is to be prompted
|
||||||
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError) {
|
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*basemodel.PrecheckResponse, basemodel.BaseApiError) {
|
||||||
|
|
||||||
// assume user is valid unless proven otherwise
|
// assume user is valid unless proven otherwise
|
||||||
resp := &model.PrecheckResponse{IsUser: true, CanSelfRegister: false}
|
resp := &basemodel.PrecheckResponse{IsUser: true, CanSelfRegister: false}
|
||||||
|
|
||||||
// check if email is a valid user
|
// check if email is a valid user
|
||||||
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
|
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
|
||||||
@@ -76,6 +134,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
|
|||||||
if userPayload == nil {
|
if userPayload == nil {
|
||||||
resp.IsUser = false
|
resp.IsUser = false
|
||||||
}
|
}
|
||||||
|
|
||||||
ssoAvailable := true
|
ssoAvailable := true
|
||||||
err := m.checkFeature(model.SSO)
|
err := m.checkFeature(model.SSO)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -91,6 +150,8 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
|
|||||||
|
|
||||||
if ssoAvailable {
|
if ssoAvailable {
|
||||||
|
|
||||||
|
resp.IsUser = true
|
||||||
|
|
||||||
// find domain from email
|
// find domain from email
|
||||||
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
|
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
|
||||||
if apierr != nil {
|
if apierr != nil {
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"net/url"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -28,29 +28,70 @@ type StoredDomain struct {
|
|||||||
|
|
||||||
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
|
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
|
||||||
// user domain. The domain is further used to process validity of the response.
|
// user domain. The domain is further used to process validity of the response.
|
||||||
// when sending login request to IdP we send relay state as URL (site url)
|
// when sending login request to IdP we send relay state as URL (site url)
|
||||||
// with domainId as query parameter.
|
// with domainId or domainName as query parameter.
|
||||||
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
|
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
|
||||||
// derive domain id from relay state now
|
// derive domain id from relay state now
|
||||||
var domainIdStr string
|
var domainIdStr string
|
||||||
|
var domainNameStr string
|
||||||
|
var domain *model.OrgDomain
|
||||||
|
|
||||||
for k, v := range relayState.Query() {
|
for k, v := range relayState.Query() {
|
||||||
if k == "domainId" && len(v) > 0 {
|
if k == "domainId" && len(v) > 0 {
|
||||||
domainIdStr = strings.Replace(v[0], ":", "-", -1)
|
domainIdStr = strings.Replace(v[0], ":", "-", -1)
|
||||||
}
|
}
|
||||||
|
if k == "domainName" && len(v) > 0 {
|
||||||
|
domainNameStr = v[0]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
domainId, err := uuid.Parse(domainIdStr)
|
if domainIdStr != "" {
|
||||||
|
domainId, err := uuid.Parse(domainIdStr)
|
||||||
|
if err != nil {
|
||||||
|
zap.S().Errorf("failed to parse domainId from relay state", err)
|
||||||
|
return nil, fmt.Errorf("failed to parse domainId from IdP response")
|
||||||
|
}
|
||||||
|
|
||||||
|
domain, err = m.GetDomain(ctx, domainId)
|
||||||
|
if (err != nil) || domain == nil {
|
||||||
|
zap.S().Errorf("failed to find domain from domainId received in IdP response", err.Error())
|
||||||
|
return nil, fmt.Errorf("invalid credentials")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if domainNameStr != "" {
|
||||||
|
|
||||||
|
domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
|
||||||
|
domain = domainFromDB
|
||||||
|
if (err != nil) || domain == nil {
|
||||||
|
zap.S().Errorf("failed to find domain from domainName received in IdP response", err.Error())
|
||||||
|
return nil, fmt.Errorf("invalid credentials")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if domain != nil {
|
||||||
|
return domain, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("failed to find domain received in IdP response")
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDomainByName returns org domain for a given domain name
|
||||||
|
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {
|
||||||
|
|
||||||
|
stored := StoredDomain{}
|
||||||
|
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to parse domain id from relay state", err)
|
if err == sql.ErrNoRows {
|
||||||
return nil, fmt.Errorf("failed to parse response from IdP response")
|
return nil, model.BadRequest(fmt.Errorf("invalid domain name"))
|
||||||
|
}
|
||||||
|
return nil, model.InternalError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
domain, err := m.GetDomain(ctx, domainId)
|
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
||||||
if (err != nil) || domain == nil {
|
if err := domain.LoadConfig(stored.Data); err != nil {
|
||||||
zap.S().Errorf("failed to find domain received in IdP response", err.Error())
|
return nil, model.InternalError(err)
|
||||||
return nil, fmt.Errorf("invalid credentials")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return domain, nil
|
return domain, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,7 +110,7 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomai
|
|||||||
|
|
||||||
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
||||||
if err := domain.LoadConfig(stored.Data); err != nil {
|
if err := domain.LoadConfig(stored.Data); err != nil {
|
||||||
return domain, model.InternalError(err)
|
return nil, model.InternalError(err)
|
||||||
}
|
}
|
||||||
return domain, nil
|
return domain, nil
|
||||||
}
|
}
|
||||||
@@ -206,7 +247,7 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
|
|||||||
|
|
||||||
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
||||||
if err := domain.LoadConfig(stored.Data); err != nil {
|
if err := domain.LoadConfig(stored.Data); err != nil {
|
||||||
return domain, model.InternalError(err)
|
return nil, model.InternalError(err)
|
||||||
}
|
}
|
||||||
return domain, nil
|
return domain, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,14 +3,15 @@ package sqlite
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (m *modelDao) CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError {
|
func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) {
|
||||||
_, err := m.DB().ExecContext(ctx,
|
result, err := m.DB().ExecContext(ctx,
|
||||||
"INSERT INTO personal_access_tokens (user_id, token, name, created_at, expires_at) VALUES ($1, $2, $3, $4, $5)",
|
"INSERT INTO personal_access_tokens (user_id, token, name, created_at, expires_at) VALUES ($1, $2, $3, $4, $5)",
|
||||||
p.UserID,
|
p.UserID,
|
||||||
p.Token,
|
p.Token,
|
||||||
@@ -19,9 +20,15 @@ func (m *modelDao) CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseAp
|
|||||||
p.ExpiresAt)
|
p.ExpiresAt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
|
zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
|
||||||
return model.InternalError(fmt.Errorf("PAT insertion failed"))
|
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
|
||||||
}
|
}
|
||||||
return nil
|
id, err := result.LastInsertId()
|
||||||
|
if err != nil {
|
||||||
|
zap.S().Errorf("Failed to get last inserted id, err: %v", zap.Error(err))
|
||||||
|
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
|
||||||
|
}
|
||||||
|
p.Id = strconv.Itoa(int(id))
|
||||||
|
return p, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
|
func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
|
||||||
@@ -90,7 +97,7 @@ func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.U
|
|||||||
u.org_id,
|
u.org_id,
|
||||||
u.group_id
|
u.group_id
|
||||||
FROM users u, personal_access_tokens p
|
FROM users u, personal_access_tokens p
|
||||||
WHERE u.id = p.user_id and p.token=?;`
|
WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`
|
||||||
|
|
||||||
if err := m.DB().Select(&users, query, token); err != nil {
|
if err := m.DB().Select(&users, query, token); err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
|
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
|
||||||
|
|||||||
@@ -6,13 +6,13 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
|
||||||
"go.signoz.io/signoz/ee/query-service/constants"
|
"go.signoz.io/signoz/ee/query-service/constants"
|
||||||
"go.signoz.io/signoz/ee/query-service/model"
|
"go.signoz.io/signoz/ee/query-service/model"
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var C *Client
|
var C *Client
|
||||||
@@ -51,7 +51,7 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
|
|||||||
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
|
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
|
||||||
}
|
}
|
||||||
|
|
||||||
httpBody, err := ioutil.ReadAll(httpResponse.Body)
|
httpBody, err := io.ReadAll(httpResponse.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.S().Errorf("failed to read activation response from license.signoz.io", err)
|
zap.S().Errorf("failed to read activation response from license.signoz.io", err)
|
||||||
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
|
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
|
||||||
@@ -91,7 +91,7 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
|
|||||||
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
|
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
|
||||||
}
|
}
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(response.Body)
|
body, err := io.ReadAll(response.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
|
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
|
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||||
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
|
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
|
||||||
|
|
||||||
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
|
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
|
||||||
@@ -203,7 +204,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
|||||||
zap.S().Errorf("License validation completed with error", reterr)
|
zap.S().Errorf("License validation completed with error", reterr)
|
||||||
atomic.AddUint64(&lm.failedAttempts, 1)
|
atomic.AddUint64(&lm.failedAttempts, 1)
|
||||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
|
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
|
||||||
map[string]interface{}{"err": reterr.Error()})
|
map[string]interface{}{"err": reterr.Error()}, "")
|
||||||
} else {
|
} else {
|
||||||
zap.S().Info("License validation completed with no errors")
|
zap.S().Info("License validation completed with no errors")
|
||||||
}
|
}
|
||||||
@@ -259,8 +260,11 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
|||||||
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
|
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if errResponse != nil {
|
if errResponse != nil {
|
||||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
|
userEmail, err := auth.GetEmailFromJwt(ctx)
|
||||||
map[string]interface{}{"err": errResponse.Err.Error()})
|
if err == nil {
|
||||||
|
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
|
||||||
|
map[string]interface{}{"err": errResponse.Err.Error()}, userEmail)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
|||||||
@@ -81,17 +81,35 @@ func main() {
|
|||||||
|
|
||||||
// the url used to build link in the alert messages in slack and other systems
|
// the url used to build link in the alert messages in slack and other systems
|
||||||
var ruleRepoURL string
|
var ruleRepoURL string
|
||||||
|
var cluster string
|
||||||
|
|
||||||
|
var cacheConfigPath, fluxInterval string
|
||||||
var enableQueryServiceLogOTLPExport bool
|
var enableQueryServiceLogOTLPExport bool
|
||||||
|
var preferDelta bool
|
||||||
|
var preferSpanMetrics bool
|
||||||
|
|
||||||
|
var maxIdleConns int
|
||||||
|
var maxOpenConns int
|
||||||
|
var dialTimeout time.Duration
|
||||||
|
|
||||||
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
|
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
|
||||||
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
|
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
|
||||||
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
|
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
|
||||||
|
flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
|
||||||
|
flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
|
||||||
|
flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
|
||||||
|
flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
|
||||||
|
flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
|
||||||
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
|
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
|
||||||
|
flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
|
||||||
|
flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)")
|
||||||
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
|
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
|
||||||
|
flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
|
||||||
|
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
|
loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
|
||||||
|
|
||||||
zap.ReplaceGlobals(loggerMgr)
|
zap.ReplaceGlobals(loggerMgr)
|
||||||
defer loggerMgr.Sync() // flushes buffer, if any
|
defer loggerMgr.Sync() // flushes buffer, if any
|
||||||
|
|
||||||
@@ -102,9 +120,17 @@ func main() {
|
|||||||
HTTPHostPort: baseconst.HTTPHostPort,
|
HTTPHostPort: baseconst.HTTPHostPort,
|
||||||
PromConfigPath: promConfigPath,
|
PromConfigPath: promConfigPath,
|
||||||
SkipTopLvlOpsPath: skipTopLvlOpsPath,
|
SkipTopLvlOpsPath: skipTopLvlOpsPath,
|
||||||
|
PreferDelta: preferDelta,
|
||||||
|
PreferSpanMetrics: preferSpanMetrics,
|
||||||
PrivateHostPort: baseconst.PrivateHostPort,
|
PrivateHostPort: baseconst.PrivateHostPort,
|
||||||
DisableRules: disableRules,
|
DisableRules: disableRules,
|
||||||
RuleRepoURL: ruleRepoURL,
|
RuleRepoURL: ruleRepoURL,
|
||||||
|
MaxIdleConns: maxIdleConns,
|
||||||
|
MaxOpenConns: maxOpenConns,
|
||||||
|
DialTimeout: dialTimeout,
|
||||||
|
CacheConfigPath: cacheConfigPath,
|
||||||
|
FluxInterval: fluxInterval,
|
||||||
|
Cluster: cluster,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read the jwt secret key
|
// Read the jwt secret key
|
||||||
@@ -138,6 +164,7 @@ func main() {
|
|||||||
logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
|
logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
|
||||||
case <-signalsChannel:
|
case <-signalsChannel:
|
||||||
logger.Fatal("Received OS Interrupt Signal ... ")
|
logger.Fatal("Received OS Interrupt Signal ... ")
|
||||||
|
server.Stop()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,18 +4,9 @@ import (
|
|||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PrecheckResponse contains login precheck response
|
|
||||||
type PrecheckResponse struct {
|
|
||||||
SSO bool `json:"sso"`
|
|
||||||
SsoUrl string `json:"ssoUrl"`
|
|
||||||
CanSelfRegister bool `json:"canSelfRegister"`
|
|
||||||
IsUser bool `json:"isUser"`
|
|
||||||
SsoError string `json:"ssoError"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GettableInvitation overrides base object and adds precheck into
|
// GettableInvitation overrides base object and adds precheck into
|
||||||
// response
|
// response
|
||||||
type GettableInvitation struct {
|
type GettableInvitation struct {
|
||||||
*basemodel.InvitationResponseObject
|
*basemodel.InvitationResponseObject
|
||||||
Precheck *PrecheckResponse `json:"precheck"`
|
Precheck *basemodel.PrecheckResponse `json:"precheck"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package model
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -61,7 +62,6 @@ func InternalError(err error) *ApiError {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// InternalErrorStr returns a ApiError object of internal type for string input
|
// InternalErrorStr returns a ApiError object of internal type for string input
|
||||||
func InternalErrorStr(s string) *ApiError {
|
func InternalErrorStr(s string) *ApiError {
|
||||||
return &ApiError{
|
return &ApiError{
|
||||||
@@ -69,6 +69,7 @@ func InternalErrorStr(s string) *ApiError {
|
|||||||
Err: fmt.Errorf(s),
|
Err: fmt.Errorf(s),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrorNone basemodel.ErrorType = ""
|
ErrorNone basemodel.ErrorType = ""
|
||||||
ErrorTimeout basemodel.ErrorType = "timeout"
|
ErrorTimeout basemodel.ErrorType = "timeout"
|
||||||
|
|||||||
@@ -89,3 +89,18 @@ func (l *License) ParseFeatures() {
|
|||||||
l.FeatureSet = BasicPlan
|
l.FeatureSet = BasicPlan
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Licenses struct {
|
||||||
|
TrialStart int64 `json:"trialStart"`
|
||||||
|
TrialEnd int64 `json:"trialEnd"`
|
||||||
|
OnTrial bool `json:"onTrial"`
|
||||||
|
WorkSpaceBlock bool `json:"workSpaceBlock"`
|
||||||
|
TrialConvertedToSubscription bool `json:"trialConvertedToSubscription"`
|
||||||
|
GracePeriodEnd int64 `json:"gracePeriodEnd"`
|
||||||
|
Licenses []License `json:"licenses"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SubscriptionServerResp struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data Licenses `json:"data"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -6,5 +6,5 @@ type PAT struct {
|
|||||||
Token string `json:"token" db:"token"`
|
Token string `json:"token" db:"token"`
|
||||||
Name string `json:"name" db:"name"`
|
Name string `json:"name" db:"name"`
|
||||||
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
CreatedAt int64 `json:"createdAt" db:"created_at"`
|
||||||
ExpiresAt int64 `json:"expiresAt" db:"expires_at"` // unused as of now
|
ExpiresAt int64 `json:"expiresAt" db:"expires_at"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,8 @@ const Basic = "BASIC_PLAN"
|
|||||||
const Pro = "PRO_PLAN"
|
const Pro = "PRO_PLAN"
|
||||||
const Enterprise = "ENTERPRISE_PLAN"
|
const Enterprise = "ENTERPRISE_PLAN"
|
||||||
const DisableUpsell = "DISABLE_UPSELL"
|
const DisableUpsell = "DISABLE_UPSELL"
|
||||||
|
const Onboarding = "ONBOARDING"
|
||||||
|
const ChatSupport = "CHAT_SUPPORT"
|
||||||
|
|
||||||
var BasicPlan = basemodel.FeatureSet{
|
var BasicPlan = basemodel.FeatureSet{
|
||||||
basemodel.Feature{
|
basemodel.Feature{
|
||||||
@@ -50,14 +52,56 @@ var BasicPlan = basemodel.FeatureSet{
|
|||||||
Name: basemodel.QueryBuilderPanels,
|
Name: basemodel.QueryBuilderPanels,
|
||||||
Active: true,
|
Active: true,
|
||||||
Usage: 0,
|
Usage: 0,
|
||||||
UsageLimit: 5,
|
UsageLimit: 20,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
basemodel.Feature{
|
basemodel.Feature{
|
||||||
Name: basemodel.QueryBuilderAlerts,
|
Name: basemodel.QueryBuilderAlerts,
|
||||||
Active: true,
|
Active: true,
|
||||||
Usage: 0,
|
Usage: 0,
|
||||||
UsageLimit: 5,
|
UsageLimit: 10,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelSlack,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelWebhook,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelPagerduty,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelOpsgenie,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelMsTeams,
|
||||||
|
Active: false,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.UseSpanMetrics,
|
||||||
|
Active: false,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -105,6 +149,48 @@ var ProPlan = basemodel.FeatureSet{
|
|||||||
UsageLimit: -1,
|
UsageLimit: -1,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelSlack,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelWebhook,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelPagerduty,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelOpsgenie,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelMsTeams,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.UseSpanMetrics,
|
||||||
|
Active: false,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
var EnterprisePlan = basemodel.FeatureSet{
|
var EnterprisePlan = basemodel.FeatureSet{
|
||||||
@@ -150,4 +236,60 @@ var EnterprisePlan = basemodel.FeatureSet{
|
|||||||
UsageLimit: -1,
|
UsageLimit: -1,
|
||||||
Route: "",
|
Route: "",
|
||||||
},
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelSlack,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelWebhook,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelPagerduty,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelOpsgenie,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.AlertChannelMsTeams,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: basemodel.UseSpanMetrics,
|
||||||
|
Active: false,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: Onboarding,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
|
basemodel.Feature{
|
||||||
|
Name: ChatSupport,
|
||||||
|
Active: true,
|
||||||
|
Usage: 0,
|
||||||
|
UsageLimit: -1,
|
||||||
|
Route: "",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ClickHouse/clickhouse-go/v2"
|
"github.com/ClickHouse/clickhouse-go/v2"
|
||||||
|
"github.com/go-co-op/gocron"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/jmoiron/sqlx"
|
"github.com/jmoiron/sqlx"
|
||||||
|
|
||||||
@@ -28,9 +29,6 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// send usage every 24 hour
|
|
||||||
uploadFrequency = 24 * time.Hour
|
|
||||||
|
|
||||||
locker = stateUnlocked
|
locker = stateUnlocked
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -39,12 +37,7 @@ type Manager struct {
|
|||||||
|
|
||||||
licenseRepo *license.Repo
|
licenseRepo *license.Repo
|
||||||
|
|
||||||
// end the usage routine, this is important to gracefully
|
scheduler *gocron.Scheduler
|
||||||
// stopping usage reporting and protect in-consistent updates
|
|
||||||
done chan struct{}
|
|
||||||
|
|
||||||
// terminated waits for the UsageExporter go routine to end
|
|
||||||
terminated chan struct{}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
|
func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
|
||||||
@@ -53,6 +46,7 @@ func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn c
|
|||||||
// repository: repo,
|
// repository: repo,
|
||||||
clickhouseConn: clickhouseConn,
|
clickhouseConn: clickhouseConn,
|
||||||
licenseRepo: licenseRepo,
|
licenseRepo: licenseRepo,
|
||||||
|
scheduler: gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every at 00:00 UTC
|
||||||
}
|
}
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
@@ -64,37 +58,30 @@ func (lm *Manager) Start() error {
|
|||||||
return fmt.Errorf("usage exporter is locked")
|
return fmt.Errorf("usage exporter is locked")
|
||||||
}
|
}
|
||||||
|
|
||||||
go lm.UsageExporter(context.Background())
|
_, err := lm.scheduler.Do(func() { lm.UploadUsage() })
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// upload usage once when starting the service
|
||||||
|
lm.UploadUsage()
|
||||||
|
|
||||||
|
lm.scheduler.StartAsync()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
func (lm *Manager) UploadUsage() {
|
||||||
func (lm *Manager) UsageExporter(ctx context.Context) {
|
ctx := context.Background()
|
||||||
defer close(lm.terminated)
|
|
||||||
|
|
||||||
uploadTicker := time.NewTicker(uploadFrequency)
|
|
||||||
defer uploadTicker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-lm.done:
|
|
||||||
return
|
|
||||||
case <-uploadTicker.C:
|
|
||||||
lm.UploadUsage(ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lm *Manager) UploadUsage(ctx context.Context) error {
|
|
||||||
// check if license is present or not
|
// check if license is present or not
|
||||||
license, err := lm.licenseRepo.GetActiveLicense(context.Background())
|
license, err := lm.licenseRepo.GetActiveLicense(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to get active license")
|
zap.S().Errorf("failed to get active license: %v", zap.Error(err))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
if license == nil {
|
if license == nil {
|
||||||
// we will not start the usage reporting if license is not present.
|
// we will not start the usage reporting if license is not present.
|
||||||
zap.S().Info("no license present, skipping usage reporting")
|
zap.S().Info("no license present, skipping usage reporting")
|
||||||
return nil
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
usages := []model.UsageDB{}
|
usages := []model.UsageDB{}
|
||||||
@@ -120,7 +107,8 @@ func (lm *Manager) UploadUsage(ctx context.Context) error {
|
|||||||
dbusages := []model.UsageDB{}
|
dbusages := []model.UsageDB{}
|
||||||
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
|
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
|
||||||
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
|
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
|
||||||
return err
|
zap.S().Errorf("failed to get usage from clickhouse: %v", zap.Error(err))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
for _, u := range dbusages {
|
for _, u := range dbusages {
|
||||||
u.Type = db
|
u.Type = db
|
||||||
@@ -130,7 +118,7 @@ func (lm *Manager) UploadUsage(ctx context.Context) error {
|
|||||||
|
|
||||||
if len(usages) <= 0 {
|
if len(usages) <= 0 {
|
||||||
zap.S().Info("no snapshots to upload, skipping.")
|
zap.S().Info("no snapshots to upload, skipping.")
|
||||||
return nil
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
zap.S().Info("uploading usage data")
|
zap.S().Info("uploading usage data")
|
||||||
@@ -139,13 +127,15 @@ func (lm *Manager) UploadUsage(ctx context.Context) error {
|
|||||||
for _, usage := range usages {
|
for _, usage := range usages {
|
||||||
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
|
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
zap.S().Errorf("error while decrypting usage data: %v", zap.Error(err))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
usageData := model.Usage{}
|
usageData := model.Usage{}
|
||||||
err = json.Unmarshal(usageDataBytes, &usageData)
|
err = json.Unmarshal(usageDataBytes, &usageData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
zap.S().Errorf("error while unmarshalling usage data: %v", zap.Error(err))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
usageData.CollectorID = usage.CollectorID
|
usageData.CollectorID = usage.CollectorID
|
||||||
@@ -160,20 +150,16 @@ func (lm *Manager) UploadUsage(ctx context.Context) error {
|
|||||||
LicenseKey: key,
|
LicenseKey: key,
|
||||||
Usage: usagesPayload,
|
Usage: usagesPayload,
|
||||||
}
|
}
|
||||||
err = lm.UploadUsageWithExponentalBackOff(ctx, payload)
|
lm.UploadUsageWithExponentalBackOff(ctx, payload)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) error {
|
func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) {
|
||||||
for i := 1; i <= MaxRetries; i++ {
|
for i := 1; i <= MaxRetries; i++ {
|
||||||
apiErr := licenseserver.SendUsage(ctx, payload)
|
apiErr := licenseserver.SendUsage(ctx, payload)
|
||||||
if apiErr != nil && i == MaxRetries {
|
if apiErr != nil && i == MaxRetries {
|
||||||
zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
|
zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
|
||||||
// not returning error here since it is captured in the failed count
|
// not returning error here since it is captured in the failed count
|
||||||
return nil
|
return
|
||||||
} else if apiErr != nil {
|
} else if apiErr != nil {
|
||||||
// sleeping for exponential backoff
|
// sleeping for exponential backoff
|
||||||
sleepDuration := RetryInterval * time.Duration(i)
|
sleepDuration := RetryInterval * time.Duration(i)
|
||||||
@@ -183,11 +169,14 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lm *Manager) Stop() {
|
func (lm *Manager) Stop() {
|
||||||
close(lm.done)
|
lm.scheduler.Stop()
|
||||||
|
|
||||||
|
zap.S().Debug("sending usage data before shutting down")
|
||||||
|
// send usage before shutting down
|
||||||
|
lm.UploadUsage()
|
||||||
|
|
||||||
atomic.StoreUint32(&locker, stateUnlocked)
|
atomic.StoreUint32(&locker, stateUnlocked)
|
||||||
<-lm.terminated
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,3 @@
|
|||||||
node_modules
|
node_modules
|
||||||
.vscode
|
.vscode
|
||||||
build
|
|
||||||
.env
|
|
||||||
.git
|
.git
|
||||||
|
|||||||
@@ -86,6 +86,7 @@ module.exports = {
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
|
'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
|
||||||
|
'no-plusplus': 'off',
|
||||||
'jsx-a11y/label-has-associated-control': [
|
'jsx-a11y/label-has-associated-control': [
|
||||||
'error',
|
'error',
|
||||||
{
|
{
|
||||||
@@ -109,7 +110,6 @@ module.exports = {
|
|||||||
// eslint rules need to remove
|
// eslint rules need to remove
|
||||||
'@typescript-eslint/no-shadow': 'off',
|
'@typescript-eslint/no-shadow': 'off',
|
||||||
'import/no-cycle': 'off',
|
'import/no-cycle': 'off',
|
||||||
|
|
||||||
'prettier/prettier': [
|
'prettier/prettier': [
|
||||||
'error',
|
'error',
|
||||||
{},
|
{},
|
||||||
|
|||||||
@@ -2,3 +2,19 @@
|
|||||||
. "$(dirname "$0")/_/husky.sh"
|
. "$(dirname "$0")/_/husky.sh"
|
||||||
|
|
||||||
cd frontend && yarn run commitlint --edit $1
|
cd frontend && yarn run commitlint --edit $1
|
||||||
|
|
||||||
|
branch="$(git rev-parse --abbrev-ref HEAD)"
|
||||||
|
|
||||||
|
color_red="$(tput setaf 1)"
|
||||||
|
bold="$(tput bold)"
|
||||||
|
reset="$(tput sgr0)"
|
||||||
|
|
||||||
|
if [ "$branch" = "main" ]; then
|
||||||
|
echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$branch" = "develop" ]; then
|
||||||
|
echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
6
frontend/.prettierignore
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
# Ignore artifacts:
|
||||||
|
build
|
||||||
|
coverage
|
||||||
|
|
||||||
|
# Ignore all MD files:
|
||||||
|
**/*.md
|
||||||
@@ -1,38 +1,17 @@
|
|||||||
# Builder stage
|
FROM nginx:1.25.2-alpine
|
||||||
FROM node:16.15.0 as builder
|
|
||||||
|
|
||||||
# Add Maintainer Info
|
# Add Maintainer Info
|
||||||
LABEL maintainer="signoz"
|
LABEL maintainer="signoz"
|
||||||
|
|
||||||
ARG TARGETOS=linux
|
# Set working directory
|
||||||
ARG TARGETARCH
|
|
||||||
|
|
||||||
WORKDIR /frontend
|
WORKDIR /frontend
|
||||||
|
|
||||||
# Copy the package.json and .yarnrc files prior to install dependencies
|
|
||||||
COPY package.json ./
|
|
||||||
# Copy lock file
|
|
||||||
COPY yarn.lock ./
|
|
||||||
COPY .yarnrc ./
|
|
||||||
|
|
||||||
# Install the dependencies and make the folder
|
|
||||||
RUN CI=1 yarn install
|
|
||||||
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Build the project and copy the files
|
|
||||||
RUN yarn build
|
|
||||||
|
|
||||||
|
|
||||||
FROM nginx:1.18-alpine
|
|
||||||
|
|
||||||
COPY conf/default.conf /etc/nginx/conf.d/default.conf
|
|
||||||
|
|
||||||
# Remove default nginx index page
|
# Remove default nginx index page
|
||||||
RUN rm -rf /usr/share/nginx/html/*
|
RUN rm -rf /usr/share/nginx/html/*
|
||||||
|
|
||||||
# Copy from the stahg 1
|
# Copy custom nginx config and static files
|
||||||
COPY --from=builder /frontend/build /usr/share/nginx/html
|
COPY conf/default.conf /etc/nginx/conf.d/default.conf
|
||||||
|
COPY build /usr/share/nginx/html
|
||||||
|
|
||||||
EXPOSE 3301
|
EXPOSE 3301
|
||||||
|
|
||||||
|
|||||||
7
frontend/example.env
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
NODE_ENV="development"
|
||||||
|
BUNDLE_ANALYSER="true"
|
||||||
|
FRONTEND_API_ENDPOINT="http://localhost:3301/"
|
||||||
|
INTERCOM_APP_ID="intercom-app-id"
|
||||||
|
|
||||||
|
PLAYWRIGHT_TEST_BASE_URL="http://localhost:3301"
|
||||||
|
CI="1"
|
||||||
@@ -7,7 +7,7 @@ const config: Config.InitialOptions = {
|
|||||||
moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
|
moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
|
||||||
modulePathIgnorePatterns: ['dist'],
|
modulePathIgnorePatterns: ['dist'],
|
||||||
moduleNameMapper: {
|
moduleNameMapper: {
|
||||||
'\\.(css|less)$': '<rootDir>/__mocks__/cssMock.ts',
|
'\\.(css|less|scss)$': '<rootDir>/__mocks__/cssMock.ts',
|
||||||
},
|
},
|
||||||
globals: {
|
globals: {
|
||||||
extensionsToTreatAsEsm: ['.ts'],
|
extensionsToTreatAsEsm: ['.ts'],
|
||||||
@@ -21,7 +21,9 @@ const config: Config.InitialOptions = {
|
|||||||
'^.+\\.(ts|tsx)?$': 'ts-jest',
|
'^.+\\.(ts|tsx)?$': 'ts-jest',
|
||||||
'^.+\\.(js|jsx)$': 'babel-jest',
|
'^.+\\.(js|jsx)$': 'babel-jest',
|
||||||
},
|
},
|
||||||
transformIgnorePatterns: ['node_modules/(?!(lodash-es)/)'],
|
transformIgnorePatterns: [
|
||||||
|
'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios)/)',
|
||||||
|
],
|
||||||
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
|
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
|
||||||
testPathIgnorePatterns: ['/node_modules/', '/public/'],
|
testPathIgnorePatterns: ['/node_modules/', '/public/'],
|
||||||
moduleDirectories: ['node_modules', 'src'],
|
moduleDirectories: ['node_modules', 'src'],
|
||||||
|
|||||||
@@ -8,6 +8,9 @@
|
|||||||
import '@testing-library/jest-dom';
|
import '@testing-library/jest-dom';
|
||||||
import 'jest-styled-components';
|
import 'jest-styled-components';
|
||||||
|
|
||||||
|
import { server } from './src/mocks-server/server';
|
||||||
|
// Establish API mocking before all tests.
|
||||||
|
|
||||||
// Mock window.matchMedia
|
// Mock window.matchMedia
|
||||||
window.matchMedia =
|
window.matchMedia =
|
||||||
window.matchMedia ||
|
window.matchMedia ||
|
||||||
@@ -18,3 +21,9 @@ window.matchMedia =
|
|||||||
removeListener: function () {},
|
removeListener: function () {},
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
beforeAll(() => server.listen());
|
||||||
|
|
||||||
|
afterEach(() => server.resetHandlers());
|
||||||
|
|
||||||
|
afterAll(() => server.close());
|
||||||
|
|||||||
@@ -29,34 +29,43 @@
|
|||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@ant-design/colors": "6.0.0",
|
"@ant-design/colors": "6.0.0",
|
||||||
"@ant-design/icons": "4.8.0",
|
"@ant-design/icons": "4.8.0",
|
||||||
"@grafana/data": "^8.4.3",
|
"@dnd-kit/core": "6.1.0",
|
||||||
|
"@dnd-kit/modifiers": "7.0.0",
|
||||||
|
"@dnd-kit/sortable": "8.0.0",
|
||||||
|
"@grafana/data": "^9.5.2",
|
||||||
|
"@mdx-js/loader": "2.3.0",
|
||||||
|
"@mdx-js/react": "2.3.0",
|
||||||
"@monaco-editor/react": "^4.3.1",
|
"@monaco-editor/react": "^4.3.1",
|
||||||
|
"@uiw/react-md-editor": "3.23.5",
|
||||||
"@xstate/react": "^3.0.0",
|
"@xstate/react": "^3.0.0",
|
||||||
"ansi-to-html": "0.7.2",
|
"ansi-to-html": "0.7.2",
|
||||||
"antd": "5.0.5",
|
"antd": "5.11.0",
|
||||||
"antd-table-saveas-excel": "2.2.1",
|
"antd-table-saveas-excel": "2.2.1",
|
||||||
"axios": "^0.21.0",
|
"axios": "1.6.2",
|
||||||
"babel-eslint": "^10.1.0",
|
"babel-eslint": "^10.1.0",
|
||||||
"babel-jest": "^26.6.0",
|
"babel-jest": "^29.6.4",
|
||||||
"babel-loader": "8.1.0",
|
"babel-loader": "9.1.3",
|
||||||
"babel-plugin-named-asset-import": "^0.3.7",
|
"babel-plugin-named-asset-import": "^0.3.7",
|
||||||
"babel-preset-minify": "^0.5.1",
|
"babel-preset-minify": "^0.5.1",
|
||||||
"babel-preset-react-app": "^10.0.0",
|
"babel-preset-react-app": "^10.0.1",
|
||||||
"chart.js": "3.9.1",
|
"chart.js": "3.9.1",
|
||||||
"chartjs-adapter-date-fns": "^2.0.0",
|
"chartjs-adapter-date-fns": "^2.0.0",
|
||||||
"chartjs-plugin-annotation": "^1.4.0",
|
"chartjs-plugin-annotation": "^1.4.0",
|
||||||
|
"classnames": "2.3.2",
|
||||||
"color": "^4.2.1",
|
"color": "^4.2.1",
|
||||||
|
"color-alpha": "1.1.3",
|
||||||
"cross-env": "^7.0.3",
|
"cross-env": "^7.0.3",
|
||||||
"css-loader": "4.3.0",
|
"css-loader": "5.0.0",
|
||||||
"css-minimizer-webpack-plugin": "^3.2.0",
|
"css-minimizer-webpack-plugin": "5.0.1",
|
||||||
"dayjs": "^1.10.7",
|
"dayjs": "^1.10.7",
|
||||||
"dompurify": "3.0.0",
|
"dompurify": "3.0.0",
|
||||||
"dotenv": "8.2.0",
|
"dotenv": "8.2.0",
|
||||||
"event-source-polyfill": "1.0.31",
|
"event-source-polyfill": "1.0.31",
|
||||||
|
"eventemitter3": "5.0.1",
|
||||||
"file-loader": "6.1.1",
|
"file-loader": "6.1.1",
|
||||||
"fontfaceobserver": "2.3.0",
|
"fontfaceobserver": "2.3.0",
|
||||||
"history": "4.10.1",
|
"history": "4.10.1",
|
||||||
"html-webpack-plugin": "5.1.0",
|
"html-webpack-plugin": "5.5.0",
|
||||||
"i18next": "^21.6.12",
|
"i18next": "^21.6.12",
|
||||||
"i18next-browser-languagedetector": "^6.1.3",
|
"i18next-browser-languagedetector": "^6.1.3",
|
||||||
"i18next-http-backend": "^1.3.2",
|
"i18next-http-backend": "^1.3.2",
|
||||||
@@ -65,33 +74,43 @@
|
|||||||
"less": "^4.1.2",
|
"less": "^4.1.2",
|
||||||
"less-loader": "^10.2.0",
|
"less-loader": "^10.2.0",
|
||||||
"lodash-es": "^4.17.21",
|
"lodash-es": "^4.17.21",
|
||||||
|
"lucide-react": "0.288.0",
|
||||||
"mini-css-extract-plugin": "2.4.5",
|
"mini-css-extract-plugin": "2.4.5",
|
||||||
"papaparse": "5.4.1",
|
"papaparse": "5.4.1",
|
||||||
"react": "18.2.0",
|
"react": "18.2.0",
|
||||||
|
"react-addons-update": "15.6.3",
|
||||||
|
"react-dnd": "16.0.1",
|
||||||
|
"react-dnd-html5-backend": "16.0.1",
|
||||||
"react-dom": "18.2.0",
|
"react-dom": "18.2.0",
|
||||||
"react-force-graph": "^1.41.0",
|
"react-drag-listview": "2.0.0",
|
||||||
|
"react-error-boundary": "4.0.11",
|
||||||
|
"react-force-graph": "^1.43.0",
|
||||||
|
"react-full-screen": "1.1.1",
|
||||||
"react-grid-layout": "^1.3.4",
|
"react-grid-layout": "^1.3.4",
|
||||||
|
"react-helmet-async": "1.3.0",
|
||||||
"react-i18next": "^11.16.1",
|
"react-i18next": "^11.16.1",
|
||||||
"react-intersection-observer": "9.4.1",
|
"react-markdown": "8.0.7",
|
||||||
"react-query": "^3.34.19",
|
"react-query": "3.39.3",
|
||||||
"react-redux": "^7.2.2",
|
"react-redux": "^7.2.2",
|
||||||
"react-router-dom": "^5.2.0",
|
"react-router-dom": "^5.2.0",
|
||||||
|
"react-syntax-highlighter": "15.5.0",
|
||||||
"react-use": "^17.3.2",
|
"react-use": "^17.3.2",
|
||||||
"react-virtuoso": "4.0.3",
|
"react-virtuoso": "4.0.3",
|
||||||
"redux": "^4.0.5",
|
"redux": "^4.0.5",
|
||||||
"redux-thunk": "^2.3.0",
|
"redux-thunk": "^2.3.0",
|
||||||
"stream": "^0.0.2",
|
"stream": "^0.0.2",
|
||||||
"style-loader": "1.3.0",
|
"style-loader": "1.3.0",
|
||||||
"styled-components": "^5.2.1",
|
"styled-components": "^5.3.11",
|
||||||
"terser-webpack-plugin": "^5.2.5",
|
"terser-webpack-plugin": "^5.2.5",
|
||||||
"timestamp-nano": "^1.0.0",
|
"timestamp-nano": "^1.0.0",
|
||||||
"ts-node": "^10.2.1",
|
"ts-node": "^10.2.1",
|
||||||
"tsconfig-paths-webpack-plugin": "^3.5.1",
|
"tsconfig-paths-webpack-plugin": "^3.5.1",
|
||||||
"typescript": "^4.0.5",
|
"typescript": "^4.0.5",
|
||||||
|
"uplot": "1.6.26",
|
||||||
"uuid": "^8.3.2",
|
"uuid": "^8.3.2",
|
||||||
"web-vitals": "^0.2.4",
|
"web-vitals": "^0.2.4",
|
||||||
"webpack": "^5.23.0",
|
"webpack": "5.88.2",
|
||||||
"webpack-dev-server": "^4.3.1",
|
"webpack-dev-server": "^4.15.1",
|
||||||
"xstate": "^4.31.0"
|
"xstate": "^4.31.0"
|
||||||
},
|
},
|
||||||
"browserslist": {
|
"browserslist": {
|
||||||
@@ -107,13 +126,13 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@babel/core": "^7.12.3",
|
"@babel/core": "^7.22.11",
|
||||||
"@babel/plugin-proposal-class-properties": "^7.12.13",
|
"@babel/plugin-proposal-class-properties": "^7.18.6",
|
||||||
"@babel/plugin-syntax-jsx": "^7.12.13",
|
"@babel/plugin-syntax-jsx": "^7.12.13",
|
||||||
"@babel/preset-env": "^7.12.17",
|
"@babel/preset-env": "^7.22.14",
|
||||||
"@babel/preset-react": "^7.12.13",
|
"@babel/preset-react": "^7.12.13",
|
||||||
"@babel/preset-typescript": "^7.12.17",
|
"@babel/preset-typescript": "^7.21.4",
|
||||||
"@commitlint/cli": "^16.2.4",
|
"@commitlint/cli": "^16.3.0",
|
||||||
"@commitlint/config-conventional": "^16.2.4",
|
"@commitlint/config-conventional": "^16.2.4",
|
||||||
"@jest/globals": "^27.5.1",
|
"@jest/globals": "^27.5.1",
|
||||||
"@playwright/test": "^1.22.0",
|
"@playwright/test": "^1.22.0",
|
||||||
@@ -132,30 +151,33 @@
|
|||||||
"@types/node": "^16.10.3",
|
"@types/node": "^16.10.3",
|
||||||
"@types/papaparse": "5.3.7",
|
"@types/papaparse": "5.3.7",
|
||||||
"@types/react": "18.0.26",
|
"@types/react": "18.0.26",
|
||||||
|
"@types/react-addons-update": "0.14.21",
|
||||||
"@types/react-dom": "18.0.10",
|
"@types/react-dom": "18.0.10",
|
||||||
"@types/react-grid-layout": "^1.1.2",
|
"@types/react-grid-layout": "^1.1.2",
|
||||||
|
"@types/react-helmet-async": "1.0.3",
|
||||||
"@types/react-redux": "^7.1.11",
|
"@types/react-redux": "^7.1.11",
|
||||||
"@types/react-resizable": "3.0.3",
|
"@types/react-resizable": "3.0.3",
|
||||||
"@types/react-router-dom": "^5.1.6",
|
"@types/react-router-dom": "^5.1.6",
|
||||||
|
"@types/react-syntax-highlighter": "15.5.7",
|
||||||
|
"@types/redux-mock-store": "1.0.4",
|
||||||
"@types/styled-components": "^5.1.4",
|
"@types/styled-components": "^5.1.4",
|
||||||
"@types/uuid": "^8.3.1",
|
"@types/uuid": "^8.3.1",
|
||||||
"@types/webpack": "^5.28.0",
|
"@types/webpack": "^5.28.0",
|
||||||
"@types/webpack-dev-server": "^4.3.0",
|
"@types/webpack-dev-server": "^4.7.2",
|
||||||
"@typescript-eslint/eslint-plugin": "^4.28.2",
|
"@typescript-eslint/eslint-plugin": "^4.33.0",
|
||||||
"@typescript-eslint/parser": "^4.28.2",
|
"@typescript-eslint/parser": "^4.33.0",
|
||||||
"@welldone-software/why-did-you-render": "6.2.1",
|
|
||||||
"autoprefixer": "^9.0.0",
|
"autoprefixer": "^9.0.0",
|
||||||
"babel-plugin-styled-components": "^1.12.0",
|
"babel-plugin-styled-components": "^1.12.0",
|
||||||
"compression-webpack-plugin": "9.0.0",
|
"compression-webpack-plugin": "9.0.0",
|
||||||
"copy-webpack-plugin": "^8.1.0",
|
"copy-webpack-plugin": "^8.1.0",
|
||||||
"critters-webpack-plugin": "^3.0.1",
|
"critters-webpack-plugin": "^3.0.1",
|
||||||
"eslint": "^7.30.0",
|
"eslint": "^7.32.0",
|
||||||
"eslint-config-airbnb": "^19.0.4",
|
"eslint-config-airbnb": "^19.0.4",
|
||||||
"eslint-config-airbnb-typescript": "^16.1.4",
|
"eslint-config-airbnb-typescript": "^16.1.4",
|
||||||
"eslint-config-prettier": "^8.3.0",
|
"eslint-config-prettier": "^8.3.0",
|
||||||
"eslint-config-standard": "^16.0.3",
|
"eslint-config-standard": "^16.0.3",
|
||||||
"eslint-plugin-import": "^2.25.4",
|
"eslint-plugin-import": "^2.28.1",
|
||||||
"eslint-plugin-jest": "^26.1.2",
|
"eslint-plugin-jest": "^26.9.0",
|
||||||
"eslint-plugin-jsx-a11y": "^6.5.1",
|
"eslint-plugin-jsx-a11y": "^6.5.1",
|
||||||
"eslint-plugin-node": "^11.1.0",
|
"eslint-plugin-node": "^11.1.0",
|
||||||
"eslint-plugin-prettier": "^4.0.0",
|
"eslint-plugin-prettier": "^4.0.0",
|
||||||
@@ -166,17 +188,22 @@
|
|||||||
"eslint-plugin-sonarjs": "^0.12.0",
|
"eslint-plugin-sonarjs": "^0.12.0",
|
||||||
"husky": "^7.0.4",
|
"husky": "^7.0.4",
|
||||||
"is-ci": "^3.0.1",
|
"is-ci": "^3.0.1",
|
||||||
"jest-playwright-preset": "^1.7.0",
|
"jest-playwright-preset": "^1.7.2",
|
||||||
"jest-styled-components": "^7.0.8",
|
"jest-styled-components": "^7.0.8",
|
||||||
"lint-staged": "^12.3.7",
|
"lint-staged": "^12.5.0",
|
||||||
|
"msw": "1.3.2",
|
||||||
"portfinder-sync": "^0.0.2",
|
"portfinder-sync": "^0.0.2",
|
||||||
"prettier": "2.2.1",
|
"prettier": "2.2.1",
|
||||||
|
"raw-loader": "4.0.2",
|
||||||
"react-hooks-testing-library": "0.6.0",
|
"react-hooks-testing-library": "0.6.0",
|
||||||
"react-hot-loader": "^4.13.0",
|
"react-hot-loader": "^4.13.0",
|
||||||
"react-resizable": "3.0.4",
|
"react-resizable": "3.0.4",
|
||||||
"ts-jest": "^27.1.4",
|
"redux-mock-store": "1.5.4",
|
||||||
|
"sass": "1.66.1",
|
||||||
|
"sass-loader": "13.3.2",
|
||||||
|
"ts-jest": "^27.1.5",
|
||||||
"ts-node": "^10.2.1",
|
"ts-node": "^10.2.1",
|
||||||
"typescript-plugin-css-modules": "^3.4.0",
|
"typescript-plugin-css-modules": "5.0.1",
|
||||||
"webpack-bundle-analyzer": "^4.5.0",
|
"webpack-bundle-analyzer": "^4.5.0",
|
||||||
"webpack-cli": "^4.9.2"
|
"webpack-cli": "^4.9.2"
|
||||||
},
|
},
|
||||||
@@ -187,6 +214,9 @@
|
|||||||
},
|
},
|
||||||
"resolutions": {
|
"resolutions": {
|
||||||
"@types/react": "18.0.26",
|
"@types/react": "18.0.26",
|
||||||
"@types/react-dom": "18.0.10"
|
"@types/react-dom": "18.0.10",
|
||||||
|
"debug": "4.3.4",
|
||||||
|
"semver": "7.5.4",
|
||||||
|
"xml2js": "0.5.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
BIN
frontend/public/Images/notFound404.png
Normal file
|
After Width: | Height: | Size: 43 KiB |
1
frontend/public/Logos/cmd-terminal.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 103.53" style="enable-background:new 0 0 122.88 103.53" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;} .st0{fill:#1668dc;} .st1{fill:#FFFFFF;}</style><g><path class="st0" d="M5.47,0h111.93c3.01,0,5.47,2.46,5.47,5.47v92.58c0,3.01-2.46,5.47-5.47,5.47H5.47 c-3.01,0-5.47-2.46-5.47-5.47V5.47C0,2.46,2.46,0,5.47,0L5.47,0z M31.84,38.55l17.79,18.42l2.14,2.13l-2.12,2.16L31.68,80.31 l-5.07-5l15.85-16.15L26.81,43.6L31.84,38.55L31.84,38.55z M94.1,79.41H54.69v-6.84H94.1V79.41L94.1,79.41z M38.19,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S35,9.83,38.19,9.83L38.19,9.83z M18.95,9.83 c3.19,0,5.78,2.59,5.78,5.78s-2.59,5.78-5.78,5.78c-3.19,0-5.78-2.59-5.78-5.78S15.75,9.83,18.95,9.83L18.95,9.83z M7.49,5.41 h107.91c1.15,0,2.09,0.94,2.09,2.09v18.32H5.4V7.5C5.4,6.35,6.34,5.41,7.49,5.41L7.49,5.41z"/></g></svg>
|
||||||
|
After Width: | Height: | Size: 1.0 KiB |
1
frontend/public/Logos/docker.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 88.17" style="enable-background:new 0 0 122.88 88.17" xml:space="preserve"><style type="text/css">.st0{fill:#0091E2;}</style><g><path class="st0" d="M121.68,33.34c-0.34-0.28-3.42-2.62-10.03-2.62c-1.71,0-3.48,0.17-5.19,0.46c-1.25-8.72-8.49-12.94-8.78-13.16 l-1.77-1.03l-1.14,1.65c-1.42,2.22-2.51,4.73-3.13,7.29c-1.2,4.96-0.46,9.63,2.05,13.62c-3.02,1.71-7.92,2.11-8.95,2.17l-80.93,0 c-2.11,0-3.82,1.71-3.82,3.82c-0.11,7.07,1.08,14.13,3.53,20.8c2.79,7.29,6.95,12.71,12.31,16.01c6.04,3.7,15.9,5.81,27.01,5.81 c5.01,0,10.03-0.46,14.99-1.37c6.9-1.25,13.51-3.65,19.6-7.12c5.02-2.91,9.52-6.61,13.34-10.94c6.44-7.24,10.26-15.33,13.05-22.51 c0.4,0,0.74,0,1.14,0c7.01,0,11.34-2.79,13.73-5.19c1.6-1.48,2.79-3.31,3.65-5.36l0.51-1.48L121.68,33.34L121.68,33.34z M71.59,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C70.68,38.98,71.08,39.38,71.59,39.38L71.59,39.38z M56.49,11.63h10.83c0.51,0,0.97-0.4,0.97-0.97V0.97c0-0.51-0.46-0.97-0.97-0.97 L56.49,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C55.52,11.17,55.97,11.63,56.49,11.63L56.49,11.63z M56.49,25.53h10.83 c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.46-0.97-0.97-0.97H56.49c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C55.52,25.08,55.97,25.53,56.49,25.53L56.49,25.53z M41.5,25.53h10.83c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97 l0,0H41.5c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C40.53,25.08,40.93,25.53,41.5,25.53L41.5,25.53z M26.28,25.53h10.83 c0.51,0,0.97-0.46,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0H26.28c-0.51,0-0.97,0.4-0.97,0.97v9.69 C25.37,25.08,25.77,25.53,26.28,25.53L26.28,25.53z M56.49,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97v-9.69c0-0.51-0.4-0.97-0.97-0.97 l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69C55.52,38.98,55.97,39.38,56.49,39.38L56.49,39.38L56.49,39.38z M41.5,39.38 
h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0v9.69 C40.53,38.98,40.93,39.38,41.5,39.38L41.5,39.38L41.5,39.38z M26.28,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69 c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97v9.69C25.37,38.98,25.77,39.38,26.28,39.38L26.28,39.38z M11.35,39.38h10.83c0.51,0,0.97-0.4,0.97-0.97l0,0v-9.69c0-0.51-0.4-0.97-0.97-0.97l0,0l-10.83,0c-0.51,0-0.97,0.4-0.97,0.97l0,0 v9.69C10.44,38.98,10.84,39.38,11.35,39.38L11.35,39.38L11.35,39.38z"/></g></svg>
|
||||||
|
After Width: | Height: | Size: 2.5 KiB |
BIN
frontend/public/Logos/fluent-bit.png
Normal file
|
After Width: | Height: | Size: 48 KiB |
BIN
frontend/public/Logos/fluentd.png
Normal file
|
After Width: | Height: | Size: 20 KiB |
1
frontend/public/Logos/kubernetes.svg
Normal file
|
After Width: | Height: | Size: 5.9 KiB |
1
frontend/public/Logos/logstash.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg id="Layer_1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 80 80" width="2500" height="2500"><style>.st0{fill:#f3bd19}.st1{fill:#231f20}.st2{fill:#3ebeb0}.st3{fill:#37a595}.st4{fill:none}</style><path class="st0" d="M41.1 41.9H15.6V12.5h7.7c9.9 0 17.8 8 17.8 17.8v11.6z"/><path class="st1" d="M41.1 67.5c-14.1 0-25.6-11.4-25.6-25.6h25.6v25.6z"/><path class="st2" d="M41.1 41.9h23.3v25.6H41.1z"/><path class="st3" d="M41.1 41.9h5.4v25.6h-5.4z"/><path class="st4" d="M0 0h80v80H0z"/></svg>
|
||||||
|
After Width: | Height: | Size: 494 B |
1
frontend/public/Logos/node-js.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 109 122.88" style="enable-background:new 0 0 109 122.88" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;fill:#689F63;}</style><g><path class="st0" d="M68.43,87.08c-19.7,0-23.83-9.04-23.83-16.63c0-0.72,0.58-1.3,1.3-1.3h5.82c0.64,0,1.18,0.47,1.28,1.1 c0.88,5.93,3.49,8.92,15.41,8.92c9.49,0,13.52-2.14,13.52-7.18c0-2.9-1.15-5.05-15.89-6.49c-12.33-1.22-19.95-3.93-19.95-13.8 c0-9.08,7.66-14.49,20.5-14.49c14.42,0,21.56,5,22.46,15.76c0.03,0.37-0.1,0.73-0.35,1c-0.25,0.26-0.6,0.42-0.96,0.42H81.9 c-0.61,0-1.14-0.43-1.26-1.01c-1.41-6.23-4.81-8.23-14.07-8.23c-10.36,0-11.56,3.61-11.56,6.31c0,3.28,1.42,4.24,15.4,6.09 c13.84,1.84,20.41,4.43,20.41,14.16c0,9.81-8.18,15.43-22.45,15.43L68.43,87.08L68.43,87.08z M54.52,122.88 c-1.65,0-3.28-0.43-4.72-1.26l-15.03-8.9c-2.25-1.26-1.15-1.7-0.41-1.96c2.99-1.05,3.6-1.28,6.8-3.1c0.34-0.19,0.78-0.12,1.12,0.08 l11.55,6.85c0.42,0.23,1.01,0.23,1.4,0l45.03-25.99c0.42-0.24,0.69-0.72,0.69-1.22V35.43c0-0.52-0.27-0.98-0.7-1.24L55.23,8.22 c-0.42-0.25-0.97-0.25-1.39,0l-45,25.97c-0.44,0.25-0.71,0.73-0.71,1.23v51.96c0,0.5,0.27,0.97,0.7,1.21l12.33,7.12 c6.69,3.35,10.79-0.6,10.79-4.56V39.86c0-0.73,0.57-1.3,1.31-1.3l5.7,0c0.71,0,1.3,0.56,1.3,1.3v51.31 c0,8.93-4.87,14.05-13.33,14.05c-2.6,0-4.66,0-10.38-2.82L4.72,95.59C1.8,93.9,0,90.75,0,87.38V35.42c0-3.38,1.8-6.54,4.72-8.21 l45.07-26c2.85-1.61,6.64-1.61,9.47,0l45.02,26.01c2.91,1.68,4.72,4.82,4.72,8.21v51.96c0,3.37-1.81,6.51-4.72,8.21l-45.02,26 c-1.44,0.83-3.08,1.26-4.74,1.26L54.52,122.88L54.52,122.88z M54.52,122.88L54.52,122.88L54.52,122.88L54.52,122.88z"/></g></svg>
|
||||||
|
After Width: | Height: | Size: 1.7 KiB |
1
frontend/public/Logos/software-window.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 122.88 98.18" style="enable-background:new 0 0 122.88 98.18" xml:space="preserve"><style type="text/css">.st0{fill-rule:evenodd;clip-rule:evenodd;} .st0{fill:#1668dc;} .st1{fill:#FFFFFF;}</style><g><path class="st0" d="M3.42,0h116.05c1.88,0,3.41,1.54,3.41,3.41v91.36c0,1.88-1.54,3.41-3.41,3.41l-116.05,0 C1.54,98.18,0,96.65,0,94.77L0,3.41C0,1.53,1.54,0,3.42,0L3.42,0L3.42,0z M25.89,8.19c2.05,0,3.72,1.67,3.72,3.72 c0,2.05-1.67,3.72-3.72,3.72c-2.05,0-3.72-1.67-3.72-3.72C22.17,9.85,23.83,8.19,25.89,8.19L25.89,8.19z M103.07,7.69l2.52,2.77 l2.52-2.77l1.97,1.79l-2.69,2.96l2.69,2.96l-1.97,1.79l-2.52-2.77l-2.52,2.77l-1.97-1.79l2.69-2.96l-2.69-2.96L103.07,7.69 L103.07,7.69z M14.52,8.19c2.05,0,3.72,1.67,3.72,3.72c0,2.05-1.67,3.72-3.72,3.72c-2.05,0-3.72-1.67-3.72-3.72 C10.79,9.85,12.46,8.19,14.52,8.19L14.52,8.19z M37.26,8.19c2.05,0,3.72,1.67,3.72,3.72c0,2.05-1.67,3.72-3.72,3.72 c-2.05,0-3.72-1.67-3.72-3.72C33.54,9.85,35.21,8.19,37.26,8.19L37.26,8.19z M14.05,22.75h93.33c1.77,0,3.22,1.49,3.22,3.22v59.2 c0,1.73-1.49,3.22-3.22,3.22l-93.33,0c-1.73,0-3.22-1.45-3.22-3.22v-59.2C10.84,24.2,12.29,22.75,14.05,22.75L14.05,22.75 L14.05,22.75z"/></g></svg>
|
||||||
|
After Width: | Height: | Size: 1.3 KiB |
9
frontend/public/Logos/syslogs.svg
Normal file
|
After Width: | Height: | Size: 6.2 KiB |
@@ -1,112 +1,114 @@
|
|||||||
{
|
{
|
||||||
"target_missing": "Please enter a threshold to proceed",
|
"target_missing": "Please enter a threshold to proceed",
|
||||||
"rule_test_fired": "Test notification sent successfully",
|
"rule_test_fired": "Test notification sent successfully",
|
||||||
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
|
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
|
||||||
"button_testrule": "Test Notification",
|
"button_testrule": "Test Notification",
|
||||||
"label_channel_select": "Notification Channels",
|
"label_channel_select": "Notification Channels",
|
||||||
"placeholder_channel_select": "select one or more channels",
|
"placeholder_channel_select": "select one or more channels",
|
||||||
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
|
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
|
||||||
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
|
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
|
||||||
"preview_chart_threshold_label": "Threshold",
|
"preview_chart_threshold_label": "Threshold",
|
||||||
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
|
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
|
||||||
"button_yes": "Yes",
|
"button_yes": "Yes",
|
||||||
"button_no": "No",
|
"button_no": "No",
|
||||||
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
|
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
|
||||||
"remove_label_success": "Labels cleared",
|
"remove_label_success": "Labels cleared",
|
||||||
"alert_form_step1": "Step 1 - Define the metric",
|
"alert_form_step1": "Step 1 - Define the metric",
|
||||||
"alert_form_step2": "Step 2 - Define Alert Conditions",
|
"alert_form_step2": "Step 2 - Define Alert Conditions",
|
||||||
"alert_form_step3": "Step 3 - Alert Configuration",
|
"alert_form_step3": "Step 3 - Alert Configuration",
|
||||||
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
|
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
|
||||||
"confirm_save_title": "Save Changes",
|
"confirm_save_title": "Save Changes",
|
||||||
"confirm_save_content_part1": "Your alert built with",
|
"confirm_save_content_part1": "Your alert built with",
|
||||||
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
|
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
|
||||||
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
|
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
|
||||||
"rule_created": "Rule created successfully",
|
"rule_created": "Rule created successfully",
|
||||||
"rule_edited": "Rule edited successfully",
|
"rule_edited": "Rule edited successfully",
|
||||||
"expression_missing": "expression is missing in {{where}}",
|
"expression_missing": "expression is missing in {{where}}",
|
||||||
"metricname_missing": "metric name is missing in {{where}}",
|
"metricname_missing": "metric name is missing in {{where}}",
|
||||||
"condition_required": "at least one metric condition is required",
|
"condition_required": "at least one metric condition is required",
|
||||||
"alertname_required": "alert name is required",
|
"alertname_required": "alert name is required",
|
||||||
"promql_required": "promql expression is required when query format is set to PromQL",
|
"promql_required": "promql expression is required when query format is set to PromQL",
|
||||||
"chquery_required": "query is required when query format is set to ClickHouse",
|
"chquery_required": "query is required when query format is set to ClickHouse",
|
||||||
"button_savechanges": "Save Rule",
|
"button_savechanges": "Save Rule",
|
||||||
"button_createrule": "Create Rule",
|
"button_createrule": "Create Rule",
|
||||||
"button_returntorules": "Return to rules",
|
"button_returntorules": "Return to rules",
|
||||||
"button_cancelchanges": "Cancel",
|
"button_cancelchanges": "Cancel",
|
||||||
"button_discard": "Discard",
|
"button_discard": "Discard",
|
||||||
"text_condition1": "Send a notification when the metric is",
|
"text_condition1": "Send a notification when",
|
||||||
"text_condition2": "the threshold",
|
"text_condition2": "the threshold",
|
||||||
"text_condition3": "during the last",
|
"text_condition3": "during the last",
|
||||||
"option_5min": "5 mins",
|
"option_5min": "5 mins",
|
||||||
"option_10min": "10 mins",
|
"option_10min": "10 mins",
|
||||||
"option_15min": "15 mins",
|
"option_15min": "15 mins",
|
||||||
"option_60min": "60 mins",
|
"option_60min": "60 mins",
|
||||||
"option_4hours": "4 hours",
|
"option_4hours": "4 hours",
|
||||||
"option_24hours": "24 hours",
|
"option_24hours": "24 hours",
|
||||||
"field_threshold": "Alert Threshold",
|
"field_threshold": "Alert Threshold",
|
||||||
"option_allthetimes": "all the times",
|
"option_allthetimes": "all the times",
|
||||||
"option_atleastonce": "at least once",
|
"option_atleastonce": "at least once",
|
||||||
"option_onaverage": "on average",
|
"option_onaverage": "on average",
|
||||||
"option_intotal": "in total",
|
"option_intotal": "in total",
|
||||||
"option_above": "above",
|
"option_above": "above",
|
||||||
"option_below": "below",
|
"option_below": "below",
|
||||||
"option_equal": "is equal to",
|
"option_equal": "is equal to",
|
||||||
"option_notequal": "not equal to",
|
"option_notequal": "not equal to",
|
||||||
"button_query": "Query",
|
"button_query": "Query",
|
||||||
"button_formula": "Formula",
|
"button_formula": "Formula",
|
||||||
"tab_qb": "Query Builder",
|
"tab_qb": "Query Builder",
|
||||||
"tab_promql": "PromQL",
|
"tab_promql": "PromQL",
|
||||||
"tab_chquery": "ClickHouse Query",
|
"tab_chquery": "ClickHouse Query",
|
||||||
"title_confirm": "Confirm",
|
"title_confirm": "Confirm",
|
||||||
"button_ok": "Yes",
|
"button_ok": "Yes",
|
||||||
"button_cancel": "No",
|
"button_cancel": "No",
|
||||||
"field_promql_expr": "PromQL Expression",
|
"field_promql_expr": "PromQL Expression",
|
||||||
"field_alert_name": "Alert Name",
|
"field_alert_name": "Alert Name",
|
||||||
"field_alert_desc": "Alert Description",
|
"field_alert_desc": "Alert Description",
|
||||||
"field_labels": "Labels",
|
"field_labels": "Labels",
|
||||||
"field_severity": "Severity",
|
"field_severity": "Severity",
|
||||||
"option_critical": "Critical",
|
"option_critical": "Critical",
|
||||||
"option_error": "Error",
|
"option_error": "Error",
|
||||||
"option_warning": "Warning",
|
"option_warning": "Warning",
|
||||||
"option_info": "Info",
|
"option_info": "Info",
|
||||||
"user_guide_headline": "Steps to create an Alert",
|
"user_guide_headline": "Steps to create an Alert",
|
||||||
"user_guide_qb_step1": "Step 1 - Define the metric",
|
"user_guide_qb_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
|
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
|
||||||
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
|
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
|
||||||
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
|
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
|
||||||
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
|
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
|
||||||
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
|
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_qb_step2b": "Enter the Alert threshold",
|
"user_guide_qb_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_qb_step3": "Step 3 -Alert Configuration",
|
"user_guide_qb_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
|
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_guide_pql_step1": "Step 1 - Define the metric",
|
"user_guide_pql_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_pql_step1a": "Write a PromQL query for the metric",
|
"user_guide_pql_step1a": "Write a PromQL query for the metric",
|
||||||
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
|
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
|
||||||
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_pql_step2b": "Enter the Alert threshold",
|
"user_guide_pql_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_pql_step3": "Step 3 -Alert Configuration",
|
"user_guide_pql_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
|
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_guide_ch_step1": "Step 1 - Define the metric",
|
"user_guide_ch_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
|
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
|
||||||
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
|
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
|
||||||
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_ch_step2b": "Enter the Alert threshold",
|
"user_guide_ch_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_ch_step3": "Step 3 -Alert Configuration",
|
"user_guide_ch_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
|
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_tooltip_more_help": "More details on how to create alerts",
|
"user_tooltip_more_help": "More details on how to create alerts",
|
||||||
"choose_alert_type": "Choose a type for the alert:",
|
"choose_alert_type": "Choose a type for the alert:",
|
||||||
"metric_based_alert": "Metric based Alert",
|
"metric_based_alert": "Metric based Alert",
|
||||||
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
|
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
|
||||||
"log_based_alert": "Log-based Alert",
|
"log_based_alert": "Log-based Alert",
|
||||||
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
|
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
|
||||||
"traces_based_alert": "Trace-based Alert",
|
"traces_based_alert": "Trace-based Alert",
|
||||||
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
|
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
|
||||||
"exceptions_based_alert": "Exceptions-based Alert",
|
"exceptions_based_alert": "Exceptions-based Alert",
|
||||||
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data."
|
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.",
|
||||||
}
|
"field_unit": "Threshold unit",
|
||||||
|
"selected_query_placeholder": "Select query"
|
||||||
|
}
|
||||||
|
|||||||
@@ -13,5 +13,17 @@
|
|||||||
"import_dashboard_by_pasting": "Import dashboard by pasting JSON or importing JSON file",
|
"import_dashboard_by_pasting": "Import dashboard by pasting JSON or importing JSON file",
|
||||||
"error_loading_json": "Error loading JSON file",
|
"error_loading_json": "Error loading JSON file",
|
||||||
"empty_json_not_allowed": "Empty JSON is not allowed",
|
"empty_json_not_allowed": "Empty JSON is not allowed",
|
||||||
"new_dashboard_title": "Sample Title"
|
"new_dashboard_title": "Sample Title",
|
||||||
|
"layout_saved_successfully": "Layout saved successfully",
|
||||||
|
"add_panel": "Add Panel",
|
||||||
|
"save_layout": "Save Layout",
|
||||||
|
"variable_updated_successfully": "Variable updated successfully",
|
||||||
|
"error_while_updating_variable": "Error while updating variable",
|
||||||
|
"dashboard_has_been_updated": "Dashboard has been updated",
|
||||||
|
"do_you_want_to_refresh_the_dashboard": "Do you want to refresh the dashboard?",
|
||||||
|
"delete_dashboard_success": "{{name}} dashboard deleted successfully",
|
||||||
|
"dashboard_unsave_changes": "There are unsaved changes in the Query builder, please stage and run the query or the changes will be lost. Press OK to discard.",
|
||||||
|
"dashboard_save_changes": "Your graph built with {{queryTag}} query will be saved. Press OK to confirm.",
|
||||||
|
"your_graph_build_with": "Your graph built with",
|
||||||
|
"dashboar_ok_confirm": "query will be saved. Press OK to confirm."
|
||||||
}
|
}
|
||||||
|
|||||||
3
frontend/public/locales/en-GB/explorer.json
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
{
|
||||||
|
"name_of_the_view": "Name of the view"
|
||||||
|
}
|
||||||
1
frontend/public/locales/en-GB/logs.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{ "fetching_log_lines": "Fetching log lines" }
|
||||||
@@ -1,9 +1,14 @@
|
|||||||
{
|
{
|
||||||
"general": "General",
|
"general": "General",
|
||||||
"alert_channels": "Alert Channels",
|
"alert_channels": "Alert Channels",
|
||||||
"organization_settings": "Organization Settings",
|
"organization_settings": "Organization Settings",
|
||||||
"my_settings": "My Settings",
|
"ingestion_settings": "Ingestion Settings",
|
||||||
"overview_metrics": "Overview Metrics",
|
"my_settings": "My Settings",
|
||||||
"dbcall_metrics": "Database Calls",
|
"overview_metrics": "Overview Metrics",
|
||||||
"external_metrics": "External Calls"
|
"dbcall_metrics": "Database Calls",
|
||||||
}
|
"external_metrics": "External Calls",
|
||||||
|
"pipeline": "Pipeline",
|
||||||
|
"pipelines": "Pipelines",
|
||||||
|
"archives": "Archives",
|
||||||
|
"logs_to_metrics": "Logs To Metrics"
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,85 +1,85 @@
|
|||||||
{
|
{
|
||||||
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
|
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
|
||||||
"preview_chart_threshold_label": "Threshold",
|
"preview_chart_threshold_label": "Threshold",
|
||||||
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
|
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
|
||||||
"button_yes": "Yes",
|
"button_yes": "Yes",
|
||||||
"button_no": "No",
|
"button_no": "No",
|
||||||
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
|
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
|
||||||
"remove_label_success": "Labels cleared",
|
"remove_label_success": "Labels cleared",
|
||||||
"alert_form_step1": "Step 1 - Define the metric",
|
"alert_form_step1": "Step 1 - Define the metric",
|
||||||
"alert_form_step2": "Step 2 - Define Alert Conditions",
|
"alert_form_step2": "Step 2 - Define Alert Conditions",
|
||||||
"alert_form_step3": "Step 3 - Alert Configuration",
|
"alert_form_step3": "Step 3 - Alert Configuration",
|
||||||
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
|
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
|
||||||
"confirm_save_title": "Save Changes",
|
"confirm_save_title": "Save Changes",
|
||||||
"confirm_save_content_part1": "Your alert built with",
|
"confirm_save_content_part1": "Your alert built with",
|
||||||
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
|
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
|
||||||
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
|
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
|
||||||
"rule_created": "Rule created successfully",
|
"rule_created": "Rule created successfully",
|
||||||
"rule_edited": "Rule edited successfully",
|
"rule_edited": "Rule edited successfully",
|
||||||
"expression_missing": "expression is missing in {{where}}",
|
"expression_missing": "expression is missing in {{where}}",
|
||||||
"metricname_missing": "metric name is missing in {{where}}",
|
"metricname_missing": "metric name is missing in {{where}}",
|
||||||
"condition_required": "at least one metric condition is required",
|
"condition_required": "at least one metric condition is required",
|
||||||
"alertname_required": "alert name is required",
|
"alertname_required": "alert name is required",
|
||||||
"promql_required": "promql expression is required when query format is set to PromQL",
|
"promql_required": "promql expression is required when query format is set to PromQL",
|
||||||
"button_savechanges": "Save Rule",
|
"button_savechanges": "Save Rule",
|
||||||
"button_createrule": "Create Rule",
|
"button_createrule": "Create Rule",
|
||||||
"button_returntorules": "Return to rules",
|
"button_returntorules": "Return to rules",
|
||||||
"button_cancelchanges": "Cancel",
|
"button_cancelchanges": "Cancel",
|
||||||
"button_discard": "Discard",
|
"button_discard": "Discard",
|
||||||
"text_condition1": "Send a notification when the metric is",
|
"text_condition1": "Send a notification when",
|
||||||
"text_condition2": "the threshold",
|
"text_condition2": "the threshold",
|
||||||
"text_condition3": "during the last",
|
"text_condition3": "during the last",
|
||||||
"option_5min": "5 mins",
|
"option_5min": "5 mins",
|
||||||
"option_10min": "10 mins",
|
"option_10min": "10 mins",
|
||||||
"option_15min": "15 mins",
|
"option_15min": "15 mins",
|
||||||
"option_60min": "60 mins",
|
"option_60min": "60 mins",
|
||||||
"option_4hours": "4 hours",
|
"option_4hours": "4 hours",
|
||||||
"option_24hours": "24 hours",
|
"option_24hours": "24 hours",
|
||||||
"field_threshold": "Alert Threshold",
|
"field_threshold": "Alert Threshold",
|
||||||
"option_allthetimes": "all the times",
|
"option_allthetimes": "all the times",
|
||||||
"option_atleastonce": "at least once",
|
"option_atleastonce": "at least once",
|
||||||
"option_onaverage": "on average",
|
"option_onaverage": "on average",
|
||||||
"option_intotal": "in total",
|
"option_intotal": "in total",
|
||||||
"option_above": "above",
|
"option_above": "above",
|
||||||
"option_below": "below",
|
"option_below": "below",
|
||||||
"option_equal": "is equal to",
|
"option_equal": "is equal to",
|
||||||
"option_notequal": "not equal to",
|
"option_notequal": "not equal to",
|
||||||
"button_query": "Query",
|
"button_query": "Query",
|
||||||
"button_formula": "Formula",
|
"button_formula": "Formula",
|
||||||
"tab_qb": "Query Builder",
|
"tab_qb": "Query Builder",
|
||||||
"tab_promql": "PromQL",
|
"tab_promql": "PromQL",
|
||||||
"title_confirm": "Confirm",
|
"title_confirm": "Confirm",
|
||||||
"button_ok": "Yes",
|
"button_ok": "Yes",
|
||||||
"button_cancel": "No",
|
"button_cancel": "No",
|
||||||
"field_promql_expr": "PromQL Expression",
|
"field_promql_expr": "PromQL Expression",
|
||||||
"field_alert_name": "Alert Name",
|
"field_alert_name": "Alert Name",
|
||||||
"field_alert_desc": "Alert Description",
|
"field_alert_desc": "Alert Description",
|
||||||
"field_labels": "Labels",
|
"field_labels": "Labels",
|
||||||
"field_severity": "Severity",
|
"field_severity": "Severity",
|
||||||
"option_critical": "Critical",
|
"option_critical": "Critical",
|
||||||
"option_error": "Error",
|
"option_error": "Error",
|
||||||
"option_warning": "Warning",
|
"option_warning": "Warning",
|
||||||
"option_info": "Info",
|
"option_info": "Info",
|
||||||
"user_guide_headline": "Steps to create an Alert",
|
"user_guide_headline": "Steps to create an Alert",
|
||||||
"user_guide_qb_step1": "Step 1 - Define the metric",
|
"user_guide_qb_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
|
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
|
||||||
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
|
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
|
||||||
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
|
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
|
||||||
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
|
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
|
||||||
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
|
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_qb_step2b": "Enter the Alert threshold",
|
"user_guide_qb_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_qb_step3": "Step 3 -Alert Configuration",
|
"user_guide_qb_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
|
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_guide_pql_step1": "Step 1 - Define the metric",
|
"user_guide_pql_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_pql_step1a": "Write a PromQL query for the metric",
|
"user_guide_pql_step1a": "Write a PromQL query for the metric",
|
||||||
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
|
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
|
||||||
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_pql_step2b": "Enter the Alert threshold",
|
"user_guide_pql_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_pql_step3": "Step 3 -Alert Configuration",
|
"user_guide_pql_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
|
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_tooltip_more_help": "More details on how to create alerts"
|
"user_tooltip_more_help": "More details on how to create alerts"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,7 +8,7 @@
|
|||||||
"label_orgname": "Organization Name",
|
"label_orgname": "Organization Name",
|
||||||
"placeholder_orgname": "Your Company",
|
"placeholder_orgname": "Your Company",
|
||||||
"prompt_keepme_posted": "Keep me updated on new SigNoz features",
|
"prompt_keepme_posted": "Keep me updated on new SigNoz features",
|
||||||
"prompt_anonymise": "Anonymise my usage date. We collect data to measure product usage",
|
"prompt_anonymise": "Anonymise my usage data. We collect data to measure product usage",
|
||||||
"failed_confirm_password": "Passwords don’t match. Please try again",
|
"failed_confirm_password": "Passwords don’t match. Please try again",
|
||||||
"unexpected_error": "Something went wrong",
|
"unexpected_error": "Something went wrong",
|
||||||
"failed_to_initiate_login": "Signup completed but failed to initiate login",
|
"failed_to_initiate_login": "Signup completed but failed to initiate login",
|
||||||
|
|||||||
41
frontend/public/locales/en-GB/titles.json
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
{
|
||||||
|
"SIGN_UP": "SigNoz | Sign Up",
|
||||||
|
"LOGIN": "SigNoz | Login",
|
||||||
|
"GET_STARTED": "SigNoz | Get Started",
|
||||||
|
"SERVICE_METRICS": "SigNoz | Service Metrics",
|
||||||
|
"SERVICE_MAP": "SigNoz | Service Map",
|
||||||
|
"TRACE": "SigNoz | Trace",
|
||||||
|
"TRACE_DETAIL": "SigNoz | Trace Detail",
|
||||||
|
"TRACES_EXPLORER": "SigNoz | Traces Explorer",
|
||||||
|
"SETTINGS": "SigNoz | Settings",
|
||||||
|
"USAGE_EXPLORER": "SigNoz | Usage Explorer",
|
||||||
|
"APPLICATION": "SigNoz | Home",
|
||||||
|
"BILLING": "SigNoz | Billing",
|
||||||
|
"ALL_DASHBOARD": "SigNoz | All Dashboards",
|
||||||
|
"DASHBOARD": "SigNoz | Dashboard",
|
||||||
|
"DASHBOARD_WIDGET": "SigNoz | Dashboard Widget",
|
||||||
|
"EDIT_ALERTS": "SigNoz | Edit Alerts",
|
||||||
|
"LIST_ALL_ALERT": "SigNoz | All Alerts",
|
||||||
|
"ALERTS_NEW": "SigNoz | New Alert",
|
||||||
|
"ALL_CHANNELS": "SigNoz | All Channels",
|
||||||
|
"CHANNELS_NEW": "SigNoz | New Channel",
|
||||||
|
"CHANNELS_EDIT": "SigNoz | Edit Channel",
|
||||||
|
"ALL_ERROR": "SigNoz | All Errors",
|
||||||
|
"ERROR_DETAIL": "SigNoz | Error Detail",
|
||||||
|
"VERSION": "SigNoz | Version",
|
||||||
|
"MY_SETTINGS": "SigNoz | My Settings",
|
||||||
|
"ORG_SETTINGS": "SigNoz | Organization Settings",
|
||||||
|
"INGESTION_SETTINGS": "SigNoz | Ingestion Settings",
|
||||||
|
"SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong",
|
||||||
|
"UN_AUTHORIZED": "SigNoz | Unauthorized",
|
||||||
|
"NOT_FOUND": "SigNoz | Page Not Found",
|
||||||
|
"LOGS": "SigNoz | Logs",
|
||||||
|
"LOGS_EXPLORER": "SigNoz | Logs Explorer",
|
||||||
|
"LIVE_LOGS": "SigNoz | Live Logs",
|
||||||
|
"HOME_PAGE": "Open source Observability Platform | SigNoz",
|
||||||
|
"PASSWORD_RESET": "SigNoz | Password Reset",
|
||||||
|
"LIST_LICENSES": "SigNoz | List of Licenses",
|
||||||
|
"WORKSPACE_LOCKED": "SigNoz | Workspace Locked",
|
||||||
|
"SUPPORT": "SigNoz | Support",
|
||||||
|
"DEFAULT": "Open source Observability Platform | SigNoz"
|
||||||
|
}
|
||||||
@@ -2,7 +2,7 @@
|
|||||||
"options_menu": {
|
"options_menu": {
|
||||||
"options": "Options",
|
"options": "Options",
|
||||||
"format": "Format",
|
"format": "Format",
|
||||||
"row": "Row",
|
"raw": "Raw",
|
||||||
"default": "Default",
|
"default": "Default",
|
||||||
"column": "Column",
|
"column": "Column",
|
||||||
"maxLines": "Max lines per Row",
|
"maxLines": "Max lines per Row",
|
||||||
|
|||||||
@@ -12,6 +12,8 @@
|
|||||||
"routes": {
|
"routes": {
|
||||||
"general": "General",
|
"general": "General",
|
||||||
"alert_channels": "Alert Channels",
|
"alert_channels": "Alert Channels",
|
||||||
"all_errors": "All Exceptions"
|
"all_errors": "All Exceptions",
|
||||||
|
"index_fields": "Index Fields",
|
||||||
|
"pipelines": "Pipelines"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,112 +1,114 @@
|
|||||||
{
|
{
|
||||||
"target_missing": "Please enter a threshold to proceed",
|
"target_missing": "Please enter a threshold to proceed",
|
||||||
"rule_test_fired": "Test notification sent successfully",
|
"rule_test_fired": "Test notification sent successfully",
|
||||||
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
|
"no_alerts_found": "No alerts found during the evaluation. This happens when rule condition is unsatisfied. You may adjust the rule threshold and retry.",
|
||||||
"button_testrule": "Test Notification",
|
"button_testrule": "Test Notification",
|
||||||
"label_channel_select": "Notification Channels",
|
"label_channel_select": "Notification Channels",
|
||||||
"placeholder_channel_select": "select one or more channels",
|
"placeholder_channel_select": "select one or more channels",
|
||||||
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
|
"channel_select_tooltip": "Leave empty to send this alert on all the configured channels",
|
||||||
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
|
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
|
||||||
"preview_chart_threshold_label": "Threshold",
|
"preview_chart_threshold_label": "Threshold",
|
||||||
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
|
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
|
||||||
"button_yes": "Yes",
|
"button_yes": "Yes",
|
||||||
"button_no": "No",
|
"button_no": "No",
|
||||||
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
|
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
|
||||||
"remove_label_success": "Labels cleared",
|
"remove_label_success": "Labels cleared",
|
||||||
"alert_form_step1": "Step 1 - Define the metric",
|
"alert_form_step1": "Step 1 - Define the metric",
|
||||||
"alert_form_step2": "Step 2 - Define Alert Conditions",
|
"alert_form_step2": "Step 2 - Define Alert Conditions",
|
||||||
"alert_form_step3": "Step 3 - Alert Configuration",
|
"alert_form_step3": "Step 3 - Alert Configuration",
|
||||||
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
|
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
|
||||||
"confirm_save_title": "Save Changes",
|
"confirm_save_title": "Save Changes",
|
||||||
"confirm_save_content_part1": "Your alert built with",
|
"confirm_save_content_part1": "Your alert built with",
|
||||||
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
|
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
|
||||||
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
|
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
|
||||||
"rule_created": "Rule created successfully",
|
"rule_created": "Rule created successfully",
|
||||||
"rule_edited": "Rule edited successfully",
|
"rule_edited": "Rule edited successfully",
|
||||||
"expression_missing": "expression is missing in {{where}}",
|
"expression_missing": "expression is missing in {{where}}",
|
||||||
"metricname_missing": "metric name is missing in {{where}}",
|
"metricname_missing": "metric name is missing in {{where}}",
|
||||||
"condition_required": "at least one metric condition is required",
|
"condition_required": "at least one metric condition is required",
|
||||||
"alertname_required": "alert name is required",
|
"alertname_required": "alert name is required",
|
||||||
"promql_required": "promql expression is required when query format is set to PromQL",
|
"promql_required": "promql expression is required when query format is set to PromQL",
|
||||||
"chquery_required": "query is required when query format is set to ClickHouse",
|
"chquery_required": "query is required when query format is set to ClickHouse",
|
||||||
"button_savechanges": "Save Rule",
|
"button_savechanges": "Save Rule",
|
||||||
"button_createrule": "Create Rule",
|
"button_createrule": "Create Rule",
|
||||||
"button_returntorules": "Return to rules",
|
"button_returntorules": "Return to rules",
|
||||||
"button_cancelchanges": "Cancel",
|
"button_cancelchanges": "Cancel",
|
||||||
"button_discard": "Discard",
|
"button_discard": "Discard",
|
||||||
"text_condition1": "Send a notification when the metric is",
|
"text_condition1": "Send a notification when",
|
||||||
"text_condition2": "the threshold",
|
"text_condition2": "the threshold",
|
||||||
"text_condition3": "during the last",
|
"text_condition3": "during the last",
|
||||||
"option_5min": "5 mins",
|
"option_5min": "5 mins",
|
||||||
"option_10min": "10 mins",
|
"option_10min": "10 mins",
|
||||||
"option_15min": "15 mins",
|
"option_15min": "15 mins",
|
||||||
"option_60min": "60 mins",
|
"option_60min": "60 mins",
|
||||||
"option_4hours": "4 hours",
|
"option_4hours": "4 hours",
|
||||||
"option_24hours": "24 hours",
|
"option_24hours": "24 hours",
|
||||||
"field_threshold": "Alert Threshold",
|
"field_threshold": "Alert Threshold",
|
||||||
"option_allthetimes": "all the times",
|
"option_allthetimes": "all the times",
|
||||||
"option_atleastonce": "at least once",
|
"option_atleastonce": "at least once",
|
||||||
"option_onaverage": "on average",
|
"option_onaverage": "on average",
|
||||||
"option_intotal": "in total",
|
"option_intotal": "in total",
|
||||||
"option_above": "above",
|
"option_above": "above",
|
||||||
"option_below": "below",
|
"option_below": "below",
|
||||||
"option_equal": "is equal to",
|
"option_equal": "is equal to",
|
||||||
"option_notequal": "not equal to",
|
"option_notequal": "not equal to",
|
||||||
"button_query": "Query",
|
"button_query": "Query",
|
||||||
"button_formula": "Formula",
|
"button_formula": "Formula",
|
||||||
"tab_qb": "Query Builder",
|
"tab_qb": "Query Builder",
|
||||||
"tab_promql": "PromQL",
|
"tab_promql": "PromQL",
|
||||||
"tab_chquery": "ClickHouse Query",
|
"tab_chquery": "ClickHouse Query",
|
||||||
"title_confirm": "Confirm",
|
"title_confirm": "Confirm",
|
||||||
"button_ok": "Yes",
|
"button_ok": "Yes",
|
||||||
"button_cancel": "No",
|
"button_cancel": "No",
|
||||||
"field_promql_expr": "PromQL Expression",
|
"field_promql_expr": "PromQL Expression",
|
||||||
"field_alert_name": "Alert Name",
|
"field_alert_name": "Alert Name",
|
||||||
"field_alert_desc": "Alert Description",
|
"field_alert_desc": "Alert Description",
|
||||||
"field_labels": "Labels",
|
"field_labels": "Labels",
|
||||||
"field_severity": "Severity",
|
"field_severity": "Severity",
|
||||||
"option_critical": "Critical",
|
"option_critical": "Critical",
|
||||||
"option_error": "Error",
|
"option_error": "Error",
|
||||||
"option_warning": "Warning",
|
"option_warning": "Warning",
|
||||||
"option_info": "Info",
|
"option_info": "Info",
|
||||||
"user_guide_headline": "Steps to create an Alert",
|
"user_guide_headline": "Steps to create an Alert",
|
||||||
"user_guide_qb_step1": "Step 1 - Define the metric",
|
"user_guide_qb_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
|
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
|
||||||
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
|
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
|
||||||
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
|
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
|
||||||
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
|
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
|
||||||
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
|
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_qb_step2b": "Enter the Alert threshold",
|
"user_guide_qb_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_qb_step3": "Step 3 -Alert Configuration",
|
"user_guide_qb_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
|
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_guide_pql_step1": "Step 1 - Define the metric",
|
"user_guide_pql_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_pql_step1a": "Write a PromQL query for the metric",
|
"user_guide_pql_step1a": "Write a PromQL query for the metric",
|
||||||
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
|
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
|
||||||
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_pql_step2b": "Enter the Alert threshold",
|
"user_guide_pql_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_pql_step3": "Step 3 -Alert Configuration",
|
"user_guide_pql_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
|
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_guide_ch_step1": "Step 1 - Define the metric",
|
"user_guide_ch_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
|
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
|
||||||
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
|
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
|
||||||
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_ch_step2b": "Enter the Alert threshold",
|
"user_guide_ch_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_ch_step3": "Step 3 -Alert Configuration",
|
"user_guide_ch_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
|
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_tooltip_more_help": "More details on how to create alerts",
|
"user_tooltip_more_help": "More details on how to create alerts",
|
||||||
"choose_alert_type": "Choose a type for the alert:",
|
"choose_alert_type": "Choose a type for the alert:",
|
||||||
"metric_based_alert": "Metric based Alert",
|
"metric_based_alert": "Metric based Alert",
|
||||||
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
|
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
|
||||||
"log_based_alert": "Log-based Alert",
|
"log_based_alert": "Log-based Alert",
|
||||||
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
|
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
|
||||||
"traces_based_alert": "Trace-based Alert",
|
"traces_based_alert": "Trace-based Alert",
|
||||||
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
|
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
|
||||||
"exceptions_based_alert": "Exceptions-based Alert",
|
"exceptions_based_alert": "Exceptions-based Alert",
|
||||||
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data."
|
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.",
|
||||||
}
|
"field_unit": "Threshold unit",
|
||||||
|
"selected_query_placeholder": "Select query"
|
||||||
|
}
|
||||||
|
|||||||
@@ -20,6 +20,9 @@
|
|||||||
"field_slack_recipient": "Recipient",
|
"field_slack_recipient": "Recipient",
|
||||||
"field_slack_title": "Title",
|
"field_slack_title": "Title",
|
||||||
"field_slack_description": "Description",
|
"field_slack_description": "Description",
|
||||||
|
"field_opsgenie_api_key": "API Key",
|
||||||
|
"field_opsgenie_description": "Description",
|
||||||
|
"placeholder_opsgenie_description": "Description",
|
||||||
"field_webhook_username": "User Name (optional)",
|
"field_webhook_username": "User Name (optional)",
|
||||||
"field_webhook_password": "Password (optional)",
|
"field_webhook_password": "Password (optional)",
|
||||||
"field_pager_routing_key": "Routing Key",
|
"field_pager_routing_key": "Routing Key",
|
||||||
@@ -31,8 +34,12 @@
|
|||||||
"field_pager_class": "Class",
|
"field_pager_class": "Class",
|
||||||
"field_pager_client": "Client",
|
"field_pager_client": "Client",
|
||||||
"field_pager_client_url": "Client URL",
|
"field_pager_client_url": "Client URL",
|
||||||
|
"field_opsgenie_message": "Message",
|
||||||
|
"field_opsgenie_priority": "Priority",
|
||||||
"placeholder_slack_description": "Description",
|
"placeholder_slack_description": "Description",
|
||||||
"placeholder_pager_description": "Description",
|
"placeholder_pager_description": "Description",
|
||||||
|
"placeholder_opsgenie_message": "Message",
|
||||||
|
"placeholder_opsgenie_priority": "Priority",
|
||||||
"help_pager_client": "Shows up as event source in Pagerduty",
|
"help_pager_client": "Shows up as event source in Pagerduty",
|
||||||
"help_pager_client_url": "Shows up as event source link in Pagerduty",
|
"help_pager_client_url": "Shows up as event source link in Pagerduty",
|
||||||
"help_pager_class": "The class/type of the event",
|
"help_pager_class": "The class/type of the event",
|
||||||
@@ -43,6 +50,9 @@
|
|||||||
"help_webhook_username": "Leave empty for bearer auth or when authentication is not necessary.",
|
"help_webhook_username": "Leave empty for bearer auth or when authentication is not necessary.",
|
||||||
"help_webhook_password": "Specify a password or bearer token",
|
"help_webhook_password": "Specify a password or bearer token",
|
||||||
"help_pager_description": "Shows up as description in pagerduty",
|
"help_pager_description": "Shows up as description in pagerduty",
|
||||||
|
"help_opsgenie_message": "Shows up as message in opsgenie",
|
||||||
|
"help_opsgenie_priority": "Priority of the incident",
|
||||||
|
"help_opsgenie_description": "Shows up as description in opsgenie",
|
||||||
"channel_creation_done": "Successfully created the channel",
|
"channel_creation_done": "Successfully created the channel",
|
||||||
"channel_creation_failed": "An unexpected error occurred while creating this channel",
|
"channel_creation_failed": "An unexpected error occurred while creating this channel",
|
||||||
"channel_edit_done": "Channels Edited Successfully",
|
"channel_edit_done": "Channels Edited Successfully",
|
||||||
|
|||||||
@@ -13,5 +13,20 @@
|
|||||||
"import_dashboard_by_pasting": "Import dashboard by pasting JSON or importing JSON file",
|
"import_dashboard_by_pasting": "Import dashboard by pasting JSON or importing JSON file",
|
||||||
"error_loading_json": "Error loading JSON file",
|
"error_loading_json": "Error loading JSON file",
|
||||||
"empty_json_not_allowed": "Empty JSON is not allowed",
|
"empty_json_not_allowed": "Empty JSON is not allowed",
|
||||||
"new_dashboard_title": "Sample Title"
|
"new_dashboard_title": "Sample Title",
|
||||||
|
"layout_saved_successfully": "Layout saved successfully",
|
||||||
|
"add_panel": "Add Panel",
|
||||||
|
"save_layout": "Save Layout",
|
||||||
|
"full_view": "Full Screen View",
|
||||||
|
"variable_updated_successfully": "Variable updated successfully",
|
||||||
|
"error_while_updating_variable": "Error while updating variable",
|
||||||
|
"dashboard_has_been_updated": "Dashboard has been updated",
|
||||||
|
"do_you_want_to_refresh_the_dashboard": "Do you want to refresh the dashboard?",
|
||||||
|
"locked_dashboard_delete_tooltip_admin_author": "Dashboard is locked. Please unlock the dashboard to enable delete.",
|
||||||
|
"locked_dashboard_delete_tooltip_editor": "Dashboard is locked. Please contact admin to delete the dashboard.",
|
||||||
|
"delete_dashboard_success": "{{name}} dashboard deleted successfully",
|
||||||
|
"dashboard_unsave_changes": "There are unsaved changes in the Query builder, please stage and run the query or the changes will be lost. Press OK to discard.",
|
||||||
|
"dashboard_save_changes": "Your graph built with {{queryTag}} query will be saved. Press OK to confirm.",
|
||||||
|
"your_graph_build_with": "Your graph built with",
|
||||||
|
"dashboar_ok_confirm": "query will be saved. Press OK to confirm."
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,5 +3,7 @@
|
|||||||
"see_error_in_trace_graph": "See the error in trace graph",
|
"see_error_in_trace_graph": "See the error in trace graph",
|
||||||
"stack_trace": "Stacktrace",
|
"stack_trace": "Stacktrace",
|
||||||
"older": "Older",
|
"older": "Older",
|
||||||
"newer": "Newer"
|
"newer": "Newer",
|
||||||
|
"something_went_wrong": "Oops !!! Something went wrong",
|
||||||
|
"contact_if_issue_exists": "Don't worry, our team is here to help. Please contact support if the issue persists."
|
||||||
}
|
}
|
||||||
|
|||||||
3
frontend/public/locales/en/explorer.json
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
{
|
||||||
|
"name_of_the_view": "Name of the view"
|
||||||
|
}
|
||||||
1
frontend/public/locales/en/logs.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{ "fetching_log_lines": "Fetching log lines" }
|
||||||
46
frontend/public/locales/en/pipeline.json
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
{
|
||||||
|
"delete": "Delete",
|
||||||
|
"filter": "Filter",
|
||||||
|
"update": "Update",
|
||||||
|
"create": "Create",
|
||||||
|
"reorder": "Reorder",
|
||||||
|
"cancel": "Cancel",
|
||||||
|
"learn_more": "Learn more about pipelines",
|
||||||
|
"reorder_pipeline": "Do you want to reorder pipeline?",
|
||||||
|
"reorder_pipeline_description": "Logs are processed sequentially in processors and pipelines. Reordering it may change how data is processed by them.",
|
||||||
|
"delete_pipeline": "Do you want to delete pipeline",
|
||||||
|
"delete_pipeline_description": "Logs are processed sequentially in processors and pipelines. Deleting a pipeline may change content of data processed by other pipelines & processors",
|
||||||
|
"add_new_pipeline": "Add a New Pipeline",
|
||||||
|
"new_pipeline": "New Pipeline",
|
||||||
|
"enter_edit_mode": "Enter Edit Mode",
|
||||||
|
"save_configuration": "Save Configuration",
|
||||||
|
"edit_pipeline": "Edit Pipeline",
|
||||||
|
"create_pipeline": "Create New Pipeline",
|
||||||
|
"add_new_processor": "Add Processor",
|
||||||
|
"edit_processor": "Edit Processor",
|
||||||
|
"create_processor": "Create New Processor",
|
||||||
|
"processor_type": "Select Processor Type",
|
||||||
|
"reorder_processor": "Do you want to reorder processor?",
|
||||||
|
"reorder_processor_description": "Logs are processed sequentially in processors. Reordering it may change how data is processed by them.",
|
||||||
|
"delete_processor": "Do you want to delete processor",
|
||||||
|
"delete_processor_description": "Logs are processed sequentially in processors. Deleting a processor may change content of data processed by other processors",
|
||||||
|
"search_pipeline_placeholder": "Filter Pipelines",
|
||||||
|
"pipeline_name_placeholder": "Name",
|
||||||
|
"pipeline_filter_placeholder": "Filter for selecting logs to be processed by this pipeline. Example: service_name = billing",
|
||||||
|
"pipeline_tags_placeholder": "Tags",
|
||||||
|
"pipeline_description_placeholder": "Enter description for your pipeline",
|
||||||
|
"processor_name_placeholder": "Name",
|
||||||
|
"processor_regex_placeholder": "Regex",
|
||||||
|
"processor_parsefrom_placeholder": "Parse From",
|
||||||
|
"processor_parseto_placeholder": "Parse To",
|
||||||
|
"processor_onerror_placeholder": "on Error",
|
||||||
|
"processor_pattern_placeholder": "Pattern",
|
||||||
|
"processor_field_placeholder": "Field",
|
||||||
|
"processor_value_placeholder": "Value",
|
||||||
|
"processor_description_placeholder": "example rule: %{word:first}",
|
||||||
|
"processor_trace_id_placeholder": "Parse Trace ID from",
|
||||||
|
"processor_span_id_placeholder": "Parse Span ID from",
|
||||||
|
"processor_trace_flags_placeholder": "Parse Trace flags from",
|
||||||
|
"processor_from_placeholder": "From",
|
||||||
|
"processor_to_placeholder": "To"
|
||||||
|
}
|
||||||
@@ -1,9 +1,14 @@
|
|||||||
{
|
{
|
||||||
"general": "General",
|
"general": "General",
|
||||||
"alert_channels": "Alert Channels",
|
"alert_channels": "Alert Channels",
|
||||||
"organization_settings": "Organization Settings",
|
"organization_settings": "Organization Settings",
|
||||||
"my_settings": "My Settings",
|
"ingestion_settings": "Ingestion Settings",
|
||||||
"overview_metrics": "Overview Metrics",
|
"my_settings": "My Settings",
|
||||||
"dbcall_metrics": "Database Calls",
|
"overview_metrics": "Overview Metrics",
|
||||||
"external_metrics": "External Calls"
|
"dbcall_metrics": "Database Calls",
|
||||||
}
|
"external_metrics": "External Calls",
|
||||||
|
"pipeline": "Pipeline",
|
||||||
|
"pipelines": "Pipelines",
|
||||||
|
"archives": "Archives",
|
||||||
|
"logs_to_metrics": "Logs To Metrics"
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,85 +1,85 @@
|
|||||||
{
|
{
|
||||||
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
|
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
|
||||||
"preview_chart_threshold_label": "Threshold",
|
"preview_chart_threshold_label": "Threshold",
|
||||||
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
|
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
|
||||||
"button_yes": "Yes",
|
"button_yes": "Yes",
|
||||||
"button_no": "No",
|
"button_no": "No",
|
||||||
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
|
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
|
||||||
"remove_label_success": "Labels cleared",
|
"remove_label_success": "Labels cleared",
|
||||||
"alert_form_step1": "Step 1 - Define the metric",
|
"alert_form_step1": "Step 1 - Define the metric",
|
||||||
"alert_form_step2": "Step 2 - Define Alert Conditions",
|
"alert_form_step2": "Step 2 - Define Alert Conditions",
|
||||||
"alert_form_step3": "Step 3 - Alert Configuration",
|
"alert_form_step3": "Step 3 - Alert Configuration",
|
||||||
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
|
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
|
||||||
"confirm_save_title": "Save Changes",
|
"confirm_save_title": "Save Changes",
|
||||||
"confirm_save_content_part1": "Your alert built with",
|
"confirm_save_content_part1": "Your alert built with",
|
||||||
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
|
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
|
||||||
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
|
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
|
||||||
"rule_created": "Rule created successfully",
|
"rule_created": "Rule created successfully",
|
||||||
"rule_edited": "Rule edited successfully",
|
"rule_edited": "Rule edited successfully",
|
||||||
"expression_missing": "expression is missing in {{where}}",
|
"expression_missing": "expression is missing in {{where}}",
|
||||||
"metricname_missing": "metric name is missing in {{where}}",
|
"metricname_missing": "metric name is missing in {{where}}",
|
||||||
"condition_required": "at least one metric condition is required",
|
"condition_required": "at least one metric condition is required",
|
||||||
"alertname_required": "alert name is required",
|
"alertname_required": "alert name is required",
|
||||||
"promql_required": "promql expression is required when query format is set to PromQL",
|
"promql_required": "promql expression is required when query format is set to PromQL",
|
||||||
"button_savechanges": "Save Rule",
|
"button_savechanges": "Save Rule",
|
||||||
"button_createrule": "Create Rule",
|
"button_createrule": "Create Rule",
|
||||||
"button_returntorules": "Return to rules",
|
"button_returntorules": "Return to rules",
|
||||||
"button_cancelchanges": "Cancel",
|
"button_cancelchanges": "Cancel",
|
||||||
"button_discard": "Discard",
|
"button_discard": "Discard",
|
||||||
"text_condition1": "Send a notification when the metric is",
|
"text_condition1": "Send a notification when",
|
||||||
"text_condition2": "the threshold",
|
"text_condition2": "the threshold",
|
||||||
"text_condition3": "during the last",
|
"text_condition3": "during the last",
|
||||||
"option_5min": "5 mins",
|
"option_5min": "5 mins",
|
||||||
"option_10min": "10 mins",
|
"option_10min": "10 mins",
|
||||||
"option_15min": "15 mins",
|
"option_15min": "15 mins",
|
||||||
"option_60min": "60 mins",
|
"option_60min": "60 mins",
|
||||||
"option_4hours": "4 hours",
|
"option_4hours": "4 hours",
|
||||||
"option_24hours": "24 hours",
|
"option_24hours": "24 hours",
|
||||||
"field_threshold": "Alert Threshold",
|
"field_threshold": "Alert Threshold",
|
||||||
"option_allthetimes": "all the times",
|
"option_allthetimes": "all the times",
|
||||||
"option_atleastonce": "at least once",
|
"option_atleastonce": "at least once",
|
||||||
"option_onaverage": "on average",
|
"option_onaverage": "on average",
|
||||||
"option_intotal": "in total",
|
"option_intotal": "in total",
|
||||||
"option_above": "above",
|
"option_above": "above",
|
||||||
"option_below": "below",
|
"option_below": "below",
|
||||||
"option_equal": "is equal to",
|
"option_equal": "is equal to",
|
||||||
"option_notequal": "not equal to",
|
"option_notequal": "not equal to",
|
||||||
"button_query": "Query",
|
"button_query": "Query",
|
||||||
"button_formula": "Formula",
|
"button_formula": "Formula",
|
||||||
"tab_qb": "Query Builder",
|
"tab_qb": "Query Builder",
|
||||||
"tab_promql": "PromQL",
|
"tab_promql": "PromQL",
|
||||||
"title_confirm": "Confirm",
|
"title_confirm": "Confirm",
|
||||||
"button_ok": "Yes",
|
"button_ok": "Yes",
|
||||||
"button_cancel": "No",
|
"button_cancel": "No",
|
||||||
"field_promql_expr": "PromQL Expression",
|
"field_promql_expr": "PromQL Expression",
|
||||||
"field_alert_name": "Alert Name",
|
"field_alert_name": "Alert Name",
|
||||||
"field_alert_desc": "Alert Description",
|
"field_alert_desc": "Alert Description",
|
||||||
"field_labels": "Labels",
|
"field_labels": "Labels",
|
||||||
"field_severity": "Severity",
|
"field_severity": "Severity",
|
||||||
"option_critical": "Critical",
|
"option_critical": "Critical",
|
||||||
"option_error": "Error",
|
"option_error": "Error",
|
||||||
"option_warning": "Warning",
|
"option_warning": "Warning",
|
||||||
"option_info": "Info",
|
"option_info": "Info",
|
||||||
"user_guide_headline": "Steps to create an Alert",
|
"user_guide_headline": "Steps to create an Alert",
|
||||||
"user_guide_qb_step1": "Step 1 - Define the metric",
|
"user_guide_qb_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
|
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
|
||||||
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
|
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
|
||||||
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
|
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
|
||||||
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
|
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
|
||||||
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
|
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_qb_step2b": "Enter the Alert threshold",
|
"user_guide_qb_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_qb_step3": "Step 3 -Alert Configuration",
|
"user_guide_qb_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
|
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_guide_pql_step1": "Step 1 - Define the metric",
|
"user_guide_pql_step1": "Step 1 - Define the metric",
|
||||||
"user_guide_pql_step1a": "Write a PromQL query for the metric",
|
"user_guide_pql_step1a": "Write a PromQL query for the metric",
|
||||||
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
|
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
|
||||||
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
|
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
|
||||||
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
|
||||||
"user_guide_pql_step2b": "Enter the Alert threshold",
|
"user_guide_pql_step2b": "Enter the Alert threshold",
|
||||||
"user_guide_pql_step3": "Step 3 -Alert Configuration",
|
"user_guide_pql_step3": "Step 3 -Alert Configuration",
|
||||||
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
|
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
|
||||||
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
|
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
|
||||||
"user_tooltip_more_help": "More details on how to create alerts"
|
"user_tooltip_more_help": "More details on how to create alerts"
|
||||||
}
|
}
|
||||||
|
|||||||