Compare commits: v0.11.4-rc...v0.24.0

621 commits
.github/CODEOWNERS (vendored, 10 changes)
@@ -2,6 +2,12 @@
 # Owners are automatically requested for review for PRs that changes code
 # that they own.
 * @ankitnayan
-/frontend/ @palashgdev @pranshuchittora
+/frontend/ @palashgdev
 /deploy/ @prashant-shahi
-/pkg/query-service/ @srikanthccv
 /sample-apps/ @prashant-shahi
+**/query-service/ @srikanthccv
+Makefile @srikanthccv
+go.* @srikanthccv
+.git* @srikanthccv
+.github @prashant-shahi
.github/config.yml (vendored, 2 changes)
@@ -17,7 +17,7 @@ newPRWelcomeComment: >
 # Comment to be posted to on pull requests merged by a first time user
 firstPRMergeComment: >
   Congrats on merging your first pull request!

   

+  We here at SigNoz are proud of you! 🥳
.github/workflows/build.yaml (vendored, 10 changes)
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Install dependencies
        run: cd frontend && yarn install
      - name: Run ESLint
@@ -31,7 +31,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
+      - name: Run tests
+        shell: bash
+        run: |
+          make test
       - name: Build query-service image
         shell: bash
         run: |
@@ -41,7 +45,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Build EE query-service image
         shell: bash
         run: |
.github/workflows/codeql.yaml (vendored, 8 changes)
@@ -39,11 +39,11 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v1
+        uses: github/codeql-action/init@v2
         with:
           languages: ${{ matrix.language }}
           # If you wish to specify custom queries, you can do so here or in a config file.
@@ -54,7 +54,7 @@ jobs:
       # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
       # If this step fails, then you should remove it and run the build manually (see below)
       - name: Autobuild
-        uses: github/codeql-action/autobuild@v1
+        uses: github/codeql-action/autobuild@v2

       # ℹ️ Command-line programs to run using the OS shell.
       # 📚 https://git.io/JvXDl
@@ -68,4 +68,4 @@ jobs:
       #   make release

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v1
+        uses: github/codeql-action/analyze@v2
.github/workflows/commitlint.yml (vendored, 9 changes)
@@ -7,12 +7,7 @@ jobs:
   lint-commits:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2.3.1
+      - uses: actions/checkout@v3
         with:
-          # we actually need "github.event.pull_request.commits + 1" commit
           fetch-depth: 0
-      - uses: actions/setup-node@v2.1.0
-      # or just "yarn" if you depend on "@commitlint/cli" already
-      - run: yarn add @commitlint/cli
-      - run: yarn add @commitlint/config-conventional
-      - run: yarn run commitlint --config ./node_modules/@commitlint/config-conventional/index.js --from HEAD~${{ github.event.pull_request.commits }} --to HEAD
+      - uses: wagoid/commitlint-github-action@v5

@@ -12,11 +12,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Codebase
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           repository: signoz/gh-bot
       - name: Use Node v16
-        uses: actions/setup-node@v2
+        uses: actions/setup-node@v3
         with:
           node-version: 16
       - name: Setup Cache & Install Dependencies
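The commitlint workflow now delegates to `wagoid/commitlint-github-action@v5` instead of installing the `@commitlint` packages step by step. A rough local equivalent of the same conventional-commit check, as a sketch (assumes Node/npx is available; it lints the commits between `origin/develop` and `HEAD`):

```sh
# Sketch: run the same conventional-commit lint locally before pushing.
npx --yes -p @commitlint/cli -p @commitlint/config-conventional \
  commitlint --extends @commitlint/config-conventional \
  --from origin/develop --to HEAD
```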
.github/workflows/e2e-k3s.yaml (vendored, 12 changes)
@@ -13,7 +13,7 @@ jobs:
       DOCKER_TAG: pull-${{ github.event.number }}
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

       - name: Build query-service image
         env:
@@ -37,7 +37,7 @@ jobs:
           kubectl create ns sample-application

           # apply hotrod k8s manifest file
-          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
+          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml

           # wait for all deployments in sample-application namespace to be READY
           kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
@@ -57,7 +57,7 @@ jobs:
             --set frontend.service.type=LoadBalancer \
             --set queryService.image.tag=$DOCKER_TAG \
             --set frontend.image.tag=$DOCKER_TAG

           # get pods, services and the container images
           kubectl get pods -n platform
           kubectl get svc -n platform
@@ -69,12 +69,14 @@ jobs:
             --restart='OnFailure' -i --rm --command -- curl -X POST -F \
             'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm

-      - name: Get short commit SHA and display tunnel URL
+      - name: Get short commit SHA, display tunnel URL and IP Address of the worker node
         id: get-subdomain
         run: |
           subdomain="pr-$(git rev-parse --short HEAD)"
           echo "URL for tunnelling: https://$subdomain.loca.lt"
-          echo "::set-output name=subdomain::$subdomain"
+          echo "subdomain=$subdomain" >> $GITHUB_OUTPUT
+          worker_ip="$(curl -4 -s ipconfig.io/ip)"
+          echo "Worker node IP address: $worker_ip"

       - name: Start tunnel
         env:
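The last hunk also migrates off GitHub's deprecated `::set-output` workflow command. The two styles side by side, as a sketch (`get-subdomain` is the step id used above):

```sh
subdomain="pr-$(git rev-parse --short HEAD)"

# Old, deprecated workflow command (parsed from stdout):
#   echo "::set-output name=subdomain::$subdomain"

# New style: append a key=value line to the file GitHub exposes as $GITHUB_OUTPUT.
echo "subdomain=$subdomain" >> "$GITHUB_OUTPUT"

# Later steps read it as: ${{ steps.get-subdomain.outputs.subdomain }}
```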
.github/workflows/playwright.yaml (vendored, 4 changes)
@@ -9,8 +9,8 @@ jobs:
     timeout-minutes: 60
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-node@v2
+      - uses: actions/checkout@v3
+      - uses: actions/setup-node@v3
         with:
           node-version: "16.x"
       - name: Install dependencies
.github/workflows/pr_verify_linked_issue.yml (vendored, 5 changes)
@@ -5,7 +5,7 @@ name: VerifyIssue

 on:
   pull_request:
-    types: [edited, synchronize, opened, reopened]
+    types: [edited, opened]
   check_run:

 jobs:
@@ -14,7 +14,6 @@ jobs:
     name: Ensure Pull Request has a linked issue.
     steps:
       - name: Verify Linked Issue
-        uses: hattan/verify-linked-issue-action@v1.1.0
+        uses: srikanthccv/verify-linked-issue-action@v0.71
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/push.yaml (vendored, 28 changes)
@@ -14,19 +14,19 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v1.2
+      - uses: benjlevesque/short-sha@v2.2
         id: short-sha
       - name: Get branch name
         id: branch-name
@@ -49,19 +49,19 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v1.2
+      - uses: benjlevesque/short-sha@v2.2
         id: short-sha
       - name: Get branch name
         id: branch-name
@@ -84,7 +84,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Install dependencies
         working-directory: frontend
         run: yarn install
@@ -97,15 +97,15 @@ jobs:
         run: npm run lint
         continue-on-error: true
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
           version: latest
       - name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v1.2
+      - uses: benjlevesque/short-sha@v2.2
         id: short-sha
       - name: Get branch name
         id: branch-name
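All three jobs bump the Docker actions by one major version; behaviour is unchanged. What the buildx/login setup steps amount to on a workstation, roughly, as a sketch (the two variables stand in for the repository secrets above):

```sh
# Sketch of what setup-buildx-action / login-action provide on a runner.
docker buildx create --name ci-builder --use    # multi-arch capable builder
echo "$DOCKERHUB_TOKEN" | docker login --username "$DOCKERHUB_USERNAME" --password-stdin
docker buildx inspect --bootstrap               # verify the builder is ready
```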
.github/workflows/release-drafter.yml (vendored, 6 changes)
@@ -12,6 +12,12 @@ on:

 jobs:
   update_release_draft:
+    permissions:
+      # write permission is required to create a github release
+      contents: write
+      # write permission is required for autolabeler
+      # otherwise, read permission is required at least
+      pull-requests: write
     runs-on: ubuntu-latest
     steps:
       # (Optional) GitHub Enterprise requires GHE_HOST variable set
.github/workflows/remove-label.yaml (vendored, 10 changes)
@@ -8,9 +8,15 @@ jobs:
   remove:
     runs-on: ubuntu-latest
     steps:
-      - name: Remove label
-        uses: buildsville/add-remove-label@v1
+      - name: Remove label ok-to-test from PR
+        uses: buildsville/add-remove-label@v2.0.0
         with:
           label: ok-to-test
           type: remove
           token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Remove label testing-deploy from PR
+        uses: buildsville/add-remove-label@v2.0.0
+        with:
+          label: testing-deploy
+          type: remove
+          token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/repo-stats.yml (vendored, 25 changes)
@@ -1,25 +0,0 @@
-on:
-  schedule:
-    # Run this once per day, towards the end of the day for keeping the most
-    # recent data point most meaningful (hours are interpreted in UTC).
-    - cron: "0 8 * * *"
-  workflow_dispatch: # Allow for running this manually.
-
-jobs:
-  j1:
-    name: repostats
-    runs-on: ubuntu-latest
-    steps:
-      - name: run-ghrs
-        uses: jgehrcke/github-repo-stats@v1.1.0
-        with:
-          # Define the stats repository (the repo to fetch
-          # stats for and to generate the report for).
-          # Remove the parameter when the stats repository
-          # and the data repository are the same.
-          repository: signoz/signoz
-          # Set a GitHub API token that can read the stats
-          # repository, and that can push to the data
-          # repository (which this workflow file lives in),
-          # to store data and the report files.
-          ghtoken: ${{ github.token }}
.github/workflows/sonar.yml (vendored, 5 changes)
@@ -3,7 +3,7 @@ on:
   pull_request:
     branches:
-      - main
-      - v*
+      - develop
     paths:
       - 'frontend/**'
 defaults:
@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Sonar analysis
@@ -24,4 +24,3 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
.github/workflows/staging-deployment.yaml (vendored, 40 changes, new file)
@@ -0,0 +1,40 @@
+name: staging-deployment
+# Trigger deployment only on push to develop branch
+on:
+  push:
+    branches:
+      - develop
+jobs:
+  deploy:
+    name: Deploy latest develop branch to staging
+    runs-on: ubuntu-latest
+    environment: staging
+    steps:
+      - name: Executing remote ssh commands using ssh key
+        uses: appleboy/ssh-action@v0.1.8
+        env:
+          GITHUB_BRANCH: develop
+          GITHUB_SHA: ${{ github.sha }}
+        with:
+          host: ${{ secrets.HOST_DNS }}
+          username: ${{ secrets.USERNAME }}
+          key: ${{ secrets.SSH_KEY }}
+          envs: GITHUB_BRANCH,GITHUB_SHA
+          command_timeout: 60m
+          script: |
+            echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
+            echo "GITHUB_SHA: ${GITHUB_SHA}"
+            export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
+            export OTELCOL_TAG="main"
+            docker system prune --force
+            docker pull signoz/signoz-otel-collector:main
+            cd ~/signoz
+            git status
+            git add .
+            git stash push -m "stashed on $(date --iso-8601=seconds)"
+            git fetch origin
+            git checkout ${GITHUB_BRANCH}
+            git pull
+            make build-ee-query-service-amd64
+            make build-frontend-amd64
+            make run-signoz
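The deploy script tags images with the first seven characters of the pushed commit, using bash substring expansion. In isolation, as a sketch run from any checkout:

```sh
# Sketch: derive the image tag the way the staging script does.
GITHUB_SHA="$(git rev-parse HEAD)"      # full 40-char commit SHA
export DOCKER_TAG="${GITHUB_SHA:0:7}"   # bash substring: offset 0, length 7
echo "images will be tagged :${DOCKER_TAG}"
```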
.github/workflows/testing-deployment.yaml (vendored, 39 changes, new file)
@@ -0,0 +1,39 @@
+name: testing-deployment
+# Trigger deployment only on testing-deploy label on pull request
+on:
+  pull_request:
+    types: [labeled]
+jobs:
+  deploy:
+    name: Deploy PR branch to testing
+    runs-on: ubuntu-latest
+    environment: testing
+    if: ${{ github.event.label.name == 'testing-deploy' }}
+    steps:
+      - name: Executing remote ssh commands using ssh key
+        uses: appleboy/ssh-action@v0.1.8
+        env:
+          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
+          GITHUB_SHA: ${{ github.sha }}
+        with:
+          host: ${{ secrets.HOST_DNS }}
+          username: ${{ secrets.USERNAME }}
+          key: ${{ secrets.SSH_KEY }}
+          envs: GITHUB_BRANCH,GITHUB_SHA
+          command_timeout: 60m
+          script: |
+            echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
+            echo "GITHUB_SHA: ${GITHUB_SHA}"
+            export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
+            export DEV_BUILD="1"
+            docker system prune --force
+            cd ~/signoz
+            git status
+            git add .
+            git stash push -m "stashed on $(date --iso-8601=seconds)"
+            git fetch origin
+            git checkout ${GITHUB_BRANCH}
+            git pull
+            make build-ee-query-service-amd64
+            make build-frontend-amd64
+            make run-signoz
.gitignore (vendored, 6 changes)
@@ -1,7 +1,5 @@

 node_modules
 yarn.lock
 package.json

 deploy/docker/environment_tiny/common_test
 frontend/node_modules
@@ -52,4 +50,6 @@ ee/query-service/tests/test-deploy/data/
 *.db
 /deploy/docker/clickhouse-setup/data/
 /deploy/docker-swarm/clickhouse-setup/data/
-bin/
+bin/
+
+*/query-service/queries.active
@@ -80,7 +80,7 @@ Before sending us a pull request, please ensure that,
 GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
 [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).

-**Note:** Unless your change is small, **please** consider submitting different Pull Rrequest(s):
+**Note:** Unless your change is small, **please** consider submitting different Pull Request(s):

 * 1️⃣ First PR should include the overall structure of the new component:
 * Readme, configuration, interfaces or base classes, etc...
@@ -215,9 +215,26 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
 # 4. Contribute to Backend (Query-Service) 🌑

-[**https://github.com/SigNoz/signoz/tree/develop/pkg/query-service**](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)
+**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**

-## 4.1 To run ClickHouse setup (recommended for local development)
+## 4.1 Prerequisites
+
+### 4.1.1 Install SQLite3
+
+- Run `sqlite3` command to check if you already have SQLite3 installed on your machine.
+
+- If not installed already, Install using below command
+  - on Linux
+    - on Debian / Ubuntu
+      ```
+      sudo apt install sqlite3
+      ```
+    - on CentOS / Fedora / RedHat
+      ```
+      sudo yum install sqlite3
+      ```
+
+## 4.2 To run ClickHouse setup (recommended for local development)

 - Clone the SigNoz repository and cd into signoz directory,
 ```
@@ -321,7 +338,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
 **5.1.1 To install the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
 | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```
@@ -344,7 +361,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
 **5.1.4 To delete the HotROD sample app:**

 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
 | HOTROD_NAMESPACE=sample-application bash
 ```
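For the new SQLite3 prerequisite in section 4.1.1, a quick presence check before picking an install command, as a sketch (the package name is `sqlite3` on both distro families shown above):

```sh
# Sketch: verify SQLite3 is installed, as section 4.1.1 asks.
if command -v sqlite3 >/dev/null 2>&1; then
  sqlite3 --version
else
  echo "sqlite3 missing - use 'sudo apt install sqlite3' or 'sudo yum install sqlite3'"
fi
```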
Makefile (41 changes)
@@ -45,7 +45,7 @@ build-frontend-amd64:
 	@echo "--> Building frontend docker image for amd64"
 	@echo "------------------"
 	@cd $(FRONTEND_DIRECTORY) && \
-	docker build --file Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
+	docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
 	--build-arg TARGETPLATFORM="linux/amd64" .

 # Step to build and push docker image of frontend(used in push pipeline)
@@ -54,7 +54,7 @@ build-push-frontend:
 	@echo "--> Building and pushing frontend docker image"
 	@echo "------------------"
 	@cd $(FRONTEND_DIRECTORY) && \
-	docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 \
+	docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
 	--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .

 # Steps to build and push docker image of query service
@@ -65,7 +65,7 @@ build-query-service-amd64:
 	@echo "--> Building query-service docker image for amd64"
 	@echo "------------------"
 	@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
-	--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
+	-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
 	--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .

 # Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
@@ -73,7 +73,7 @@ build-push-query-service:
 	@echo "------------------"
 	@echo "--> Building and pushing query-service docker image"
 	@echo "------------------"
-	@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane --no-cache \
+	@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
 	--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
 	--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
@@ -84,11 +84,11 @@ build-ee-query-service-amd64:
 	@echo "------------------"
 	@if [ $(DEV_BUILD) != "" ]; then \
 		docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
-		--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
+		-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
 		--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
 	else \
 		docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
-		--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
+		-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
 		--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
 	fi
@@ -98,7 +98,7 @@ build-push-ee-query-service:
 	@echo "--> Building and pushing query-service docker image"
 	@echo "------------------"
 	@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
-	--progress plane --no-cache --push --platform linux/arm64,linux/amd64 \
+	--progress plain --push --platform linux/arm64,linux/amd64 \
 	--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .

 dev-setup:
@@ -119,16 +119,35 @@ down-local:
 	$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
 	down -v

-run-x86:
+pull-signoz:
+	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
+
+run-signoz:
 	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d

-down-x86:
+down-signoz:
 	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v

 clear-standalone-data:
 	@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
-	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
+	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"

 clear-swarm-data:
 	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
-	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
+	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"

+clear-standalone-ch:
+	@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
+	sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
+
+clear-swarm-ch:
+	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
+	sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"

 test:
 	go test ./pkg/query-service/app/metrics/...
+	go test ./pkg/query-service/cache/...
 	go test ./pkg/query-service/app/...
+	go test ./pkg/query-service/app/querier/...
+	go test ./pkg/query-service/converter/...
+	go test ./pkg/query-service/formatter/...
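With the `run-x86`/`down-x86` targets renamed, a typical local loop against the standalone compose setup looks like this, as a sketch run from the repository root:

```sh
# Sketch: day-to-day use of the renamed Makefile targets.
make pull-signoz            # pull images for the standalone docker-compose file
make run-signoz             # build and start the stack (was: run-x86)
make down-signoz            # stop and remove it (was: down-x86)
make clear-standalone-ch    # wipe only ClickHouse + ZooKeeper data directories
```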
@@ -85,9 +85,9 @@ Hier findest du die vollständige Liste von unterstützten Programmiersprachen -

 ### Bereitstellung mit Docker

-Bitte folge den [hier](https://signoz.io/docs/deployment/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.
+Bitte folge den [hier](https://signoz.io/docs/install/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.

-Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/deployment/troubleshooting) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.
+Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/install/troubleshooting/) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.

 <p>  </p>
README.md (80 changes)
@@ -23,7 +23,9 @@

 ##

-SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
+SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can:
+
+👉 Visualise Metrics, Traces and Logs in a single pane of glass

 👉 You can see metrics like p99 latency, error rates for your services, external API calls and individual end points.
@@ -31,15 +33,43 @@ SigNoz helps developers monitor applications and troubleshoot problems in their

 👉 Run aggregates on trace data to get business relevant metrics

-
-<br />
-
-<br />
-
+👉 Filter and query logs, build dashboards and alerts based on attributes in logs
+
+👉 Record exceptions automatically in Python, Java, Ruby, and Javascript
+
+👉 Easy to set alerts with DIY query builder
+
+### Application Metrics
+
+
+
+### Distributed Tracing
+<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
+
+<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
+
+### Logs Management
+
+<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
+
+### Infrastructure Monitoring
+
+<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
+
+### Exceptions Monitoring
+
+
+
+### Alerts
+
+<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

 ## Join our Slack community
@@ -47,20 +77,22 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

 ## Features:

+- Unified UI for metrics, traces and logs. No need to switch from Prometheus to Jaeger to debug issues, or use a logs tool like Elastic separate from your metrics and traces stack.
 - Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
 - Slowest endpoints in your application
 - See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
 - Filter traces by service name, operation, latency, error, tags/annotations.
 - Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
-- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.
+- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s cluster
+- Lightning quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
+- End-to-End visibility into infrastructure performance, ingest metrics from all kinds of host environments
+- Easy to set alerts with DIY query builder

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

 ## Why SigNoz?
@@ -89,15 +121,14 @@ You can find the complete list of languages here - https://opentelemetry.io/docs

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

 ## Getting Started

 ### Deploy using Docker

-Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using docker
+Please follow the steps listed [here](https://signoz.io/docs/install/docker/) to install using docker

-The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshooting) may be helpful if you face any issues.
+The [troubleshooting instructions](https://signoz.io/docs/install/troubleshooting/) may be helpful if you face any issues.

 <p>  </p>
@@ -108,7 +139,6 @@ Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_cha

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

 ## Comparisons to Familiar Tools
@@ -129,9 +159,27 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:

 - Jaegar UI doesn’t show any metrics on traces or on filtered traces
 - Jaeger can’t get aggregates on filtered traces. For example, p99 latency of requests which have tag - customer_type='premium'. This can be done easily on SigNoz

+<p>  </p>
+
+### SigNoz vs Elastic
+
+- SigNoz Logs management are based on ClickHouse, a columnar OLAP datastore which makes aggregate log analytics queries much more efficient
+- 50% lower resource requirement compared to Elastic during ingestion
+
+We have published benchmarks comparing Elastic with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
+
+<p>  </p>
+
+### SigNoz vs Loki
+
+- SigNoz supports aggregations on high-cardinality data over a huge volume while loki doesn’t.
+- SigNoz supports indexes over high cardinality data and has no limitations on the number of indexes, while Loki reaches max streams with a few indexes added to it.
+- Searching over a huge volume of data is difficult and slow in Loki compared to SigNoz
+
+We have published benchmarks comparing Loki with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

 ## Contributing
@@ -158,7 +206,6 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

 ## Documentation
@@ -166,7 +213,6 @@ You can find docs at https://signoz.io/docs/. If you need any clarification or f

 <br /><br />

 <img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

 ## Community
@@ -84,9 +84,9 @@ Você pode encontrar a lista completa de linguagens aqui - https://opentelemetry

 ### Implantar usando Docker

-Siga as etapas listadas [aqui](https://signoz.io/docs/deployment/docker/) para instalar usando o Docker.
+Siga as etapas listadas [aqui](https://signoz.io/docs/install/docker/) para instalar usando o Docker.

-Esse [guia para solução de problemas](https://signoz.io/docs/deployment/troubleshooting) pode ser útil se você enfrentar quaisquer problemas.
+Esse [guia para solução de problemas](https://signoz.io/docs/install/troubleshooting/) pode ser útil se você enfrentar quaisquer problemas.

 <p>  </p>
@@ -80,9 +80,9 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo

 ### 使用Docker部署

-请按照[这里](https://signoz.io/docs/deployment/docker/)列出的步骤使用Docker来安装
+请按照[这里](https://signoz.io/docs/install/docker/)列出的步骤使用Docker来安装

-如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/deployment/troubleshooting)会对你有帮助。
+如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/install/troubleshooting/)会对你有帮助。

 <p>  </p>
@@ -27,12 +27,6 @@ For x86 chip (amd):
 docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
 ```

-For Mac with Apple chip (arm):
-
-```sh
-docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
-```

 Open http://localhost:3301 in your favourite browser. In couple of minutes, you should see
 the data generated from hotrod in SigNoz UI.
@@ -64,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in Si
 ```sh
 kubectl create ns sample-application

-kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
 ```

 To generate load:
@@ -72,7 +66,7 @@ To generate load:
 ```sh
 kubectl -n sample-application run strzal --image=djbingham/curl \
   --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
-  'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
+  'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
 ```

 To stop load:
deploy/docker-swarm/clickhouse-setup/clickhouse-cluster.xml (75 changes, new file)
@@ -0,0 +1,75 @@
+<?xml version="1.0"?>
+<clickhouse>
+    <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
+         Optional. If you don't use replicated tables, you could omit that.
+
+         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
+      -->
+    <zookeeper>
+        <node index="1">
+            <host>zookeeper-1</host>
+            <port>2181</port>
+        </node>
+        <!-- <node index="2">
+            <host>zookeeper-2</host>
+            <port>2181</port>
+        </node>
+        <node index="3">
+            <host>zookeeper-3</host>
+            <port>2181</port>
+        </node> -->
+    </zookeeper>
+
+    <!-- Configuration of clusters that could be used in Distributed tables.
+         https://clickhouse.com/docs/en/operations/table_engines/distributed/
+      -->
+    <remote_servers>
+        <cluster>
+            <!-- Inter-server per-cluster secret for Distributed queries
+                 default: no secret (no authentication will be performed)
+
+                 If set, then Distributed queries will be validated on shards, so at least:
+                 - such cluster should exist on the shard,
+                 - such cluster should have the same secret.
+
+                 And also (and which is more important), the initial_user will
+                 be used as current user for the query.
+
+                 Right now the protocol is pretty simple and it only takes into account:
+                 - cluster name
+                 - query
+
+                 Also it will be nice if the following will be implemented:
+                 - source hostname (see interserver_http_host), but then it will depends from DNS,
+                   it can use IP address instead, but then the you need to get correct on the initiator node.
+                 - target hostname / ip address (same notes as for source hostname)
+                 - time-based security tokens
+            -->
+            <!-- <secret></secret> -->
+            <shard>
+                <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
+                <!-- <internal_replication>false</internal_replication> -->
+                <!-- Optional. Shard weight when writing data. Default: 1. -->
+                <!-- <weight>1</weight> -->
+                <replica>
+                    <host>clickhouse</host>
+                    <port>9000</port>
+                    <!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
+                    <!-- <priority>1</priority> -->
+                </replica>
+            </shard>
+            <!-- <shard>
+                <replica>
+                    <host>clickhouse-2</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <replica>
+                    <host>clickhouse-3</host>
+                    <port>9000</port>
+                </replica>
+            </shard> -->
+        </cluster>
+    </remote_servers>
+</clickhouse>
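Once this cluster file is mounted, ClickHouse should report the single-shard `cluster` definition. One way to check, as a sketch (`clickhouse` is the service/host name used in this file):

```sh
# Sketch: confirm ClickHouse loaded the new cluster definition.
docker exec -it clickhouse clickhouse-client --query \
  "SELECT cluster, shard_num, replica_num, host_name FROM system.clusters WHERE cluster = 'cluster'"
```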
@@ -236,8 +236,8 @@
     <openSSL>
         <server> <!-- Used for https server AND secure tcp port -->
             <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
-            <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
-            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
+            <!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile> -->
+            <!-- <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
             <!-- dhparams are optional. You can delete the <dhParamsFile> element.
                  To generate dhparams, use the following command:
                  openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
@@ -618,148 +618,6 @@
     </jdbc_bridge>
     -->

-    <!-- Configuration of clusters that could be used in Distributed tables.
-         https://clickhouse.com/docs/en/operations/table_engines/distributed/
-      -->
-    <remote_servers>
-        <!-- Test only shard config for testing distributed storage -->
-        <test_shard_localhost>
-            <!-- Inter-server per-cluster secret for Distributed queries
-                 default: no secret (no authentication will be performed)
-
-                 If set, then Distributed queries will be validated on shards, so at least:
-                 - such cluster should exist on the shard,
-                 - such cluster should have the same secret.
-
-                 And also (and which is more important), the initial_user will
-                 be used as current user for the query.
-
-                 Right now the protocol is pretty simple and it only takes into account:
-                 - cluster name
-                 - query
-
-                 Also it will be nice if the following will be implemented:
-                 - source hostname (see interserver_http_host), but then it will depends from DNS,
-                   it can use IP address instead, but then the you need to get correct on the initiator node.
-                 - target hostname / ip address (same notes as for source hostname)
-                 - time-based security tokens
-            -->
-            <!-- <secret></secret> -->
-
-            <shard>
-                <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
-                <!-- <internal_replication>false</internal_replication> -->
-                <!-- Optional. Shard weight when writing data. Default: 1. -->
-                <!-- <weight>1</weight> -->
-                <replica>
-                    <host>localhost</host>
-                    <port>9000</port>
-                    <!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
-                    <!-- <priority>1</priority> -->
-                </replica>
-            </shard>
-        </test_shard_localhost>
-        <test_cluster_one_shard_three_replicas_localhost>
-            <shard>
-                <internal_replication>false</internal_replication>
-                <replica>
-                    <host>127.0.0.1</host>
-                    <port>9000</port>
-                </replica>
-                <replica>
-                    <host>127.0.0.2</host>
-                    <port>9000</port>
-                </replica>
-                <replica>
-                    <host>127.0.0.3</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <!--shard>
-                <internal_replication>false</internal_replication>
-                <replica>
-                    <host>127.0.0.1</host>
-                    <port>9000</port>
-                </replica>
-                <replica>
-                    <host>127.0.0.2</host>
-                    <port>9000</port>
-                </replica>
-                <replica>
-                    <host>127.0.0.3</host>
-                    <port>9000</port>
-                </replica>
-            </shard-->
-        </test_cluster_one_shard_three_replicas_localhost>
-        <test_cluster_two_shards_localhost>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-        </test_cluster_two_shards_localhost>
-        <test_cluster_two_shards>
-            <shard>
-                <replica>
-                    <host>127.0.0.1</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <shard>
-                <replica>
-                    <host>127.0.0.2</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-        </test_cluster_two_shards>
-        <test_cluster_two_shards_internal_replication>
-            <shard>
-                <internal_replication>true</internal_replication>
-                <replica>
-                    <host>127.0.0.1</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <shard>
-                <internal_replication>true</internal_replication>
-                <replica>
-                    <host>127.0.0.2</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-        </test_cluster_two_shards_internal_replication>
-        <test_shard_localhost_secure>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9440</port>
-                    <secure>1</secure>
-                </replica>
-            </shard>
-        </test_shard_localhost_secure>
-        <test_unavailable_shard>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>1</port>
-                </replica>
-            </shard>
-        </test_unavailable_shard>
-    </remote_servers>

     <!-- The list of hosts allowed to use in URL-related storage engines and table functions.
          If this section is not present in configuration, all hosts are allowed.
      -->
@@ -786,29 +644,6 @@
          Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
      -->

-    <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
-         Optional. If you don't use replicated tables, you could omit that.
-
-         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-      -->
-
-    <!--
-    <zookeeper>
-        <node>
-            <host>example1</host>
-            <port>2181</port>
-        </node>
-        <node>
-            <host>example2</host>
-            <port>2181</port>
-        </node>
-        <node>
-            <host>example3</host>
-            <port>2181</port>
-        </node>
-    </zookeeper>
-    -->

     <!-- Substitutions for parameters of replicated tables.
          Optional. If you don't use replicated tables, you could omit that.
@@ -7,9 +7,21 @@
|
||||
</default>
|
||||
<s3>
|
||||
<type>s3</type>
|
||||
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
|
||||
<!-- For S3 cold storage,
|
||||
if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
|
||||
if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
|
||||
For GCS cold storage,
|
||||
endpoint should be https://storage.googleapis.com/<bucket-name>/data/
|
||||
-->
|
||||
<endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
|
||||
<access_key_id>ACCESS-KEY-ID</access_key_id>
|
||||
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
|
||||
<!-- In case of S3, uncomment the below configuration in case you want to read
|
||||
AWS credentials from the Environment variables if they exist. -->
|
||||
<!-- <use_environment_credentials>true</use_environment_credentials> -->
|
||||
<!-- In case of GCS, uncomment the below configuration, since GCS does
|
||||
not support batch deletion and result in error messages in logs. -->
|
||||
<!-- <support_batch_delete>false</support_batch_delete> -->
|
||||
</s3>
|
||||
</disks>
|
||||
<policies>
|
||||
|
||||
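The diff truncates at <policies>. For context, a ClickHouse storage policy typically ties the s3 disk into a tiered volume along the lines of the sketch below; the policy and volume names here are illustrative, not taken from this diff:

    <policies>
        <tiered>
            <volumes>
                <hot>
                    <disk>default</disk>
                </hot>
                <cold>
                    <disk>s3</disk>
                </cold>
            </volumes>
        </tiered>
    </policies>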
@@ -1,33 +1,130 @@
version: "3.9"

x-clickhouse-defaults: &clickhouse-defaults
  image: clickhouse/clickhouse-server:22.8.8-alpine
  tty: true
  deploy:
    restart_policy:
      condition: on-failure
  depends_on:
    - zookeeper-1
    # - zookeeper-2
    # - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-clickhouse-depend: &clickhouse-depend
  depends_on:
    - clickhouse
    # - clickhouse-2
    # - clickhouse-3

services:
  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-2:
  #   image: bitnami/zookeeper:3.7.0
  #   hostname: zookeeper-2
  #   user: root
  #   ports:
  #     - "2182:2181"
  #     - "2889:2888"
  #     - "3889:3888"
  #   volumes:
  #     - ./data/zookeeper-2:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=2
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-3:
  #   image: bitnami/zookeeper:3.7.0
  #   hostname: zookeeper-3
  #   user: root
  #   ports:
  #     - "2183:2181"
  #     - "2890:2888"
  #     - "3890:3888"
  #   volumes:
  #     - ./data/zookeeper-3:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=3
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    image: clickhouse/clickhouse-server:22.8.8-alpine
    <<: *clickhouse-defaults
    hostname: clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    tty: true
    #   - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
    deploy:
      restart_policy:
        condition: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/

  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/

  alertmanager:
    image: signoz/alertmanager:0.23.0-0.2
    image: signoz/alertmanager:0.23.1
    volumes:
      - ./data/alertmanager:/data
    command:
@@ -40,7 +137,7 @@ services:
        condition: on-failure

  query-service:
    image: signoz/query-service:0.11.3
    image: signoz/query-service:0.24.0
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
    #   - "6060:6060"     # pprof port
@@ -59,18 +156,17 @@ services:
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse
    <<: *clickhouse-depend

  frontend:
    image: signoz/frontend:0.11.3
    image: signoz/frontend:0.24.0
    deploy:
      restart_policy:
        condition: on-failure
@@ -83,14 +179,16 @@ services:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/signoz-otel-collector:0.63.0
    command: ["--config=/etc/otel-collector-config.yaml"]
    image: signoz/signoz-otel-collector:0.79.4
    command: ["--config=/etc/otel-collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
      - DOCKER_MULTI_NODE_CLUSTER=false
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777"     # pprof extension
      - "4317:4317"       # OTLP gRPC receiver
@@ -107,12 +205,11 @@ services:
      mode: global
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse
    <<: *clickhouse-depend

  otel-collector-metrics:
    image: signoz/signoz-otel-collector:0.63.0
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    image: signoz/signoz-otel-collector:0.79.4
    command: ["--config=/etc/otel-collector-metrics-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    # ports:
@@ -123,8 +220,7 @@ services:
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse
    <<: *clickhouse-depend

  hotrod:
    image: jaegertracing/example-hotrod:1.30
@@ -137,7 +233,7 @@ services:
        max-file: "3"

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    image: "signoz/locust:1.2.3"
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
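For readers unfamiliar with the x-clickhouse-defaults and x-clickhouse-depend blocks above: Compose ignores top-level keys prefixed with x-, and the &name / <<: *name pair is plain YAML anchor and merge-key syntax. A minimal sketch of the mechanism (service and key names here are illustrative, not from this diff):

    x-defaults: &defaults
      image: clickhouse/clickhouse-server:22.8.8-alpine
      tty: true

    services:
      clickhouse:
        <<: *defaults          # merges the anchored mapping into this service
        hostname: clickhouse   # keys set here override merged keys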
@@ -64,7 +64,9 @@ receivers:
      - job_name: otel-collector
        static_configs:
          - targets:
              - localhost:8888
              - localhost:8888
            labels:
              job_name: otel-collector

processors:
  batch:
@@ -73,17 +75,21 @@ processors:
    timeout: 10s
  resourcedetection:
    # Using the OTEL_RESOURCE_ATTRIBUTES envvar, the env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
    detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
    timeout: 2s
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
    dimensions_cache_size: 10000
    dimensions_cache_size: 100000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
      # This is added to ensure the uniqueness of the timeseries.
      # Otherwise, identical timeseries produced by multiple replicas of
      # collectors result in incorrect APM metrics.
      - name: 'signoz.collector.id'
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
@@ -103,15 +109,20 @@ processors:
exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
    low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  clickhousemetricswrite/prometheus:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
  prometheus:
    endpoint: 0.0.0.0:8889
  # logging: {}
  clickhouselogsexporter:
    dsn: tcp://clickhouse:9000/
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
    timeout: 5s
    sending_queue:
      queue_size: 100
@@ -144,9 +155,13 @@ service:
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/generic:
      receivers: [hostmetrics, prometheus]
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite/prometheus]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
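The net effect of this hunk is that scraped Prometheus metrics leave the shared metrics/generic pipeline and get a dedicated pipeline wired to the clickhousemetricswrite/prometheus exporter, so resourcedetection only runs on host metrics. Schematically (a sketch of the routing, not additional configuration):

    hostmetrics -> resourcedetection -> batch -> clickhousemetricswrite
    prometheus  -> batch -> clickhousemetricswrite/prometheus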
@@ -7,7 +7,9 @@ receivers:
        scrape_interval: 60s
        static_configs:
          - targets:
              - localhost:8888
              - localhost:8888
            labels:
              job_name: otel-collector-metrics
      # SigNoz span metrics
      - job_name: signozspanmetrics-collector
        scrape_interval: 60s
@@ -30,6 +30,8 @@ server {

    location /api {
        proxy_pass http://query-service:8080/api;
        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;
    }

    # redirect server error pages to the static page /50x.html
75
deploy/docker/clickhouse-setup/clickhouse-cluster.xml
Normal file
@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<clickhouse>
    <!-- ZooKeeper is used to store metadata about replicas when using Replicated tables.
         Optional. If you don't use replicated tables, you can omit it.

         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
    -->
    <zookeeper>
        <node index="1">
            <host>zookeeper-1</host>
            <port>2181</port>
        </node>
        <!-- <node index="2">
            <host>zookeeper-2</host>
            <port>2181</port>
        </node>
        <node index="3">
            <host>zookeeper-3</host>
            <port>2181</port>
        </node> -->
    </zookeeper>

    <!-- Configuration of clusters that can be used in Distributed tables.
         https://clickhouse.com/docs/en/operations/table_engines/distributed/
    -->
    <remote_servers>
        <cluster>
            <!-- Inter-server per-cluster secret for Distributed queries.
                 Default: no secret (no authentication will be performed).

                 If set, then Distributed queries will be validated on shards, so at least:
                 - such a cluster should exist on the shard,
                 - such a cluster should have the same secret.

                 Also (and more importantly), the initial_user will
                 be used as the current user for the query.

                 Right now the protocol is pretty simple and only takes into account:
                 - cluster name
                 - query

                 It would also be nice to take the following into account:
                 - source hostname (see interserver_http_host), but that would depend on DNS;
                   an IP address could be used instead, but then it must be correct on the initiator node.
                 - target hostname / IP address (same notes as for the source hostname)
                 - time-based security tokens
            -->
            <!-- <secret></secret> -->
            <shard>
                <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
                <!-- <internal_replication>false</internal_replication> -->
                <!-- Optional. Shard weight when writing data. Default: 1. -->
                <!-- <weight>1</weight> -->
                <replica>
                    <host>clickhouse</host>
                    <port>9000</port>
                    <!-- Optional. Priority of the replica for load_balancing. Default: 1 (a lower value has higher priority). -->
                    <!-- <priority>1</priority> -->
                </replica>
            </shard>
            <!-- <shard>
                <replica>
                    <host>clickhouse-2</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse-3</host>
                    <port>9000</port>
                </replica>
            </shard> -->
        </cluster>
    </remote_servers>
</clickhouse>
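With this cluster definition in place, distributed DDL can target it by name. A hypothetical example (database, table, and column names are illustrative, not from this diff):

    CREATE TABLE default.events ON CLUSTER cluster
    (
        ts DateTime,
        value Float64
    )
    ENGINE = MergeTree
    ORDER BY ts;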
@@ -236,8 +236,8 @@
<openSSL>
    <server> <!-- Used for https server AND secure tcp port -->
        <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
        <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
        <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
        <!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile> -->
        <!-- <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
        <!-- dhparams are optional. You can delete the <dhParamsFile> element.
             To generate dhparams, use the following command:
             openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
@@ -618,148 +618,6 @@
    </jdbc_bridge>
-->

<!-- Configuration of clusters that can be used in Distributed tables.
     https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
    <!-- Test only shard config for testing distributed storage -->
    <test_shard_localhost>
        <!-- Inter-server per-cluster secret for Distributed queries.
             Default: no secret (no authentication will be performed).

             If set, then Distributed queries will be validated on shards, so at least:
             - such a cluster should exist on the shard,
             - such a cluster should have the same secret.

             Also (and more importantly), the initial_user will
             be used as the current user for the query.

             Right now the protocol is pretty simple and only takes into account:
             - cluster name
             - query

             It would also be nice to take the following into account:
             - source hostname (see interserver_http_host), but that would depend on DNS;
               an IP address could be used instead, but then it must be correct on the initiator node.
             - target hostname / IP address (same notes as for the source hostname)
             - time-based security tokens
        -->
        <!-- <secret></secret> -->

        <shard>
            <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
            <!-- <internal_replication>false</internal_replication> -->
            <!-- Optional. Shard weight when writing data. Default: 1. -->
            <!-- <weight>1</weight> -->
            <replica>
                <host>localhost</host>
                <port>9000</port>
                <!-- Optional. Priority of the replica for load_balancing. Default: 1 (a lower value has higher priority). -->
                <!-- <priority>1</priority> -->
            </replica>
        </shard>
    </test_shard_localhost>
    <test_cluster_one_shard_three_replicas_localhost>
        <shard>
            <internal_replication>false</internal_replication>
            <replica>
                <host>127.0.0.1</host>
                <port>9000</port>
            </replica>
            <replica>
                <host>127.0.0.2</host>
                <port>9000</port>
            </replica>
            <replica>
                <host>127.0.0.3</host>
                <port>9000</port>
            </replica>
        </shard>
        <!--shard>
            <internal_replication>false</internal_replication>
            <replica>
                <host>127.0.0.1</host>
                <port>9000</port>
            </replica>
            <replica>
                <host>127.0.0.2</host>
                <port>9000</port>
            </replica>
            <replica>
                <host>127.0.0.3</host>
                <port>9000</port>
            </replica>
        </shard-->
    </test_cluster_one_shard_three_replicas_localhost>
    <test_cluster_two_shards_localhost>
        <shard>
            <replica>
                <host>localhost</host>
                <port>9000</port>
            </replica>
        </shard>
        <shard>
            <replica>
                <host>localhost</host>
                <port>9000</port>
            </replica>
        </shard>
    </test_cluster_two_shards_localhost>
    <test_cluster_two_shards>
        <shard>
            <replica>
                <host>127.0.0.1</host>
                <port>9000</port>
            </replica>
        </shard>
        <shard>
            <replica>
                <host>127.0.0.2</host>
                <port>9000</port>
            </replica>
        </shard>
    </test_cluster_two_shards>
    <test_cluster_two_shards_internal_replication>
        <shard>
            <internal_replication>true</internal_replication>
            <replica>
                <host>127.0.0.1</host>
                <port>9000</port>
            </replica>
        </shard>
        <shard>
            <internal_replication>true</internal_replication>
            <replica>
                <host>127.0.0.2</host>
                <port>9000</port>
            </replica>
        </shard>
    </test_cluster_two_shards_internal_replication>
    <test_shard_localhost_secure>
        <shard>
            <replica>
                <host>localhost</host>
                <port>9440</port>
                <secure>1</secure>
            </replica>
        </shard>
    </test_shard_localhost_secure>
    <test_unavailable_shard>
        <shard>
            <replica>
                <host>localhost</host>
                <port>9000</port>
            </replica>
        </shard>
        <shard>
            <replica>
                <host>localhost</host>
                <port>1</port>
            </replica>
        </shard>
    </test_unavailable_shard>
</remote_servers>

<!-- The list of hosts allowed to be used in URL-related storage engines and table functions.
     If this section is not present in the configuration, all hosts are allowed.
-->
@@ -786,29 +644,6 @@
     Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
-->

<!-- ZooKeeper is used to store metadata about replicas when using Replicated tables.
     Optional. If you don't use replicated tables, you can omit it.

     See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->

<!--
<zookeeper>
    <node>
        <host>example1</host>
        <port>2181</port>
    </node>
    <node>
        <host>example2</host>
        <port>2181</port>
    </node>
    <node>
        <host>example3</host>
        <port>2181</port>
    </node>
</zookeeper>
-->

<!-- Substitutions for parameters of replicated tables.
     Optional. If you don't use replicated tables, you can omit it.
-->
@@ -1070,7 +905,8 @@
<dictionaries_config>*_dictionary.xml</dictionaries_config>

<!-- Configuration of user defined executable functions -->
<user_defined_executable_functions_config>*_function.xml</user_defined_executable_functions_config>
<user_defined_executable_functions_config>*function.xml</user_defined_executable_functions_config>
<user_scripts_path>/var/lib/clickhouse/user_scripts/</user_scripts_path>

<!-- Uncomment if you want data to be compressed 30-100% better.
     Don't do that if you just started using ClickHouse.

@@ -7,9 +7,21 @@
    </default>
    <s3>
        <type>s3</type>
        <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
        <!-- For S3 cold storage,
             if the region is us-east-1, the endpoint can be https://<bucket-name>.s3.amazonaws.com
             if the region is not us-east-1, the endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
             For GCS cold storage,
             the endpoint should be https://storage.googleapis.com/<bucket-name>/data/
        -->
        <endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
        <access_key_id>ACCESS-KEY-ID</access_key_id>
        <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
        <!-- For S3, uncomment the configuration below if you want to read
             AWS credentials from environment variables when they exist. -->
        <!-- <use_environment_credentials>true</use_environment_credentials> -->
        <!-- For GCS, uncomment the configuration below, since GCS does not
             support batch deletion and would otherwise produce error messages in the logs. -->
        <!-- <support_batch_delete>false</support_batch_delete> -->
    </s3>
</disks>
<policies>
21
deploy/docker/clickhouse-setup/custom-function.xml
Normal file
@@ -0,0 +1,21 @@
<functions>
    <function>
        <type>executable</type>
        <name>histogramQuantile</name>
        <return_type>Float64</return_type>
        <argument>
            <type>Array(Float64)</type>
            <name>buckets</name>
        </argument>
        <argument>
            <type>Array(Float64)</type>
            <name>counts</name>
        </argument>
        <argument>
            <type>Float64</type>
            <name>quantile</name>
        </argument>
        <format>CSV</format>
        <command>./histogramQuantile</command>
    </function>
</functions>
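Once ClickHouse loads this definition and finds the histogramQuantile binary under user_scripts, the executable UDF should be callable from SQL like a built-in function. A hypothetical query (the bucket bounds, counts, and quantile values are illustrative):

    SELECT histogramQuantile([1.0, 2.0, 4.0, 8.0, inf], [1.0, 5.0, 8.0, 10.0, 14.0], 0.9) AS p90;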
@@ -27,7 +27,7 @@ services:

  alertmanager:
    container_name: alertmanager
    image: signoz/alertmanager:0.23.0-0.2
    image: signoz/alertmanager:0.23.1
    volumes:
      - ./data/alertmanager:/data
    depends_on:
@@ -41,8 +41,8 @@ services:
  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
  otel-collector:
    container_name: otel-collector
    image: signoz/signoz-otel-collector:0.63.0
    command: ["--config=/etc/otel-collector-config.yaml"]
    image: signoz/signoz-otel-collector:0.79.4
    command: ["--config=/etc/otel-collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    # user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -67,8 +67,8 @@ services:

  otel-collector-metrics:
    container_name: otel-collector-metrics
    image: signoz/signoz-otel-collector:0.63.0
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    image: signoz/signoz-otel-collector:0.79.4
    command: ["--config=/etc/otel-collector-metrics-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    # ports:
@@ -93,7 +93,7 @@ services:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:

@@ -28,7 +28,7 @@ services:
      - "8080:8080"
    restart: on-failure
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
      interval: 30s
      timeout: 5s
      retries: 3
@@ -1,31 +1,145 @@
version: "2.4"

x-clickhouse-defaults: &clickhouse-defaults
  restart: on-failure
  image: clickhouse/clickhouse-server:22.8.8-alpine
  tty: true
  depends_on:
    - zookeeper-1
    # - zookeeper-2
    # - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-clickhouse-depend: &clickhouse-depend
  depends_on:
    clickhouse:
      condition: service_healthy
    # clickhouse-2:
    #   condition: service_healthy
    # clickhouse-3:
    #   condition: service_healthy

services:

  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    container_name: zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-2:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: zookeeper-2
  #   hostname: zookeeper-2
  #   user: root
  #   ports:
  #     - "2182:2181"
  #     - "2889:2888"
  #     - "3889:3888"
  #   volumes:
  #     - ./data/zookeeper-2:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=2
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-3:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: zookeeper-3
  #   hostname: zookeeper-3
  #   user: root
  #   ports:
  #     - "2183:2181"
  #     - "2890:2888"
  #     - "3890:3888"
  #   volumes:
  #     - ./data/zookeeper-3:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=3
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    image: clickhouse/clickhouse-server:22.8.8-alpine
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    tty: true
    <<: *clickhouse-defaults
    container_name: clickhouse
    hostname: clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
    restart: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3
      - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   container_name: clickhouse-2
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   container_name: clickhouse-3
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  alertmanager:
    image: signoz/alertmanager:0.23.0-0.2
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.1}
    volumes:
      - ./data/alertmanager:/data
    depends_on:
@@ -36,10 +150,10 @@ services:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:0.11.3
    image: signoz/query-service:${DOCKER_TAG:-0.24.0}
    container_name: query-service
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
@@ -60,16 +174,14 @@ services:
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      clickhouse:
        condition: service_healthy
    <<: *clickhouse-depend

  frontend:
    image: signoz/frontend:0.11.3
    image: signoz/frontend:${DOCKER_TAG:-0.24.0}
    container_name: frontend
    restart: on-failure
    depends_on:
@@ -81,14 +193,16 @@ services:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/signoz-otel-collector:0.63.0
    command: ["--config=/etc/otel-collector-config.yaml"]
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.4}
    command: ["--config=/etc/otel-collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - DOCKER_MULTI_NODE_CLUSTER=false
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777"     # pprof extension
      - "4317:4317"       # OTLP gRPC receiver
@@ -102,13 +216,11 @@ services:
      # - "55678:55678"   # OpenCensus receiver
      # - "55679:55679"   # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
    <<: *clickhouse-depend

  otel-collector-metrics:
    image: signoz/signoz-otel-collector:0.63.0
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.4}
    command: ["--config=/etc/otel-collector-metrics-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    # ports:
@@ -117,23 +229,21 @@ services:
      # - "13133:13133"   # Health check extension
      # - "55679:55679"   # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
    <<: *clickhouse-depend

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
@@ -64,9 +64,46 @@ receivers:
      - job_name: otel-collector
        static_configs:
          - targets:
              - localhost:8888
              - localhost:8888
            labels:
              job_name: otel-collector


processors:
  logstransform/internal:
    operators:
      - type: trace_parser
        if: '"trace_id" in attributes or "span_id" in attributes'
        trace_id:
          parse_from: attributes.trace_id
        span_id:
          parse_from: attributes.span_id
        output: remove_trace_id
      - type: trace_parser
        if: '"traceId" in attributes or "spanId" in attributes'
        trace_id:
          parse_from: attributes.traceId
        span_id:
          parse_from: attributes.spanId
        output: remove_traceId
      - id: remove_traceId
        type: remove
        if: '"traceId" in attributes'
        field: attributes.traceId
        output: remove_spanId
      - id: remove_spanId
        type: remove
        if: '"spanId" in attributes'
        field: attributes.spanId
      - id: remove_trace_id
        type: remove
        if: '"trace_id" in attributes'
        field: attributes.trace_id
        output: remove_span_id
      - id: remove_span_id
        type: remove
        if: '"span_id" in attributes'
        field: attributes.span_id
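The operator chain above promotes trace context out of log attributes and then deletes the originals, handling both snake_case and camelCase keys. Roughly, a record such as this sketch (not actual collector output):

    { "body": "payment failed", "attributes": { "traceId": "abc123", "spanId": "def456" } }

comes out with trace_id and span_id set on the log record itself and the traceId/spanId attributes removed, so the same query paths work regardless of which naming convention the instrumentation used.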
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
@@ -74,12 +111,16 @@ processors:
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
    dimensions_cache_size: 10000
    dimensions_cache_size: 100000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
      # This is added to ensure the uniqueness of the timeseries.
      # Otherwise, identical timeseries produced by multiple replicas of
      # collectors result in incorrect APM metrics.
      - name: 'signoz.collector.id'
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
@@ -97,7 +138,7 @@ processors:
  #   retry_on_failure: true
  resourcedetection:
    # Using the OTEL_RESOURCE_ATTRIBUTES envvar, the env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
    detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
    timeout: 2s

extensions:
@@ -111,16 +152,21 @@ extensions:
exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
    low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  clickhousemetricswrite/prometheus:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
  prometheus:
    endpoint: 0.0.0.0:8889
  # logging: {}

  clickhouselogsexporter:
    dsn: tcp://clickhouse:9000/
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
    timeout: 5s
    sending_queue:
      queue_size: 100
@@ -148,13 +194,17 @@ service:
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/generic:
      receivers: [hostmetrics, prometheus]
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite/prometheus]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
    logs:
      receivers: [otlp, filelog/dockercontainers]
      processors: [batch]
      processors: [logstransform/internal, batch]
      exporters: [clickhouselogsexporter]
@@ -11,7 +11,9 @@ receivers:
        scrape_interval: 60s
        static_configs:
          - targets:
              - localhost:8888
              - localhost:8888
            labels:
              job_name: otel-collector-metrics
      # SigNoz span metrics
      - job_name: signozspanmetrics-collector
        scrape_interval: 60s
BIN
deploy/docker/clickhouse-setup/user_scripts/histogramQuantile
Executable file
Binary file not shown.
237
deploy/docker/clickhouse-setup/user_scripts/histogramQuantile.go
Normal file
@@ -0,0 +1,237 @@
package main

import (
	"bufio"
	"fmt"
	"math"
	"os"
	"sort"
	"strconv"
	"strings"
)

// NOTE: the executable must be built with the target OS and architecture set to linux/amd64
// env GOOS=linux GOARCH=amd64 go build -o histogramQuantile histogramQuantile.go

// The following code is adapted from the following source:
// https://github.com/prometheus/prometheus/blob/main/promql/quantile.go

type bucket struct {
	upperBound float64
	count      float64
}

// buckets implements sort.Interface.
type buckets []bucket

func (b buckets) Len() int           { return len(b) }
func (b buckets) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }

// bucketQuantile calculates the quantile 'q' based on the given buckets. The
// buckets will be sorted by upperBound by this function (i.e. no sorting
// needed before calling this function). The quantile value is interpolated
// assuming a linear distribution within a bucket. However, if the quantile
// falls into the highest bucket, the upper bound of the 2nd highest bucket is
// returned. A natural lower bound of 0 is assumed if the upper bound of the
// lowest bucket is greater than 0. In that case, interpolation in the lowest bucket
// happens linearly between 0 and the upper bound of the lowest bucket.
// However, if the lowest bucket has an upper bound less than or equal to 0, this upper
// bound is returned if the quantile falls into the lowest bucket.
//
// There are a number of special cases (once we have a way to report errors
// happening during evaluations of AST functions, we should report those
// explicitly):
//
// If 'buckets' has 0 observations, NaN is returned.
//
// If 'buckets' has fewer than 2 elements, NaN is returned.
//
// If the highest bucket is not +Inf, NaN is returned.
//
// If q==NaN, NaN is returned.
//
// If q<0, -Inf is returned.
//
// If q>1, +Inf is returned.
func bucketQuantile(q float64, buckets buckets) float64 {
	if math.IsNaN(q) {
		return math.NaN()
	}
	if q < 0 {
		return math.Inf(-1)
	}
	if q > 1 {
		return math.Inf(+1)
	}
	sort.Sort(buckets)
	if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
		return math.NaN()
	}

	buckets = coalesceBuckets(buckets)
	ensureMonotonic(buckets)

	if len(buckets) < 2 {
		return math.NaN()
	}
	observations := buckets[len(buckets)-1].count
	if observations == 0 {
		return math.NaN()
	}
	rank := q * observations
	b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })

	if b == len(buckets)-1 {
		return buckets[len(buckets)-2].upperBound
	}
	if b == 0 && buckets[0].upperBound <= 0 {
		return buckets[0].upperBound
	}
	var (
		bucketStart float64
		bucketEnd   = buckets[b].upperBound
		count       = buckets[b].count
	)
	if b > 0 {
		bucketStart = buckets[b-1].upperBound
		count -= buckets[b-1].count
		rank -= buckets[b-1].count
	}
	return bucketStart + (bucketEnd-bucketStart)*(rank/count)
}

// coalesceBuckets merges buckets with the same upper bound.
//
// The input buckets must be sorted.
func coalesceBuckets(buckets buckets) buckets {
	last := buckets[0]
	i := 0
	for _, b := range buckets[1:] {
		if b.upperBound == last.upperBound {
			last.count += b.count
		} else {
			buckets[i] = last
			last = b
			i++
		}
	}
	buckets[i] = last
	return buckets[:i+1]
}

// The assumption that bucket counts increase monotonically with increasing
// upperBound may be violated during:
//
//   * Recording rule evaluation of histogram_quantile, especially when rate()
//     has been applied to the underlying bucket timeseries.
//   * Evaluation of histogram_quantile computed over federated bucket
//     timeseries, especially when rate() has been applied.
//
// This is because scraped data is not made available to rule evaluation or
// federation atomically, so some buckets are computed with data from the
// most recent scrapes, but the other buckets are missing data from the most
// recent scrape.
//
// Monotonicity is usually guaranteed because if a bucket with upper bound
// u1 has count c1, then any bucket with a higher upper bound u > u1 must
// have counted all c1 observations and perhaps more, so that c >= c1.
//
// Randomly interspersed partial sampling breaks that guarantee, and rate()
// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
// monotonicity is broken. It is exacerbated by rate() because under normal
// operation, cumulative counting of buckets will cause the bucket counts to
// diverge such that small differences from missing samples are not a problem.
// rate() removes this divergence.
//
// bucketQuantile depends on that monotonicity to do a binary search for the
// bucket with the φ-quantile count, so breaking the monotonicity
// guarantee causes bucketQuantile() to return undefined (nonsense) results.
//
// As a somewhat hacky solution until ingestion is atomic per scrape, we
// calculate the "envelope" of the histogram buckets, essentially removing
// any decreases in the count between successive buckets.

func ensureMonotonic(buckets buckets) {
	max := buckets[0].count
	for i := 1; i < len(buckets); i++ {
		switch {
		case buckets[i].count > max:
			max = buckets[i].count
		case buckets[i].count < max:
			buckets[i].count = max
		}
	}
}

// End of copied code.

func readLines() []string {
	r := bufio.NewReader(os.Stdin)
	bytes := []byte{}
	lines := []string{}
	for {
		line, isPrefix, err := r.ReadLine()
		if err != nil {
			break
		}
		bytes = append(bytes, line...)
		if !isPrefix {
			str := strings.TrimSpace(string(bytes))
			if len(str) > 0 {
				lines = append(lines, str)
				bytes = []byte{}
			}
		}
	}
	if len(bytes) > 0 {
		lines = append(lines, string(bytes))
	}
	return lines
}

func main() {
	lines := readLines()
	for _, text := range lines {
		// Example input
		// "[1, 2, 4, 8, 16]", "[1, 5, 8, 10, 14]", 0.9"
		// bounds - counts - quantile
		parts := strings.Split(text, "\",")

		var bucketNumbers []float64
		// Strip the ends with square brackets
		text = parts[0][2 : len(parts[0])-1]
		// Parse the bucket bounds
		for _, num := range strings.Split(text, ",") {
			num = strings.TrimSpace(num)
			number, err := strconv.ParseFloat(num, 64)
			if err == nil {
				bucketNumbers = append(bucketNumbers, number)
			}
		}

		var bucketCounts []float64
		// Strip the ends with square brackets
		text = parts[1][2 : len(parts[1])-1]
		// Parse the bucket counts
		for _, num := range strings.Split(text, ",") {
			num = strings.TrimSpace(num)
			number, err := strconv.ParseFloat(num, 64)
			if err == nil {
				bucketCounts = append(bucketCounts, number)
			}
		}

		// Parse the quantile
		q, err := strconv.ParseFloat(parts[2], 64)
		var b buckets

		if err == nil {
			for i := 0; i < len(bucketNumbers); i++ {
				b = append(b, bucket{upperBound: bucketNumbers[i], count: bucketCounts[i]})
			}
		}
		fmt.Println(bucketQuantile(q, b))
	}
}
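A quick sanity check of the binary, assuming it has been built for linux/amd64 as the note at the top of the file says (the input values are illustrative): feeding buckets [1, 2, 4, 8, +Inf] with cumulative counts [1, 5, 8, 10, 14] and q = 0.9 gives rank 0.9 * 14 = 12.6, which falls in the +Inf bucket, so bucketQuantile returns the upper bound of the second-highest bucket:

    $ echo '"[1, 2, 4, 8, +Inf]","[1, 5, 8, 10, 14]",0.9' | ./histogramQuantile
    8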
@@ -30,6 +30,8 @@ server {

    location /api {
        proxy_pass http://query-service:8080/api;
        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;
    }

    # redirect server error pages to the static page /50x.html
@@ -51,7 +51,7 @@ check_os() {
    os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"

    case "$os_name" in
        Ubuntu*)
        Ubuntu*|Pop!_OS)
            desired_os=1
            os="ubuntu"
            package_manager="apt-get"
@@ -81,6 +81,11 @@ check_os() {
            os="centos"
            package_manager="yum"
            ;;
        Rocky*)
            desired_os=1
            os="centos"
            package_manager="yum"
            ;;
        SLES*)
            desired_os=1
            os="sles"
@@ -120,7 +125,7 @@ check_ports_occupied() {

    echo "+++++++++++ ERROR ++++++++++++++++++++++"
    echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
    echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
    echo "You can run SigNoz on another port following this guide https://signoz.io/docs/install/troubleshooting/"
    echo "++++++++++++++++++++++++++++++++++++++++"
    echo ""
    exit 1
@@ -223,7 +228,7 @@ wait_for_containers_start() {

    # The while loop is important because for-loops don't work for dynamic values
    while [[ $timeout -gt 0 ]]; do
        status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
        status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
        if [[ status_code -eq 200 ]]; then
            break
        else
@@ -244,7 +249,7 @@ bye() {  # Prints a friendly good bye message and exits the script.
    echo ""
    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

    # echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
    echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
    echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
    echo "++++++++++++++++++++++++++++++++++++++++"

@@ -272,7 +277,7 @@ request_sudo() {
    echo -e "\n\n🙇 We will need sudo access to complete the installation."
    if (( $EUID != 0 )); then
        sudo_cmd="sudo"
        echo -e "Please enter your sudo password, if prompt."
        echo -e "Please enter your sudo password, if prompted."
        # $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
        # if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
        #     echo "Need sudo privileges to proceed with the installation."
@@ -495,7 +500,7 @@ if [[ $status_code -ne 200 ]]; then

    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
    echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
    echo "or reach us on SigNoz for support https://signoz.io/slack"
    echo "++++++++++++++++++++++++++++++++++++++++"

@@ -511,13 +516,15 @@ else
    echo ""
    echo -e "🟢 Your frontend is running on http://localhost:3301"
    echo ""
    echo "ℹ️ By default, retention period is set to 7 days for logs and traces, and 30 days for metrics."
    echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"

    echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"

    echo ""
    echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
    echo ""
    echo "👉 Need help Getting Started?"
    echo "👉 Need help in Getting Started?"
    echo -e "Join us on Slack https://signoz.io/slack"
    echo ""
    echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
@@ -1,4 +1,4 @@
FROM golang:1.17-buster AS builder
FROM golang:1.18-buster AS builder

# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
ARG LD_FLAGS
@@ -9,16 +9,20 @@ import (
	"go.signoz.io/signoz/ee/query-service/license"
	baseapp "go.signoz.io/signoz/pkg/query-service/app"
	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
	basemodel "go.signoz.io/signoz/pkg/query-service/model"
	rules "go.signoz.io/signoz/pkg/query-service/rules"
	"go.signoz.io/signoz/pkg/query-service/version"
)

type APIHandlerOptions struct {
	DataConnector  interfaces.DataConnector
	AppDao         dao.ModelDao
	RulesManager   *rules.Manager
	FeatureFlags   baseint.FeatureLookup
	LicenseManager *license.Manager
	DataConnector     interfaces.DataConnector
	SkipConfig        *basemodel.SkipConfig
	PreferDelta       bool
	PreferSpanMetrics bool
	AppDao            dao.ModelDao
	RulesManager      *rules.Manager
	FeatureFlags      baseint.FeatureLookup
	LicenseManager    *license.Manager
}

type APIHandler struct {
@@ -30,10 +34,13 @@ type APIHandler struct {
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {

	baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
		Reader:       opts.DataConnector,
		AppDao:       opts.AppDao,
		RuleManager:  opts.RulesManager,
		FeatureFlags: opts.FeatureFlags})
		Reader:            opts.DataConnector,
		SkipConfig:        opts.SkipConfig,
		PerferDelta:       opts.PreferDelta,
		PreferSpanMetrics: opts.PreferSpanMetrics,
		AppDao:            opts.AppDao,
		RuleManager:       opts.RulesManager,
		FeatureFlags:      opts.FeatureFlags})

	if err != nil {
		return nil, err
@@ -68,60 +75,75 @@ func (ah *APIHandler) CheckFeature(f string) bool {
}

// RegisterRoutes registers routes for this handler on the given router
func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
	// note: add ee override methods first

	// routes available only in ee version
	router.HandleFunc("/api/v1/licenses",
		baseapp.AdminAccess(ah.listLicenses)).
		am.AdminAccess(ah.listLicenses)).
		Methods(http.MethodGet)

	router.HandleFunc("/api/v1/licenses",
		baseapp.AdminAccess(ah.applyLicense)).
		am.AdminAccess(ah.applyLicense)).
		Methods(http.MethodPost)

	router.HandleFunc("/api/v1/featureFlags",
		baseapp.OpenAccess(ah.getFeatureFlags)).
		am.OpenAccess(ah.getFeatureFlags)).
		Methods(http.MethodGet)

	router.HandleFunc("/api/v1/loginPrecheck",
		baseapp.OpenAccess(ah.precheckLogin)).
		am.OpenAccess(ah.precheckLogin)).
		Methods(http.MethodGet)

	// paid plans specific routes
	router.HandleFunc("/api/v1/complete/saml",
		baseapp.OpenAccess(ah.receiveSAML)).
		am.OpenAccess(ah.receiveSAML)).
		Methods(http.MethodPost)

	router.HandleFunc("/api/v1/complete/google",
		am.OpenAccess(ah.receiveGoogleAuth)).
		Methods(http.MethodGet)

	router.HandleFunc("/api/v1/orgs/{orgId}/domains",
		baseapp.AdminAccess(ah.listDomainsByOrg)).
		am.AdminAccess(ah.listDomainsByOrg)).
		Methods(http.MethodGet)

	router.HandleFunc("/api/v1/domains",
		baseapp.AdminAccess(ah.postDomain)).
		am.AdminAccess(ah.postDomain)).
		Methods(http.MethodPost)

	router.HandleFunc("/api/v1/domains/{id}",
		baseapp.AdminAccess(ah.putDomain)).
		am.AdminAccess(ah.putDomain)).
		Methods(http.MethodPut)

	router.HandleFunc("/api/v1/domains/{id}",
		baseapp.AdminAccess(ah.deleteDomain)).
		am.AdminAccess(ah.deleteDomain)).
		Methods(http.MethodDelete)

	// base overrides
	router.HandleFunc("/api/v1/version", baseapp.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/invite/{token}", baseapp.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/register", baseapp.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/login", baseapp.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/traces/{traceId}", baseapp.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
	router.HandleFunc("/api/v2/metrics/query_range", baseapp.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
	router.HandleFunc("/api/v2/metrics/query_range", am.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)

	ah.APIHandler.RegisterRoutes(router)
	// PAT APIs
	router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.createPAT)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.getPATs)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/pat/{id}", am.OpenAccess(ah.deletePAT)).Methods(http.MethodDelete)

	ah.APIHandler.RegisterRoutes(router, am)

}
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
|
||||
version := version.GetVersion()
|
||||
ah.WriteJSON(w, r, map[string]string{"version": version, "ee": "Y"})
|
||||
versionResponse := basemodel.GetVersionResponse{
|
||||
Version: version,
|
||||
EE: "Y",
|
||||
SetupCompleted: ah.SetupCompleted,
|
||||
}
|
||||
|
||||
ah.WriteJSON(w, r, versionResponse)
|
||||
}
|
||||
|
||||
@@ -8,9 +8,7 @@ import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/gorilla/mux"
|
||||
"go.signoz.io/signoz/ee/query-service/constants"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
@@ -90,9 +88,16 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// get invite object
|
||||
invite, err := baseauth.ValidateInvite(ctx, req)
|
||||
if err != nil || invite == nil {
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to validate invite token", err)
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if invite == nil {
|
||||
zap.S().Errorf("failed to validate invite token: it is either empty or invalid", err)
|
||||
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
|
||||
return
|
||||
}
|
||||
|
||||
// get auth domain from email domain
|
||||
@@ -184,114 +189,149 @@ func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
|
||||
ah.Respond(w, resp)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
|
||||
// this is the source url that initiated the login request
|
||||
func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
|
||||
ssoError := []byte("Login failed. Please contact your system administrator")
|
||||
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
|
||||
base64.StdEncoding.Encode(dst, ssoError)
|
||||
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
|
||||
}
|
||||
|
||||
// receiveGoogleAuth completes google OAuth response and forwards a request
|
||||
// to front-end to sign user in
|
||||
func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
|
||||
redirectUri := constants.GetDefaultSiteURL()
|
||||
ctx := context.Background()
|
||||
|
||||
var apierr basemodel.BaseApiError
|
||||
|
||||
redirectOnError := func() {
|
||||
ssoError := []byte("Login failed. Please contact your system administrator")
|
||||
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
|
||||
base64.StdEncoding.Encode(dst, ssoError)
|
||||
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, string(dst)), http.StatusMovedPermanently)
|
||||
}
|
||||
|
||||
if !ah.CheckFeature(model.SSO) {
|
||||
zap.S().Errorf("[ReceiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
|
||||
zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
|
||||
redirectOnError()
|
||||
q := r.URL.Query()
|
||||
if errType := q.Get("error"); errType != "" {
|
||||
zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
|
||||
// the relay state is sent when a login request is submitted to
|
||||
// Idp.
|
||||
relayState := r.FormValue("RelayState")
|
||||
zap.S().Debug("[ReceiveML] relay state", zap.String("relayState", relayState))
|
||||
relayState := q.Get("state")
|
||||
zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
|
||||
|
||||
parsedState, err := url.Parse(relayState)
|
||||
if err != nil || relayState == "" {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
|
||||
redirectOnError()
|
||||
zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
// upgrade redirect url from the relay state for better accuracy
|
||||
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
|
||||
|
||||
// derive domain id from relay state now
|
||||
var domainIdStr string
|
||||
for k, v := range parsedState.Query() {
|
||||
if k == "domainId" && len(v) > 0 {
|
||||
domainIdStr = strings.Replace(v[0], ":", "-", -1)
|
||||
}
|
||||
}
|
||||
|
||||
domainId, err := uuid.Parse(domainIdStr)
|
||||
// fetch domain by parsing relay state.
|
||||
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to process request- failed to parse domain id ifrom relay", zap.Error(err))
|
||||
redirectOnError()
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
domain, apierr := ah.AppDao().GetDomain(ctx, domainId)
|
||||
if (apierr != nil) || domain == nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to process request- invalid domain", domainIdStr, zap.Error(apierr))
|
||||
redirectOnError()
|
||||
// now that we have domain, use domain to fetch sso settings.
|
||||
// prepare google callback handler using parsedState -
|
||||
// which contains redirect URL (front-end endpoint)
|
||||
callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
|
||||
|
||||
identity, err := callbackHandler.HandleCallback(r)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
http.Redirect(w, r, nextPage, http.StatusSeeOther)
|
||||
}
|
||||
|
||||
// receiveSAML completes a SAML request and gets user logged in
|
||||
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
|
||||
// this is the source url that initiated the login request
|
||||
redirectUri := constants.GetDefaultSiteURL()
|
||||
ctx := context.Background()
|
||||
|
||||
if !ah.CheckFeature(model.SSO) {
|
||||
zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
// the relay state is sent when a login request is submitted to
|
||||
// Idp.
|
||||
relayState := r.FormValue("RelayState")
|
||||
zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
|
||||

parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}

// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")

// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}

sp, err := domain.PrepareSamlRequest(parsedState)
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to prepare saml request for domain (%s): %v", domainId, err)
redirectOnError()
zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}

assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domainId, err)
redirectOnError()
zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}

if assertionInfo.WarningInfo.InvalidTime {
zap.S().Errorf("[ReceiveSAML] expired saml response for organization (%s): %v", domainId, err)
redirectOnError()
zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}

email := assertionInfo.NameID

// user email found, now start preparing jwt response
userPayload, baseapierr := ah.AppDao().GetUserByEmail(ctx, email)
if baseapierr != nil {
zap.S().Errorf("[ReceiveSAML] failed to find or register a new user for email %s and org %s", email, domainId, zap.Error(baseapierr.Err))
redirectOnError()
if email == "" {
zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
handleSsoError(w, r, redirectUri)
return
}

tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to generate access token for email %s and org %s", email, domainId, zap.Error(err))
redirectOnError()
zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}

userID := userPayload.User.Id
nextPage := fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
userID,
tokenStore.RefreshJwt)

http.Redirect(w, r, nextPage, http.StatusMovedPermanently)
http.Redirect(w, r, nextPage, http.StatusSeeOther)
}

@@ -2,9 +2,23 @@ package api

import (
"net/http"

basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
featureSet := ah.FF().GetFeatureFlags()
featureSet, err := ah.FF().GetFeatureFlags()
if err != nil {
ah.HandleError(w, err, http.StatusInternalServerError)
return
}
if ah.opts.PreferSpanMetrics {
for idx := range featureSet {
feature := &featureSet[idx]
if feature.Name == basemodel.UseSpanMetrics {
featureSet[idx].Active = true
}
}
}
ah.Respond(w, featureSet)
}

ee/query-service/app/api/pat.go (new file, 107 lines)
@@ -0,0 +1,107 @@
package api

import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"time"

"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.uber.org/zap"
)

func generatePATToken() string {
// Generate a 32-byte random token.
token := make([]byte, 32)
rand.Read(token)
// Encode the token in base64.
encodedToken := base64.StdEncoding.EncodeToString(token)
return encodedToken
}
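
An aside on the helper above: `crypto/rand.Read` can fail, and the committed version discards that error. A minimal hedged variant that surfaces it (the name `generatePATTokenChecked` is illustrative, not part of the commit):

```go
package api

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// generatePATTokenChecked sketches the same helper with the (rare)
// error from crypto/rand propagated instead of silently ignored.
func generatePATTokenChecked() (string, error) {
	token := make([]byte, 32)
	if _, err := rand.Read(token); err != nil {
		return "", fmt.Errorf("failed to generate random token: %w", err)
	}
	return base64.StdEncoding.EncodeToString(token), nil
}
```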

func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()

req := model.PAT{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}

// All the PATs are associated with the user creating the PAT. Hence, the permissions
// associated with the PAT is also equivalent to that of the user.
req.UserID = user.Id
req.CreatedAt = time.Now().Unix()
req.Token = generatePATToken()

zap.S().Debugf("Got PAT request: %+v", req)
if apierr := ah.AppDao().CreatePAT(ctx, &req); apierr != nil {
RespondError(w, apierr, nil)
return
}

ah.Respond(w, &req)
}

func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
zap.S().Infof("Get PATs for user: %+v", user.Id)
pats, apierr := ah.AppDao().ListPATs(ctx, user.Id)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, pats)
}

func (ah *APIHandler) deletePAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
id := mux.Vars(r)["id"]
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
pat, apierr := ah.AppDao().GetPATByID(ctx, id)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
if pat.UserID != user.Id {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: fmt.Errorf("unauthorized PAT delete request"),
}, nil)
return
}
zap.S().Debugf("Delete PAT with id: %+v", id)
if apierr := ah.AppDao().DeletePAT(ctx, id); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, map[string]string{"data": "pat deleted successfully"})
}
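
Taken together, these handlers add a small CRUD surface at /api/v1/pat. A hedged client-side sketch of creating a token follows; the host, port, and bearer-token session auth are assumptions about a typical deployment, not something this diff pins down:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed: query-service reachable on localhost:8080 and a valid
	// session JWT for the calling user; both are illustrative values.
	body := bytes.NewBufferString(`{"name": "ci-token"}`)
	req, err := http.NewRequest(http.MethodPost, "http://localhost:8080/api/v1/pat", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <session-jwt>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// createPAT echoes the stored PAT back, including the generated token.
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```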
@@ -12,6 +12,7 @@ import (
"time"

"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
@@ -91,6 +92,32 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
metricPoint.Timestamp = v.UnixMilli()
case *float64:
metricPoint.Value = *v
case **float64:
// ch seems to return this type when column is derived from
// SELECT count(*)/ SELECT count(*)
floatVal := *v
if floatVal != nil {
metricPoint.Value = *floatVal
}
case *float32:
float32Val := float32(*v)
metricPoint.Value = float64(float32Val)
case *uint8, *uint64, *uint16, *uint32:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
}
case *int8, *int16, *int32, *int64:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
zap.S().Errorf("invalid var found in metric builder query result", v, colName)
}
}
sort.Strings(groupBy)

@@ -1,8 +1,11 @@
package app

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // http profiler
@@ -19,13 +22,23 @@ import (
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/interfaces"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
baseInterface "go.signoz.io/signoz/pkg/query-service/interfaces"

licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"

"go.signoz.io/signoz/pkg/query-service/agentConf"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/opamp"
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/telemetry"
@@ -33,13 +46,18 @@ import (
"go.uber.org/zap"
)

const AppDbEngine = "sqlite"

type ServerOptions struct {
PromConfigPath string
HTTPHostPort string
PrivateHostPort string
PromConfigPath string
SkipTopLvlOpsPath string
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
DisableRules bool
RuleRepoURL string
PreferDelta bool
PreferSpanMetrics bool
}

// Server runs HTTP api service
@@ -76,6 +94,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}

baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)

localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)

if err != nil {
@@ -102,7 +122,15 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
go qb.Start(readerReady)
reader = qb
} else {
return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
return nil, fmt.Errorf("storage type: %s is not supported in query service", storage)
}
skipConfig := &basemodel.SkipConfig{}
if serverOptions.SkipTopLvlOpsPath != "" {
// read skip config
skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
if err != nil {
return nil, err
}
}

<-readerReady
@@ -111,20 +139,45 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.RuleRepoURL,
localDB,
reader,
serverOptions.DisableRules)
serverOptions.DisableRules,
lm)

if err != nil {
return nil, err
}

// initiate opamp
_, err = opAmpModel.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
if err != nil {
return nil, err
}

// initiate agent config handler
if err := agentConf.Initiate(localDB, AppDbEngine); err != nil {
return nil, err
}

// start the usagemanager
usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn())
if err != nil {
return nil, err
}
err = usageManager.Start()
if err != nil {
return nil, err
}

telemetry.GetInstance().SetReader(reader)

apiOpts := api.APIHandlerOptions{
DataConnector: reader,
AppDao: modelDao,
RulesManager: rm,
FeatureFlags: lm,
LicenseManager: lm,
DataConnector: reader,
SkipConfig: skipConfig,
PreferDelta: serverOptions.PreferDelta,
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
AppDao: modelDao,
RulesManager: rm,
FeatureFlags: lm,
LicenseManager: lm,
}

apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -173,7 +226,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
// ip here for alert manager
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY"},
})

handler := c.Handler(r)
@@ -188,13 +241,33 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e

r := mux.NewRouter()

getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
patToken := r.Header.Get("SIGNOZ-API-KEY")
if len(patToken) > 0 {
zap.S().Debugf("Received a non-zero length PAT token")
ctx := context.Background()
dao := apiHandler.AppDao()

user, err := dao.GetUserByPAT(ctx, patToken)
if err == nil && user != nil {
zap.S().Debugf("Found valid PAT user: %+v", user)
return user, nil
}
if err != nil {
zap.S().Debugf("Error while getting user for PAT: %+v", err)
}
}
return baseauth.GetUserFromRequest(r)
}
am := baseapp.NewAuthMiddleware(getUserFromRequest)
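
The middleware above tries the SIGNOZ-API-KEY header first and falls back to normal session auth, so a PAT can stand in for a login on any route. A hedged sketch of calling an endpoint with a PAT (host and endpoint are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed: a PAT created earlier via POST /api/v1/pat; the auth
	// middleware resolves it to a user through GetUserByPAT.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/api/v1/version", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("SIGNOZ-API-KEY", "<pat-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```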
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)

apiHandler.RegisterRoutes(r)
apiHandler.RegisterMetricsRoutes(r)
apiHandler.RegisterLogsRoutes(r)
apiHandler.RegisterRoutes(r, am)
apiHandler.RegisterMetricsRoutes(r, am)
apiHandler.RegisterLogsRoutes(r, am)
apiHandler.RegisterQueryRangeV3Routes(r, am)

c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
@@ -218,7 +291,7 @@ func loggingMiddleware(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.S().Info(path, "\ttimeTaken: ", time.Now().Sub(startTime))
zap.L().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
})
}

@@ -230,7 +303,7 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.S().Info(path, "\tprivatePort: true", "\ttimeTaken: ", time.Now().Sub(startTime))
zap.L().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
})
}

@@ -255,15 +328,82 @@ func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}

func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFrom := "/api/v2/metrics/query_range"

data := map[string]interface{}{}
var postData *basemodel.QueryRangeParamsV2

if path == pathToExtractBodyFrom && (r.Method == "POST") {
if r.Body != nil {
bodyBytes, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, false
}
r.Body.Close() // must close
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &postData)

} else {
return nil, false
}

} else {
return nil, false
}

signozMetricNotFound := false

if postData != nil {
signozMetricNotFound = telemetry.GetInstance().CheckSigNozMetricsV2(postData.CompositeMetricQuery)

if postData.CompositeMetricQuery != nil {
data["queryType"] = postData.CompositeMetricQuery.QueryType
data["panelType"] = postData.CompositeMetricQuery.PanelType
}

data["datasource"] = postData.DataSource
}

if signozMetricNotFound {
telemetry.GetInstance().AddActiveMetricsUser()
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
}

return data, true
}

func getActiveLogs(path string, r *http.Request) {
// if path == "/api/v1/dashboards/{uuid}" {
// telemetry.GetInstance().AddActiveMetricsUser()
// }
if path == "/api/v1/logs" {
hasFilters := len(r.URL.Query().Get("q"))
if hasFilters > 0 {
telemetry.GetInstance().AddActiveLogsUser()
}

}

}

func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()

dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
getActiveLogs(path, r)

lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)

data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if metadataExists {
for key, value := range dashboardMetadata {
data[key] = value
}
}

if _, ok := telemetry.IgnoredPaths()[path]; !ok {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
@@ -279,7 +419,7 @@ func setTimeoutMiddleware(next http.Handler) http.Handler {
// check if route is not excluded
url := r.URL.Path
if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout*time.Second)
ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout)
defer cancel()
}
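
Dropping the `*time.Second` multiplier only type-checks if `baseconst.ContextTimeout` changed from a bare integer to a `time.Duration`. A sketch of the assumed new constant definition, which this diff does not itself show:

```go
package constants

import "time"

// Assumed shape of the updated constant: expressing the timeout as a
// time.Duration lets callers pass it to context.WithTimeout directly,
// without multiplying by time.Second at every call site.
const ContextTimeout = 60 * time.Second
```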

@@ -365,7 +505,7 @@ func (s *Server) Start() error {
if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
privatePort = port
}
fmt.Println("starting private http")

go func() {
zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))

@@ -381,6 +521,37 @@ func (s *Server) Start() error {

}()

go func() {
zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
err := opamp.InitalizeServer(baseconst.OpAmpWsEndpoint, &opAmpModel.AllAgents)
if err != nil {
zap.S().Info("opamp ws server failed to start", err)
s.unavailableChannel <- healthcheck.Unavailable
}
}()

return nil
}

func (s *Server) Stop() error {
if s.httpServer != nil {
if err := s.httpServer.Shutdown(context.Background()); err != nil {
return err
}
}

if s.privateHTTP != nil {
if err := s.privateHTTP.Shutdown(context.Background()); err != nil {
return err
}
}

opamp.StopServer()

if s.ruleManager != nil {
s.ruleManager.Stop()
}

return nil
}

@@ -390,7 +561,8 @@ func makeRulesManager(
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
disableRules bool) (*rules.Manager, error) {
disableRules bool,
fm baseInterface.FeatureLookup) (*rules.Manager, error) {

// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -417,6 +589,7 @@ func makeRulesManager(
Context: context.Background(),
Logger: nil,
DisableRules: disableRules,
FeatureFlags: fm,
}

// create Manager

@@ -2,6 +2,7 @@ package dao

import (
"context"
"net/url"

"github.com/google/uuid"
"github.com/jmoiron/sqlx"
@@ -22,6 +23,8 @@ type ModelDao interface {
// auth methods
PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)

// org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
@@ -30,4 +33,11 @@ type ModelDao interface {
UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)

CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError)
DeletePAT(ctx context.Context, id string) basemodel.BaseApiError
}

@@ -10,9 +10,33 @@ import (
"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.uber.org/zap"
)

// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {

userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error())
return "", model.BadRequestStr("invalid user email received from the auth provider")
}

tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
if err != nil {
zap.S().Errorf("failed to generate token for SSO login user", err)
return "", model.InternalErrorStr("failed to generate token for the user")
}

return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
userPayload.User.Id,
tokenStore.RefreshJwt), nil
}

func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {

@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"encoding/json"
"net/url"
"fmt"
"strings"
"time"
@@ -25,6 +26,34 @@ type StoredDomain struct {
UpdatedAt int64 `db:"updated_at"`
}

// GetDomainFromSsoResponse uses relay state received from IdP to fetch
// user domain. The domain is further used to process validity of the response.
// when sending login request to IdP we send relay state as URL (site url)
// with domainId as query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
domainIdStr = strings.Replace(v[0], ":", "-", -1)
}
}

domainId, err := uuid.Parse(domainIdStr)
if err != nil {
zap.S().Errorf("failed to parse domain id from relay state", err)
return nil, fmt.Errorf("failed to parse response from IdP response")
}

domain, err := m.GetDomain(ctx, domainId)
if (err != nil) || domain == nil {
zap.S().Errorf("failed to find domain received in IdP response", err.Error())
return nil, fmt.Errorf("invalid credentials")
}

return domain, nil
}
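
The relay state is just the frontend URL with a domainId query parameter, with the UUID's hyphens swapped for colons in transit; BuildSsoUrl does the encoding, and this function reverses it. A small sketch of that round trip, using a made-up domain id and site URL:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"

	"github.com/google/uuid"
)

func main() {
	// Hypothetical domain id and site URL, for illustration only.
	domainId := uuid.MustParse("9f4c7f45-0b3e-4b2a-8c7d-2f1a6a1b2c3d")

	// Encoding side (what BuildSsoUrl does): "-" becomes ":".
	relayState := fmt.Sprintf("https://signoz.example.com/login?domainId=%s",
		strings.Replace(domainId.String(), "-", ":", -1))

	// Decoding side (what GetDomainFromSsoResponse does): ":" back to "-".
	parsed, _ := url.Parse(relayState)
	raw := parsed.Query().Get("domainId")
	decoded, err := uuid.Parse(strings.Replace(raw, ":", "-", -1))
	fmt.Println(decoded, err) // prints the original UUID and <nil>
}
```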

// GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {


@@ -48,7 +48,17 @@ func InitDB(dataSourceName string) (*modelDao, error) {
updated_at INTEGER,
data TEXT NOT NULL,
FOREIGN KEY(org_id) REFERENCES organizations(id)
);`
);
CREATE TABLE IF NOT EXISTS personal_access_tokens (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id TEXT NOT NULL,
token TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
created_at INTEGER NOT NULL,
expires_at INTEGER NOT NULL,
FOREIGN KEY(user_id) REFERENCES users(id)
);
`

_, err = m.DB().Exec(table_schema)
if err != nil {

ee/query-service/dao/sqlite/pat.go (new file, 106 lines)
@@ -0,0 +1,106 @@
package sqlite

import (
"context"
"fmt"

"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

func (m *modelDao) CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError {
_, err := m.DB().ExecContext(ctx,
"INSERT INTO personal_access_tokens (user_id, token, name, created_at, expires_at) VALUES ($1, $2, $3, $4, $5)",
p.UserID,
p.Token,
p.Name,
p.CreatedAt,
p.ExpiresAt)
if err != nil {
zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT insertion failed"))
}
return nil
}

func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
pats := []model.PAT{}

if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE user_id=?;`, userID); err != nil {
zap.S().Errorf("Failed to fetch PATs for user: %s, err: %v", userID, zap.Error(err))
return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
}
return pats, nil
}

func (m *modelDao) DeletePAT(ctx context.Context, id string) basemodel.BaseApiError {
_, err := m.DB().ExecContext(ctx, `DELETE from personal_access_tokens where id=?;`, id)
if err != nil {
zap.S().Errorf("Failed to delete PAT, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to delete PAT"))
}
return nil
}

func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
pats := []model.PAT{}

if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=?;`, token); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
}

if len(pats) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token),
}
}

return &pats[0], nil
}

func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
pats := []model.PAT{}

if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=?;`, id); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
}

if len(pats) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token"),
}
}

return &pats[0], nil
}

func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) {
users := []basemodel.UserPayload{}

query := `SELECT
u.id,
u.name,
u.email,
u.password,
u.created_at,
u.profile_picture_url,
u.org_id,
u.group_id
FROM users u, personal_access_tokens p
WHERE u.id = p.user_id and p.token=?;`

if err := m.DB().Select(&users, query, token); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
}

if len(users) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
Err: fmt.Errorf("found zero or multiple users with same PAT token"),
}
}
return &users[0], nil
}
@@ -127,7 +127,7 @@ func NewPostRequestWithCtx(ctx context.Context, url string, contentType string,
}

// SendUsage reports the usage of signoz to license server
func SendUsage(ctx context.Context, usage *model.UsagePayload) *model.ApiError {
func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
reqString, _ := json.Marshal(usage)
req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {

@@ -2,6 +2,7 @@ package license

import (
"context"
"database/sql"
"fmt"
"time"

@@ -9,6 +10,7 @@ import (

"go.signoz.io/signoz/ee/query-service/license/sqlite"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

@@ -125,3 +127,79 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,

return nil
}

func (r *Repo) CreateFeature(req *basemodel.Feature) *basemodel.ApiError {

_, err := r.db.Exec(
`INSERT INTO feature_status (name, active, usage, usage_limit, route)
VALUES (?, ?, ?, ?, ?);`,
req.Name, req.Active, req.Usage, req.UsageLimit, req.Route)
if err != nil {
return &basemodel.ApiError{Typ: basemodel.ErrorInternal, Err: err}
}
return nil
}

func (r *Repo) GetFeature(featureName string) (basemodel.Feature, error) {

var feature basemodel.Feature

err := r.db.Get(&feature,
`SELECT * FROM feature_status WHERE name = ?;`, featureName)
if err != nil {
return feature, err
}
if feature.Name == "" {
return feature, basemodel.ErrFeatureUnavailable{Key: featureName}
}
return feature, nil
}

func (r *Repo) GetAllFeatures() ([]basemodel.Feature, error) {

var feature []basemodel.Feature

err := r.db.Select(&feature,
`SELECT * FROM feature_status;`)
if err != nil {
return feature, err
}

return feature, nil
}

func (r *Repo) UpdateFeature(req basemodel.Feature) error {

_, err := r.db.Exec(
`UPDATE feature_status SET active = ?, usage = ?, usage_limit = ?, route = ? WHERE name = ?;`,
req.Active, req.Usage, req.UsageLimit, req.Route, req.Name)
if err != nil {
return err
}
return nil
}

func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
// get a feature by name, if it doesn't exist, create it. If it does exist, update it.
for _, feature := range req {
currentFeature, err := r.GetFeature(feature.Name)
if err != nil && err == sql.ErrNoRows {
err := r.CreateFeature(&feature)
if err != nil {
return err
}
continue
} else if err != nil {
return err
}
feature.Usage = currentFeature.Usage
if feature.Usage >= feature.UsageLimit && feature.UsageLimit != -1 {
feature.Active = false
}
err = r.UpdateFeature(feature)
if err != nil {
return err
}
}
return nil
}

@@ -96,6 +96,11 @@ func (lm *Manager) SetActive(l *model.License) {
lm.activeFeatures = l.FeatureSet
// set default features
setDefaultFeatures(lm)

err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.S().Panicf("Couldn't activate features: %v", err)
}
if !lm.validatorRunning {
// we want to make sure only one validator runs,
// we already have lock() so good to go
@@ -106,9 +111,7 @@ func (lm *Manager) SetActive(l *model.License) {
}

func setDefaultFeatures(lm *Manager) {
for k, v := range baseconstants.DEFAULT_FEATURE_SET {
lm.activeFeatures[k] = v
}
lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...)
}

// LoadActiveLicense loads the most recent active license
@@ -123,8 +126,13 @@ func (lm *Manager) LoadActiveLicense() error {
} else {
zap.S().Info("No active license found, defaulting to basic plan")
// if no active license is found, we default to basic(free) plan with all default features
lm.activeFeatures = basemodel.BasicPlan
lm.activeFeatures = model.BasicPlan
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.S().Error("Couldn't initialize features: ", err)
return err
}
}

return nil
@@ -291,18 +299,31 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
// CheckFeature will be internally used by backend routines
// for feature gating
func (lm *Manager) CheckFeature(featureKey string) error {
if value, ok := lm.activeFeatures[featureKey]; ok {
if value {
return nil
}
return basemodel.ErrFeatureUnavailable{Key: featureKey}
feature, err := lm.repo.GetFeature(featureKey)
if err != nil {
return err
}
if feature.Active {
return nil
}
return basemodel.ErrFeatureUnavailable{Key: featureKey}
}

// GetFeatureFlags returns current active features
func (lm *Manager) GetFeatureFlags() basemodel.FeatureSet {
return lm.activeFeatures
func (lm *Manager) GetFeatureFlags() (basemodel.FeatureSet, error) {
return lm.repo.GetAllFeatures()
}

func (lm *Manager) InitFeatures(features basemodel.FeatureSet) error {
return lm.repo.InitFeatures(features)
}

func (lm *Manager) UpdateFeatureFlag(feature basemodel.Feature) error {
return lm.repo.UpdateFeature(feature)
}

func (lm *Manager) GetFeatureFlag(key string) (basemodel.Feature, error) {
return lm.repo.GetFeature(key)
}
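
With feature state now persisted in the feature_status table rather than held in an in-memory map, gating a route reduces to one repo lookup. A hedged sketch of how a caller might consume this; the wrapper name is illustrative, while CheckFeature and model.SSO mirror usage already in this diff:

```go
package api

import (
	"net/http"

	"go.signoz.io/signoz/ee/query-service/license"
	"go.signoz.io/signoz/ee/query-service/model"
)

// requireSSO is a sketch: gate an HTTP handler on a persisted feature flag.
func requireSSO(lm *license.Manager, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// CheckFeature reads feature_status via the repo and returns
		// ErrFeatureUnavailable when the row is missing or inactive.
		if err := lm.CheckFeature(model.SSO); err != nil {
			http.Error(w, "feature unavailable on this plan", http.StatusForbidden)
			return
		}
		next(w, r)
	}
}
```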

// GetRepo return the license repo

@@ -2,6 +2,7 @@ package sqlite

import (
"fmt"

"github.com/jmoiron/sqlx"
)

@@ -33,5 +34,19 @@ func InitDB(db *sqlx.DB) error {
if err != nil {
return fmt.Errorf("Error in creating licenses table: %s", err.Error())
}

table_schema = `CREATE TABLE IF NOT EXISTS feature_status (
name TEXT PRIMARY KEY,
active bool,
usage INTEGER DEFAULT 0,
usage_limit INTEGER DEFAULT 0,
route TEXT
);`

_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("Error in creating feature_status table: %s", err.Error())
}

return nil
}

@@ -3,30 +3,78 @@ package main
import (
"context"
"flag"
"log"
"os"
"os/signal"
"strconv"
"syscall"
"time"

"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.signoz.io/signoz/ee/query-service/app"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/constants"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/version"
"google.golang.org/grpc"

zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"

"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)

func initZapLog() *zap.Logger {
func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
config := zap.NewDevelopmentConfig()
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
defer stop()

config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
consoleEncoder := zapcore.NewConsoleEncoder(config.EncoderConfig)
defaultLogLevel := zapcore.DebugLevel
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
config.EncoderConfig.TimeKey = "timestamp"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
logger, _ := config.Build()

res := resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String("query-service"),
)

core := zapcore.NewTee(
zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
)

if enableQueryServiceLogOTLPExport {
conn, err := grpc.DialContext(ctx, constants.OTLPTarget, grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(time.Second*30))
|
||||
if err != nil {
|
||||
log.Println("failed to connect to otlp collector to export query service logs with error:", err)
|
||||
} else {
|
||||
logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
|
||||
if err != nil {
|
||||
logExportBatchSizeInt = 1000
|
||||
}
|
||||
ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
|
||||
BatchSize: logExportBatchSizeInt,
|
||||
ResourceSchema: semconv.SchemaURL,
|
||||
Resource: res,
|
||||
}))
|
||||
core = zapcore.NewTee(
|
||||
zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
|
||||
zapcore.NewCore(otlpEncoder, zapcore.NewMultiWriteSyncer(ws), defaultLogLevel),
|
||||
)
|
||||
}
|
||||
}
|
||||
logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))
|
||||
|
||||
return logger
|
||||
}
|
||||
|
||||
func main() {
|
||||
var promConfigPath string
|
||||
var promConfigPath, skipTopLvlOpsPath string
|
||||
|
||||
// disables rule execution but allows change to the rule definition
|
||||
var disableRules bool
|
||||
@@ -34,12 +82,20 @@ func main() {
|
||||
// the url used to build link in the alert messages in slack and other systems
|
||||
var ruleRepoURL string
|
||||
|
||||
var enableQueryServiceLogOTLPExport bool
|
||||
var preferDelta bool
|
||||
var preferSpanMetrics bool
|
||||
|
||||
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
|
||||
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
|
||||
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
|
||||
flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
|
||||
flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
|
||||
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
|
||||
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
|
||||
flag.Parse()
|
||||
|
||||
loggerMgr := initZapLog()
|
||||
loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
|
||||
zap.ReplaceGlobals(loggerMgr)
|
||||
defer loggerMgr.Sync() // flushes buffer, if any
|
||||
|
||||
@@ -47,11 +103,14 @@ func main() {
|
||||
version.PrintVersion()
|
||||
|
||||
serverOptions := &app.ServerOptions{
|
||||
HTTPHostPort: baseconst.HTTPHostPort,
|
||||
PromConfigPath: promConfigPath,
|
||||
PrivateHostPort: baseconst.PrivateHostPort,
|
||||
DisableRules: disableRules,
|
||||
RuleRepoURL: ruleRepoURL,
|
||||
HTTPHostPort: baseconst.HTTPHostPort,
|
||||
PromConfigPath: promConfigPath,
|
||||
SkipTopLvlOpsPath: skipTopLvlOpsPath,
|
||||
PreferDelta: preferDelta,
|
||||
PreferSpanMetrics: preferSpanMetrics,
|
||||
PrivateHostPort: baseconst.PrivateHostPort,
|
||||
DisableRules: disableRules,
|
||||
RuleRepoURL: ruleRepoURL,
|
||||
}
|
||||
|
||||
// Read the jwt secret key
|
||||
|
||||
@@ -9,8 +9,10 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/pkg/errors"
|
||||
saml2 "github.com/russellhaering/gosaml2"
|
||||
"go.signoz.io/signoz/ee/query-service/saml"
|
||||
"go.signoz.io/signoz/ee/query-service/sso/saml"
|
||||
"go.signoz.io/signoz/ee/query-service/sso"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type SSOType string
|
||||
@@ -20,12 +22,6 @@ const (
|
||||
GoogleAuth SSOType = "GOOGLE_AUTH"
|
||||
)
|
||||
|
||||
type SamlConfig struct {
|
||||
SamlEntity string `json:"samlEntity"`
|
||||
SamlIdp string `json:"samlIdp"`
|
||||
SamlCert string `json:"samlCert"`
|
||||
}
|
||||
|
||||
// OrgDomain identify org owned web domains for auth and other purposes
|
||||
type OrgDomain struct {
|
||||
Id uuid.UUID `json:"id"`
|
||||
@@ -33,10 +29,17 @@ type OrgDomain struct {
|
||||
OrgId string `json:"orgId"`
|
||||
SsoEnabled bool `json:"ssoEnabled"`
|
||||
SsoType SSOType `json:"ssoType"`
|
||||
|
||||
SamlConfig *SamlConfig `json:"samlConfig"`
|
||||
GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`
|
||||
|
||||
Org *basemodel.Organization
|
||||
}
|
||||
|
||||
func (od *OrgDomain) String() string {
|
||||
return fmt.Sprintf("[%s]%s-%s ", od.Name, od.Id.String(), od.SsoType)
|
||||
}
|
||||
|
||||
// Valid is used a pipeline function to check if org domain
|
||||
// loaded from db is valid
|
||||
func (od *OrgDomain) Valid(err error) error {
|
||||
@@ -97,6 +100,16 @@ func (od *OrgDomain) GetSAMLCert() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// PrepareGoogleOAuthProvider creates GoogleProvider that is used in
|
||||
// requesting OAuth and also used in processing response from google
|
||||
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
|
||||
if od.GoogleAuthConfig == nil {
|
||||
return nil, fmt.Errorf("Google auth is not setup correctly for this domain")
|
||||
}
|
||||
|
||||
return od.GoogleAuthConfig.GetProvider(od.Name, siteUrl)
|
||||
}
|
||||
|
||||
// PrepareSamlRequest creates a request accordingly gosaml2
|
||||
func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServiceProvider, error) {
|
||||
|
||||
@@ -124,19 +137,48 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro
}

func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {

-    sp, err := od.PrepareSamlRequest(siteUrl)
-    if err != nil {
-        return "", err
-    }

    fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)

+    // build redirect url from window.location sent by frontend
+    redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
+
+    // prepare state that gets relayed back when the auth provider
+    // calls back our url. here we pass the app url (where signoz runs)
+    // and the domain Id. The domain Id helps in identifying the sso config
+    // when the callback occurs, and the app url is useful in redirecting the user
+    // back to the right path.
+    // why do we need to pass the app url? the callback is typically handled by the backend,
+    // and sometimes the backend might run at a different port or is unaware of the frontend
+    // endpoint (unless the SITE_URL param is set). hence, we receive this build-sso request
+    // along with the frontend window.location and use it to relay the information through the
+    // auth provider back to the backend (HandleCallback or HandleSSO method).
+    relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)
+
+    switch od.SsoType {
+    case SAML:
+        sp, err := od.PrepareSamlRequest(siteUrl)
+        if err != nil {
+            return "", err
+        }
+        return sp.BuildAuthURL(relayState)
+
+    case GoogleAuth:
+        googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl)
+        if err != nil {
+            return "", err
+        }
+        return googleProvider.BuildAuthURL(relayState)
+
+    default:
+        zap.S().Errorf("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
+        return "", fmt.Errorf("unsupported SSO config for the domain")
+    }

-    relayState := fmt.Sprintf("%s://%s%s?domainId=%s",
-        siteUrl.Scheme,
-        siteUrl.Host,
-        siteUrl.Path,
-        fmtDomainId)
-
-    return sp.BuildAuthURL(relayState)
}
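The relay-state encoding above only pays off if the callback side can decode it again: the provider echoes the value back verbatim, so the receiving handler must extract domainId and reverse the dash-to-colon substitution before it can look the SSO config up. A minimal sketch of that decoding step (the helper name and package are illustrative assumptions, not part of this diff):

package auth

import (
	"fmt"
	"net/url"
	"strings"

	"github.com/google/uuid"
)

// parseRelayState undoes the encoding done in BuildSsoUrl: it splits a state
// value such as "https://signoz.example.com/?domainId=xxxxxxxx:xxxx:..." into
// the frontend redirect URL and the org domain id, reversing the "-" -> ":"
// substitution before parsing the UUID. Illustrative helper only.
func parseRelayState(state string) (*url.URL, uuid.UUID, error) {
	u, err := url.Parse(state)
	if err != nil {
		return nil, uuid.Nil, fmt.Errorf("invalid relay state: %v", err)
	}

	// reverse the strings.Replace(od.Id.String(), "-", ":", -1) from BuildSsoUrl
	raw := strings.Replace(u.Query().Get("domainId"), ":", "-", -1)
	domainID, err := uuid.Parse(raw)
	if err != nil {
		return nil, uuid.Nil, fmt.Errorf("invalid domainId in relay state: %v", err)
	}
	return u, domainID, nil
}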
@@ -1,6 +1,7 @@
package model

import (
    "fmt"

    basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

@@ -44,6 +45,14 @@ func BadRequest(err error) *ApiError {
    }
}

+// BadRequestStr returns an ApiError object of bad request type for string input
+func BadRequestStr(s string) *ApiError {
+    return &ApiError{
+        Typ: basemodel.ErrorBadData,
+        Err: fmt.Errorf(s),
+    }
+}

// InternalError returns an ApiError object of internal type
func InternalError(err error) *ApiError {
    return &ApiError{
@@ -52,6 +61,14 @@ func InternalError(err error) *ApiError {
    }
}

+// InternalErrorStr returns an ApiError object of internal type for string input
+func InternalErrorStr(s string) *ApiError {
+    return &ApiError{
+        Typ: basemodel.ErrorInternal,
+        Err: fmt.Errorf(s),
+    }
+}

var (
    ErrorNone    basemodel.ErrorType = ""
    ErrorTimeout basemodel.ErrorType = "timeout"
ee/query-service/model/pat.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package model

type PAT struct {
    Id        string `json:"id" db:"id"`
    UserID    string `json:"userId" db:"user_id"`
    Token     string `json:"token" db:"token"`
    Name      string `json:"name" db:"name"`
    CreatedAt int64  `json:"createdAt" db:"created_at"`
    ExpiresAt int64  `json:"expiresAt" db:"expires_at"` // unused as of now
}
@@ -11,21 +11,164 @@ const Enterprise = "ENTERPRISE_PLAN"
const DisableUpsell = "DISABLE_UPSELL"

var BasicPlan = basemodel.FeatureSet{
-    Basic:         true,
-    SSO:           false,
-    DisableUpsell: false,
+    basemodel.Feature{
+        Name:       SSO,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.OSS,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       DisableUpsell,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.SmartTraceDetail,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.CustomMetricsFunction,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.QueryBuilderPanels,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: 5,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.QueryBuilderAlerts,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: 5,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.UseSpanMetrics,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
}

var ProPlan = basemodel.FeatureSet{
-    Pro:                             true,
-    SSO:                             true,
-    basemodel.SmartTraceDetail:      true,
-    basemodel.CustomMetricsFunction: true,
+    basemodel.Feature{
+        Name:       SSO,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.OSS,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.SmartTraceDetail,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.CustomMetricsFunction,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.QueryBuilderPanels,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.QueryBuilderAlerts,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.UseSpanMetrics,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
}

var EnterprisePlan = basemodel.FeatureSet{
-    Enterprise:                      true,
-    SSO:                             true,
-    basemodel.SmartTraceDetail:      true,
-    basemodel.CustomMetricsFunction: true,
+    basemodel.Feature{
+        Name:       SSO,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.OSS,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.SmartTraceDetail,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.CustomMetricsFunction,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.QueryBuilderPanels,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.QueryBuilderAlerts,
+        Active:     true,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
+    basemodel.Feature{
+        Name:       basemodel.UseSpanMetrics,
+        Active:     false,
+        Usage:      0,
+        UsageLimit: -1,
+        Route:      "",
+    },
}
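Since FeatureSet is now a slice of Feature values instead of a map of booleans, callers resolve a feature by scanning for its name. A minimal, self-contained sketch of such a lookup, with local stand-ins for the basemodel types (illustrative only, not the library's own helper):

package main

import "fmt"

// Feature mirrors the basemodel.Feature fields used in the plans above.
type Feature struct {
	Name       string
	Active     bool
	Usage      int64
	UsageLimit int64
	Route      string
}

type FeatureSet []Feature

// IsActive reports whether a named feature exists in the set and is active.
func (fs FeatureSet) IsActive(name string) bool {
	for _, f := range fs {
		if f.Name == name {
			return f.Active
		}
	}
	return false
}

func main() {
	plan := FeatureSet{{Name: "SSO", Active: true, UsageLimit: -1}}
	fmt.Println(plan.IsActive("SSO")) // true
}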
ee/query-service/model/sso.go (new file, 68 lines)
@@ -0,0 +1,68 @@
package model

import (
    "context"
    "fmt"
    "net/url"

    "github.com/coreos/go-oidc/v3/oidc"
    "golang.org/x/oauth2"

    "go.signoz.io/signoz/ee/query-service/sso"
)

// SamlConfig contains SAML params to generate and respond to the requests
// from the SAML provider
type SamlConfig struct {
    SamlEntity string `json:"samlEntity"`
    SamlIdp    string `json:"samlIdp"`
    SamlCert   string `json:"samlCert"`
}

// GoogleOAuthConfig contains a generic config to support oauth
type GoogleOAuthConfig struct {
    ClientID     string `json:"clientId"`
    ClientSecret string `json:"clientSecret"`
    RedirectURI  string `json:"redirectURI"`
}

const (
    googleIssuerURL = "https://accounts.google.com"
)

func (g *GoogleOAuthConfig) GetProvider(domain string, siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {

    ctx, cancel := context.WithCancel(context.Background())

    provider, err := oidc.NewProvider(ctx, googleIssuerURL)
    if err != nil {
        cancel()
        return nil, fmt.Errorf("failed to get provider: %v", err)
    }

    // default to the email scope, as we just use google auth
    // to verify identity and start a session.
    scopes := []string{"email"}

    // this is the url google will call after login completion
    redirectURL := fmt.Sprintf("%s://%s/%s",
        siteUrl.Scheme,
        siteUrl.Host,
        "api/v1/complete/google")

    return &sso.GoogleOAuthProvider{
        RedirectURI: g.RedirectURI,
        OAuth2Config: &oauth2.Config{
            ClientID:     g.ClientID,
            ClientSecret: g.ClientSecret,
            Endpoint:     provider.Endpoint(),
            Scopes:       scopes,
            RedirectURL:  redirectURL,
        },
        Verifier: provider.Verifier(
            &oidc.Config{ClientID: g.ClientID},
        ),
        Cancel:       cancel,
        HostedDomain: domain,
    }, nil
}
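Putting GetProvider together with the BuildAuthURL call made from BuildSsoUrl gives the outbound half of the Google flow. A minimal sketch of that sequence, with placeholder credentials and site URL (the values are illustrative; a real caller would pass the relayState assembled in BuildSsoUrl):

package main

import (
	"fmt"
	"log"
	"net/url"

	"go.signoz.io/signoz/ee/query-service/model"
)

func main() {
	cfg := model.GoogleOAuthConfig{
		ClientID:     "client-id.apps.googleusercontent.com", // placeholder
		ClientSecret: "client-secret",                        // placeholder
	}

	siteUrl, err := url.Parse("https://signoz.example.com")
	if err != nil {
		log.Fatal(err)
	}

	// GetProvider reaches out to Google's OIDC discovery endpoint.
	provider, err := cfg.GetProvider("example.com", siteUrl)
	if err != nil {
		log.Fatal(err)
	}

	// The state argument would normally be the relayState from BuildSsoUrl.
	authURL, err := provider.BuildAuthURL("https://signoz.example.com/?domainId=placeholder")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(authURL) // the browser is redirected here
}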
@@ -6,30 +6,27 @@ import (
    "github.com/google/uuid"
)

-type UsageSnapshot struct {
-    CurrentLogSizeBytes            uint64 `json:"currentLogSizeBytes"`
-    CurrentLogSizeBytesColdStorage uint64 `json:"currentLogSizeBytesColdStorage"`
-    CurrentSpansCount              uint64 `json:"currentSpansCount"`
-    CurrentSpansCountColdStorage   uint64 `json:"currentSpansCountColdStorage"`
-    CurrentSamplesCount            uint64 `json:"currentSamplesCount"`
-    CurrentSamplesCountColdStorage uint64 `json:"currentSamplesCountColdStorage"`
-}
-
-type UsageBase struct {
-    Id                uuid.UUID `json:"id" db:"id"`
-    InstallationId    uuid.UUID `json:"installationId" db:"installation_id"`
-    ActivationId      uuid.UUID `json:"activationId" db:"activation_id"`
-    CreatedAt         time.Time `json:"createdAt" db:"created_at"`
-    FailedSyncRequest int       `json:"failedSyncRequest" db:"failed_sync_request_count"`
-}
-
type UsagePayload struct {
-    UsageBase
-    Metrics      UsageSnapshot `json:"metrics"`
-    SnapshotDate time.Time     `json:"snapshotDate"`
+    InstallationId uuid.UUID `json:"installationId"`
+    LicenseKey     uuid.UUID `json:"licenseKey"`
+    Usage          []Usage   `json:"usage"`
}

type Usage struct {
-    UsageBase
-    Snapshot string `db:"snapshot"`
+    CollectorID string    `json:"collectorId"`
+    ExporterID  string    `json:"exporterId"`
+    Type        string    `json:"type"`
+    Tenant      string    `json:"tenant"`
+    TimeStamp   time.Time `json:"timestamp"`
+    Count       int64     `json:"count"`
+    Size        int64     `json:"size"`
}

+type UsageDB struct {
+    CollectorID string    `ch:"collector_id" json:"collectorId"`
+    ExporterID  string    `ch:"exporter_id" json:"exporterId"`
+    Type        string    `ch:"-" json:"type"`
+    TimeStamp   time.Time `ch:"timestamp" json:"timestamp"`
+    Tenant      string    `ch:"tenant" json:"tenant"`
+    Data        string    `ch:"data" json:"data"`
+}
ee/query-service/sso/google.go (new file, 92 lines)
@@ -0,0 +1,92 @@
package sso

import (
    "context"
    "errors"
    "fmt"
    "net/http"

    "github.com/coreos/go-oidc/v3/oidc"
    "golang.org/x/oauth2"
)

type GoogleOAuthProvider struct {
    RedirectURI  string
    OAuth2Config *oauth2.Config
    Verifier     *oidc.IDTokenVerifier
    Cancel       context.CancelFunc
    HostedDomain string
}

func (g *GoogleOAuthProvider) BuildAuthURL(state string) (string, error) {
    var opts []oauth2.AuthCodeOption

    // set hosted domain. google supports multiple hosted domains but in our case
    // we have one config per host domain.
    opts = append(opts, oauth2.SetAuthURLParam("hd", g.HostedDomain))

    return g.OAuth2Config.AuthCodeURL(state, opts...), nil
}

type oauth2Error struct {
    error            string
    errorDescription string
}

func (e *oauth2Error) Error() string {
    if e.errorDescription == "" {
        return e.error
    }
    return e.error + ": " + e.errorDescription
}

func (g *GoogleOAuthProvider) HandleCallback(r *http.Request) (identity *SSOIdentity, err error) {
    q := r.URL.Query()
    if errType := q.Get("error"); errType != "" {
        return identity, &oauth2Error{errType, q.Get("error_description")}
    }

    token, err := g.OAuth2Config.Exchange(r.Context(), q.Get("code"))
    if err != nil {
        return identity, fmt.Errorf("google: failed to get token: %v", err)
    }

    return g.createIdentity(r.Context(), token)
}

func (g *GoogleOAuthProvider) createIdentity(ctx context.Context, token *oauth2.Token) (identity *SSOIdentity, err error) {
    rawIDToken, ok := token.Extra("id_token").(string)
    if !ok {
        return identity, errors.New("google: no id_token in token response")
    }
    idToken, err := g.Verifier.Verify(ctx, rawIDToken)
    if err != nil {
        return identity, fmt.Errorf("google: failed to verify ID Token: %v", err)
    }

    var claims struct {
        Username      string `json:"name"`
        Email         string `json:"email"`
        EmailVerified bool   `json:"email_verified"`
        HostedDomain  string `json:"hd"`
    }
    if err := idToken.Claims(&claims); err != nil {
        return identity, fmt.Errorf("oidc: failed to decode claims: %v", err)
    }

    if claims.HostedDomain != g.HostedDomain {
        return identity, fmt.Errorf("oidc: unexpected hd claim %v", claims.HostedDomain)
    }

    identity = &SSOIdentity{
        UserID:        idToken.Subject,
        Username:      claims.Username,
        Email:         claims.Email,
        EmailVerified: claims.EmailVerified,
        ConnectorData: []byte(token.RefreshToken),
    }

    return identity, nil
}
ee/query-service/sso/model.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package sso

import (
    "net/http"
)

// SSOIdentity contains details of the user received from the SSO provider
type SSOIdentity struct {
    UserID            string
    Username          string
    PreferredUsername string
    Email             string
    EmailVerified     bool
    ConnectorData     []byte
}

// OAuthCallbackProvider is an interface implemented by connectors which use an OAuth
// style redirect flow to determine user information.
type OAuthCallbackProvider interface {
    // The initial URL the user would be redirected to.
    // OAuth2 implementations support various scopes, but we only need profile and user as
    // the roles are still being managed in SigNoz.
    BuildAuthURL(state string) (string, error)

    // Handle the callback to the server (after login at the oauth provider site)
    // and return an email identity.
    // At the moment we don't support an auto-signup flow (based on domain), so
    // the full identity (including name, group, etc.) is not required outside of the
    // connector.
    HandleCallback(r *http.Request) (identity *SSOIdentity, err error)
}
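Because the callback contract lives behind this interface, the HTTP endpoint that receives the provider redirect can stay provider-agnostic. A minimal sketch of such an endpoint (the handler and package names are illustrative assumptions, not part of this diff):

package auth

import (
	"fmt"
	"log"
	"net/http"

	"go.signoz.io/signoz/ee/query-service/sso"
)

// ssoCallbackHandler depends only on the OAuthCallbackProvider interface,
// so the same endpoint serves Google OAuth today and any future provider.
func ssoCallbackHandler(p sso.OAuthCallbackProvider) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		identity, err := p.HandleCallback(r)
		if err != nil {
			log.Printf("sso callback error: %v", err)
			http.Error(w, "SSO callback failed", http.StatusUnauthorized)
			return
		}
		// a real handler would mint a session / JWT for identity.Email here
		fmt.Fprintf(w, "authenticated as %s", identity.Email)
	}
}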
@@ -4,18 +4,19 @@ import (
    "context"
    "encoding/json"
    "fmt"
+    "strings"
    "sync/atomic"
    "time"

    "github.com/ClickHouse/clickhouse-go/v2"
    "github.com/google/uuid"
    "github.com/jmoiron/sqlx"

    "go.uber.org/zap"

    licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
    "go.signoz.io/signoz/ee/query-service/license"
    "go.signoz.io/signoz/ee/query-service/model"
-    "go.signoz.io/signoz/ee/query-service/usage/repository"
    "go.signoz.io/signoz/pkg/query-service/utils/encryption"
)

@@ -27,9 +28,6 @@ const (
)

var (
-    // collect usage every hour
-    collectionFrequency = 1 * time.Hour
-
    // send usage every 24 hour
    uploadFrequency = 24 * time.Hour

@@ -37,8 +35,6 @@ var (
)

type Manager struct {
-    repository *repository.Repository
-
    clickhouseConn clickhouse.Conn

    licenseRepo *license.Repo
@@ -52,15 +48,9 @@ type Manager struct {
}

func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
-    repo := repository.New(db)
-
-    err := repo.Init(dbType)
-    if err != nil {
-        return nil, fmt.Errorf("failed to initiate usage repo: %v", err)
-    }
-
    m := &Manager{
-        repository:     repo,
+        // repository:  repo,
        clickhouseConn: clickhouseConn,
        licenseRepo:    licenseRepo,
    }
@@ -74,6 +64,28 @@ func (lm *Manager) Start() error {
    return fmt.Errorf("usage exporter is locked")
}

+    go lm.UsageExporter(context.Background())
+
+    return nil
+}
+
+func (lm *Manager) UsageExporter(ctx context.Context) {
+    defer close(lm.terminated)
+
+    uploadTicker := time.NewTicker(uploadFrequency)
+    defer uploadTicker.Stop()
+
+    for {
+        select {
+        case <-lm.done:
+            return
+        case <-uploadTicker.C:
+            lm.UploadUsage(ctx)
+        }
+    }
+}
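The exporter loop relies on a pair of channels this hunk doesn't define: closing done asks the loop to return, and the deferred close(terminated) lets a caller wait for the loop to drain. A minimal, self-contained sketch of that shutdown handshake (a toy model of the pattern, not SigNoz code):

package main

import (
	"fmt"
	"time"
)

// Minimal model of the done/terminated handshake used by UsageExporter:
// closing done asks the loop to exit; the loop closes terminated on the way out.
type exporter struct {
	done       chan struct{}
	terminated chan struct{}
}

func (e *exporter) run() {
	defer close(e.terminated) // mirrors `defer close(lm.terminated)`
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-e.done:
			return
		case <-ticker.C:
			// upload work would happen here
		}
	}
}

func (e *exporter) stop() {
	close(e.done)  // signal the loop
	<-e.terminated // wait for it to drain
}

func main() {
	e := &exporter{done: make(chan struct{}), terminated: make(chan struct{})}
	go e.run()
	time.Sleep(30 * time.Millisecond)
	e.stop()
	fmt.Println("exporter stopped cleanly")
}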
+func (lm *Manager) UploadUsage(ctx context.Context) error {
+    // check if license is present or not
+    license, err := lm.licenseRepo.GetActiveLicense(context.Background())
+    if err != nil {
@@ -85,203 +97,81 @@ func (lm *Manager) Start() error {
        return nil
    }

-    // upload previous snapshots if any
-    err = lm.UploadUsage(context.Background())
-    if err != nil {
-        return err
-    }
-
-    // collect snapshot if incase it wasn't collect in (t - collectionFrequency)
-    err = lm.CollectCurrentUsage(context.Background())
-    if err != nil {
-        return err
-    }
-
-    go lm.UsageExporter(context.Background())
-
-    return nil
-}
-// CollectCurrentUsage checks if needs to collect usage data
-func (lm *Manager) CollectCurrentUsage(ctx context.Context) error {
-    // check the DB if anything exist where timestamp > t - collectionFrequency
-    ts := time.Now().Add(-collectionFrequency)
-    alreadyCreated, err := lm.repository.CheckSnapshotGtCreatedAt(ctx, ts)
-    if err != nil {
-        return err
-    }
-    if !alreadyCreated {
-        zap.S().Info("Collecting current usage")
-        exportError := lm.CollectAndStoreUsage(ctx)
-        if exportError != nil {
-            return exportError
-        }
-    } else {
-        zap.S().Info("Nothing to collect")
-    }
-    return nil
-}
-
-func (lm *Manager) UsageExporter(ctx context.Context) {
-    defer close(lm.terminated)
-
-    collectionTicker := time.NewTicker(collectionFrequency)
-    defer collectionTicker.Stop()
-
-    uploadTicker := time.NewTicker(uploadFrequency)
-    defer uploadTicker.Stop()
-
-    for {
-        select {
-        case <-lm.done:
-            return
-        case <-collectionTicker.C:
-            lm.CollectAndStoreUsage(ctx)
-        case <-uploadTicker.C:
-            lm.UploadUsage(ctx)
-            // remove the old snapshots
-            lm.repository.DropOldSnapshots(ctx)
-        }
-    }
-}
-
-type TableSize struct {
-    Table             string `ch:"table"`
-    DiskName          string `ch:"disk_name"`
-    Rows              uint64 `ch:"rows"`
-    UncompressedBytes uint64 `ch:"uncompressed_bytes"`
-}
-
-func (lm *Manager) CollectAndStoreUsage(ctx context.Context) error {
-    snap, err := lm.GetUsageFromClickHouse(ctx)
-    if err != nil {
-        return err
-    }
-
-    license, err := lm.licenseRepo.GetActiveLicense(ctx)
-    if err != nil {
-        return err
-    }
-
-    activationId, _ := uuid.Parse(license.ActivationId)
-    // TODO (nitya) : Add installation ID in the payload
-    payload := model.UsagePayload{
-        UsageBase: model.UsageBase{
-            ActivationId:      activationId,
-            FailedSyncRequest: 0,
-        },
-        Metrics:      *snap,
-        SnapshotDate: time.Now(),
-    }
-
-    err = lm.repository.InsertSnapshot(ctx, &payload)
-    if err != nil {
-        return err
-    }
-
-    return nil
-}
-func (lm *Manager) GetUsageFromClickHouse(ctx context.Context) (*model.UsageSnapshot, error) {
-    tableSizes := []TableSize{}
-    snap := model.UsageSnapshot{}
+    usages := []model.UsageDB{}

    // get usage from clickhouse
+    dbs := []string{"signoz_logs", "signoz_traces", "signoz_metrics"}
    query := `
-        SELECT
-            table,
-            disk_name,
-            sum(rows) as rows,
-            sum(data_uncompressed_bytes) AS uncompressed_bytes
-        FROM system.parts
-        WHERE active AND (database in ('signoz_logs', 'signoz_metrics', 'signoz_traces')) AND (table in ('logs','samples_v2', 'signoz_index_v2'))
-        GROUP BY
-            table,
-            disk_name
-        ORDER BY table
+        SELECT tenant, collector_id, exporter_id, timestamp, data
+        FROM %s.distributed_usage as u1
+        GLOBAL INNER JOIN
+            (SELECT
+                tenant, collector_id, exporter_id, MAX(timestamp) as ts
+            FROM %s.distributed_usage as u2
+            where timestamp >= $1
+            GROUP BY tenant, collector_id, exporter_id
+            ) as t1
+        ON
+            u1.tenant = t1.tenant AND u1.collector_id = t1.collector_id AND u1.exporter_id = t1.exporter_id and u1.timestamp = t1.ts
+        order by timestamp
    `
-    err := lm.clickhouseConn.Select(ctx, &tableSizes, query)
-    if err != nil {
-        return nil, err
-    }
-
-    for _, val := range tableSizes {
-        switch val.Table {
-        case "logs":
-            if val.DiskName == "default" {
-                snap.CurrentLogSizeBytes = val.UncompressedBytes
-            } else {
-                snap.CurrentLogSizeBytesColdStorage = val.UncompressedBytes
-            }
-        case "samples_v2":
-            if val.DiskName == "default" {
-                snap.CurrentSamplesCount = val.Rows
-            } else {
-                snap.CurrentSamplesCountColdStorage = val.Rows
-            }
-        case "signoz_index_v2":
-            if val.DiskName == "default" {
-                snap.CurrentSpansCount = val.Rows
-            } else {
-                snap.CurrentSpansCountColdStorage = val.Rows
-            }
-        }
-    }
+    for _, db := range dbs {
+        dbusages := []model.UsageDB{}
+        err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
+        if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
+            return err
+        }
+        for _, u := range dbusages {
+            u.Type = db
+            usages = append(usages, u)
+        }
+    }

-    return &snap, nil
-}
-func (lm *Manager) UploadUsage(ctx context.Context) error {
-    snapshots, err := lm.repository.GetSnapshotsNotSynced(ctx)
-    if err != nil {
-        return err
-    }
-
-    if len(snapshots) <= 0 {
+    if len(usages) <= 0 {
        zap.S().Info("no snapshots to upload, skipping.")
        return nil
    }

-    zap.S().Info("uploading snapshots")
-    for _, snap := range snapshots {
-        metricsBytes, err := encryption.Decrypt([]byte(snap.ActivationId.String()[:32]), []byte(snap.Snapshot))
+    zap.S().Info("uploading usage data")
+
+    usagesPayload := []model.Usage{}
+    for _, usage := range usages {
+        usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
        if err != nil {
            return err
        }

-        metrics := model.UsageSnapshot{}
-        err = json.Unmarshal(metricsBytes, &metrics)
+        usageData := model.Usage{}
+        err = json.Unmarshal(usageDataBytes, &usageData)
        if err != nil {
            return err
        }

-        err = lm.UploadUsageWithExponentalBackOff(ctx, model.UsagePayload{
-            UsageBase: model.UsageBase{
-                Id:                snap.Id,
-                InstallationId:    snap.InstallationId,
-                ActivationId:      snap.ActivationId,
-                FailedSyncRequest: snap.FailedSyncRequest,
-            },
-            SnapshotDate: snap.CreatedAt,
-            Metrics:      metrics,
-        })
-        if err != nil {
-            return err
-        }
+        usageData.CollectorID = usage.CollectorID
+        usageData.ExporterID = usage.ExporterID
+        usageData.Type = usage.Type
+        usageData.Tenant = usage.Tenant
+        usagesPayload = append(usagesPayload, usageData)
    }

+    key, _ := uuid.Parse(license.Key)
+    payload := model.UsagePayload{
+        LicenseKey: key,
+        Usage:      usagesPayload,
+    }
+    err = lm.UploadUsageWithExponentalBackOff(ctx, payload)
+    if err != nil {
+        return err
+    }
    return nil
}
func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) error {
    for i := 1; i <= MaxRetries; i++ {
-        apiErr := licenseserver.SendUsage(ctx, &payload)
+        apiErr := licenseserver.SendUsage(ctx, payload)
        if apiErr != nil && i == MaxRetries {
-            err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
-            if err != nil {
-                zap.S().Errorf("failed to updated the failure count for snapshot in DB : ", zap.Error(err))
-                return err
-            }
-            zap.S().Errorf("retries stopped : %v", zap.Error(err))
+            zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
            // not returning error here since it is captured in the failed count
            return nil
        } else if apiErr != nil {
@@ -289,24 +179,10 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
            sleepDuration := RetryInterval * time.Duration(i)
            zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err))
            time.Sleep(sleepDuration)
-
-            // update the failed request count
-            err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
-            if err != nil {
-                zap.S().Errorf("failed to updated the failure count for snapshot in DB : %v", zap.Error(err))
-                return err
-            }
        } else {
            break
        }
    }

-    // update the database that it is synced
-    err := lm.repository.MoveToSynced(ctx, payload.Id)
-    if err != nil {
-        return err
-    }
-
    return nil
}
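One detail worth noting in the retry loop above: sleepDuration is RetryInterval multiplied by the attempt number, so the waits grow linearly (1×, 2×, 3×, ...) rather than exponentially, despite the function's name. A small runnable comparison of the two schedules, using an illustrative 30-second base interval (the real RetryInterval constant is not shown in this diff):

package main

import (
	"fmt"
	"time"
)

func main() {
	const maxRetries = 3
	const retryInterval = 30 * time.Second // illustrative value only

	for i := 1; i <= maxRetries; i++ {
		linear := retryInterval * time.Duration(i)             // what the loop above does: 30s, 60s, 90s
		exponential := retryInterval * time.Duration(1<<(i-1)) // a true exponential schedule: 30s, 60s, 120s
		fmt.Printf("attempt %d: linear=%v, exponential=%v\n", i, linear, exponential)
	}
}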
@@ -1,139 +0,0 @@
package repository

import (
    "context"
    "encoding/json"
    "fmt"
    "time"

    "github.com/google/uuid"
    "github.com/jmoiron/sqlx"
    "go.uber.org/zap"

    "go.signoz.io/signoz/ee/query-service/model"
    "go.signoz.io/signoz/ee/query-service/usage/sqlite"
    "go.signoz.io/signoz/pkg/query-service/utils/encryption"
)

const (
    MaxFailedSyncCount = 9 // a snapshot will be ignored if the max failed count is greater than or equal to 9
    SnapShotLife       = 3 * 24 * time.Hour
)

// Repository is usage Repository which stores usage snapshot in a secured DB
type Repository struct {
    db *sqlx.DB
}

// New initiates a new usage Repository
func New(db *sqlx.DB) *Repository {
    return &Repository{
        db: db,
    }
}

func (r *Repository) Init(engine string) error {
    switch engine {
    case "sqlite3", "sqlite":
        return sqlite.InitDB(r.db)
    default:
        return fmt.Errorf("unsupported db")
    }
}

func (r *Repository) InsertSnapshot(ctx context.Context, usage *model.UsagePayload) error {
    snapshotBytes, err := json.Marshal(usage.Metrics)
    if err != nil {
        return err
    }

    usage.Id = uuid.New()

    encryptedSnapshot, err := encryption.Encrypt([]byte(usage.ActivationId.String()[:32]), snapshotBytes)
    if err != nil {
        return err
    }

    query := `INSERT INTO usage(id, activation_id, snapshot)
        VALUES ($1, $2, $3)`
    _, err = r.db.ExecContext(ctx,
        query,
        usage.Id,
        usage.ActivationId,
        string(encryptedSnapshot),
    )
    if err != nil {
        zap.S().Errorf("error inserting usage data: %v", zap.Error(err))
        return fmt.Errorf("failed to insert usage in db: %v", err)
    }
    return nil
}

func (r *Repository) MoveToSynced(ctx context.Context, id uuid.UUID) error {
    query := `UPDATE usage
        SET synced = 'true',
            synced_at = $1
        WHERE id = $2`

    _, err := r.db.ExecContext(ctx, query, time.Now(), id)
    if err != nil {
        zap.S().Errorf("error in updating usage: %v", zap.Error(err))
        return fmt.Errorf("failed to update usage in db: %v", err)
    }

    return nil
}

func (r *Repository) IncrementFailedRequestCount(ctx context.Context, id uuid.UUID) error {
    query := `UPDATE usage SET failed_sync_request_count = failed_sync_request_count + 1 WHERE id = $1`
    _, err := r.db.ExecContext(ctx, query, id)
    if err != nil {
        zap.S().Errorf("error in updating usage: %v", zap.Error(err))
        return fmt.Errorf("failed to update usage in db: %v", err)
    }

    return nil
}

func (r *Repository) GetSnapshotsNotSynced(ctx context.Context) ([]*model.Usage, error) {
    snapshots := []*model.Usage{}

    query := `SELECT id, created_at, activation_id, snapshot, failed_sync_request_count from usage where synced!='true' and failed_sync_request_count < $1 order by created_at asc`

    err := r.db.SelectContext(ctx, &snapshots, query, MaxFailedSyncCount)
    if err != nil {
        return nil, err
    }

    return snapshots, nil
}

func (r *Repository) DropOldSnapshots(ctx context.Context) error {
    query := `delete from usage where created_at <= $1`

    _, err := r.db.ExecContext(ctx, query, time.Now().Add(-(SnapShotLife)))
    if err != nil {
        zap.S().Errorf("failed to remove old snapshots from db: %v", zap.Error(err))
        return err
    }

    return nil
}

// CheckSnapshotGtCreatedAt checks if there is any snapshot greater than the provided timestamp
func (r *Repository) CheckSnapshotGtCreatedAt(ctx context.Context, ts time.Time) (bool, error) {
    var snapshots uint64
    query := `SELECT count() from usage where created_at > '$1'`

    err := r.db.QueryRowContext(ctx, query, ts).Scan(&snapshots)
    if err != nil {
        return false, err
    }

    return snapshots > 0, err
}
@@ -1,32 +0,0 @@
package sqlite

import (
    "fmt"

    "github.com/jmoiron/sqlx"
)

func InitDB(db *sqlx.DB) error {
    var err error
    if db == nil {
        return fmt.Errorf("invalid db connection")
    }

    table_schema := `CREATE TABLE IF NOT EXISTS usage(
        id UUID PRIMARY KEY,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        activation_id UUID,
        snapshot TEXT,
        synced BOOLEAN DEFAULT 'false',
        synced_at TIMESTAMP,
        failed_sync_request_count INTEGER DEFAULT 0
    );
    `

    _, err = db.Exec(table_schema)
    if err != nil {
        return fmt.Errorf("error in creating usage table: %v", err.Error())
    }
    return nil
}
@@ -1,7 +1,7 @@
{
    "presets": [
        "@babel/preset-env",
-        "@babel/preset-react",
+        ["@babel/preset-react", { "runtime": "automatic" }],
        "@babel/preset-typescript"
    ],
    "plugins": [
@@ -16,6 +16,7 @@ module.exports = {
    'plugin:sonarjs/recommended',
    'plugin:import/errors',
    'plugin:import/warnings',
+    'plugin:react/jsx-runtime',
],
parser: '@typescript-eslint/parser',
parserOptions: {
@@ -58,7 +59,7 @@ module.exports = {
    'react/no-array-index-key': 'error',
    'linebreak-style': [
        'error',
-        process.platform === 'win32' ? 'windows' : 'unix',
+        process.env.platform === 'win32' ? 'windows' : 'unix',
    ],
    '@typescript-eslint/default-param-last': 'off',

@@ -102,9 +103,10 @@ module.exports = {
        },
    ],
    '@typescript-eslint/no-unused-vars': 'error',
+    'func-style': ['error', 'declaration', { allowArrowFunctions: true }],
    'arrow-body-style': ['error', 'as-needed'],

    // eslint rules need to remove
    'no-shadow': 'off',
    '@typescript-eslint/no-shadow': 'off',
    'import/no-cycle': 'off',
@@ -1,4 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"

-cd frontend && npm run commitlint
+cd frontend && yarn run commitlint --edit $1
frontend/.yarnrc (new file, 2 lines)
@@ -0,0 +1,2 @@
network-timeout 600000
save-prefix ""
frontend/CONTRIBUTIONS.md (new file, 56 lines)
@@ -0,0 +1,56 @@
# **Frontend Guidelines**

Embrace the spirit of collaboration and contribute to the success of our open-source project by adhering to these frontend development guidelines with precision and passion.

### React and Components

- Strive to create small and modular components, ensuring they are divided into individual pieces for improved maintainability and reusability.
- Avoid passing inline objects or functions as props to React components, as they are recreated with each render cycle.
  Utilize careful memoization of functions and variables, balancing optimization efforts to prevent potential performance issues. [When to useMemo and useCallback](https://kentcdodds.com/blog/usememo-and-usecallback) by Kent C. Dodds is quite helpful for this scenario.
- Minimize the use of inline functions whenever possible to enhance code readability and improve overall comprehension.
- Employ the appropriate usage of useMemo and useCallback hooks for effective memoization of values and functions.
- Determine the appropriate placement of components:
  - Pages should contain an aggregation of all components and containers.
  - Commonly used components should reside in the 'components' directory.
  - Parent components responsible for data manipulation should be placed in the 'container' directory.
- Strategically decide where to store data, either in global state or local components:
  - Begin by storing data in local components and gradually transition to global state as necessary.
- Avoid importing the default namespace `React`, as the project is using `v18` and `import React from 'react'` is no longer needed.
- When a function requires more than three arguments (except when memoized), encapsulate them within an object to enhance readability and reduce potential parameter complexity.

### API and Services

- Avoid incorporating business logic within API/Service files to maintain flexibility for consumers to handle it according to their specific needs.
- Employ the use of the useQuery hook for fetching data and the useMutation hook for updating data, ensuring a consistent and efficient approach.
- Utilize the useQueryClient hook when updating the cache, facilitating smooth and effective management of data within the application.

**Note -** In our project, we are utilizing React Query v3. To gain a comprehensive understanding of its features and implementation, we recommend referring to the [official documentation](https://tanstack.com/query/v3/docs/react/overview) as a valuable resource.

### Styling

- Refrain from using inline styling within React components to maintain separation of concerns and promote a more maintainable codebase.
- Opt for using the rem unit instead of px values to ensure better scalability and responsiveness across different devices and screen sizes.

### Linting and Setup

- It is crucial to refrain from disabling ESLint and TypeScript errors within the project. If there is a specific rule that needs to be disabled, provide a clear and justified explanation for doing so. Maintaining the integrity of the linting and type-checking processes ensures code quality and consistency throughout the codebase.
- In our project, we rely on several essential ESLint plugins, namely:
  - [plugin:@typescript-eslint](https://typescript-eslint.io/rules/)
  - [airbnb styleguide](https://github.com/airbnb/javascript)
  - [plugin:sonarjs](https://github.com/SonarSource/eslint-plugin-sonarjs)

To ensure compliance with our coding standards and best practices, we encourage you to refer to the documentation of these plugins. Familiarizing yourself with the ESLint rules they provide will help maintain code quality and consistency throughout the project.

### Naming Conventions

- Ensure that component names are written in Capital Case, while the folder names should be in lowercase.
- Keep all other elements, such as variables, functions, and file names, in lowercase.

### Miscellaneous

- Ensure that functions are modularized and follow the Single Responsibility Principle (SRP). The function's name should accurately convey its purpose and functionality.
- Semantic division of functions into smaller units should be prioritized for improved readability and maintainability.
  Aim to keep functions concise and avoid exceeding a maximum length of 40 lines to enhance code understandability and ease of maintenance.
- Eliminate the use of hard-coded strings or enums, favoring a more flexible and maintainable approach.
- Strive to internationalize all strings within the codebase to support localization and improve accessibility for users across different languages.
- Minimize the usage of multiple if statements or switch cases within a function. Consider creating a mapper and separating logic into multiple functions for better code organization.
@@ -1,5 +1,5 @@
# Builder stage
-FROM node:16.15.0-slim as builder
+FROM node:16.15.0 as builder

# Add Maintainer Info
LABEL maintainer="signoz"
@@ -9,8 +9,11 @@ ARG TARGETARCH

WORKDIR /frontend

-# Copy the package.json to install dependencies
+# Copy the package.json and .yarnrc files prior to installing dependencies
COPY package.json ./
+# Copy lock file
+COPY yarn.lock ./
+COPY .yarnrc ./

# Install the dependencies and make the folder
RUN CI=1 yarn install
@@ -15,7 +15,7 @@ const config: Config.InitialOptions = {
        useESM: true,
    },
},
-    testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
+    testMatch: ['<rootDir>/src/**/*?(*.)(test).(ts|js)?(x)'],
    preset: 'ts-jest/presets/js-with-ts-esm',
    transform: {
        '^.+\\.(ts|tsx)?$': 'ts-jest',
@@ -25,6 +25,7 @@ const config: Config.InitialOptions = {
    setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
    testPathIgnorePatterns: ['/node_modules/', '/public/'],
    moduleDirectories: ['node_modules', 'src'],
+    testEnvironment: 'jest-environment-jsdom',
    testEnvironmentOptions: {
        'jest-playwright': {
            browsers: ['chromium', 'firefox', 'webkit'],
@@ -1,5 +1,20 @@
+/* eslint-disable @typescript-eslint/explicit-function-return-type */
+/* eslint-disable object-shorthand */
+/* eslint-disable func-names */
+
/**
 * Adds custom matchers from the react testing library to all tests
 */
import '@testing-library/jest-dom';
import 'jest-styled-components';
+
+// Mock window.matchMedia
+window.matchMedia =
+    window.matchMedia ||
+    function (): any {
+        return {
+            matches: false,
+            addListener: function () {},
+            removeListener: function () {},
+        };
+    };
@@ -27,16 +27,14 @@
    "author": "",
    "license": "ISC",
    "dependencies": {
-        "@ant-design/colors": "^6.0.0",
-        "@ant-design/icons": "^4.6.2",
+        "@ant-design/colors": "6.0.0",
+        "@ant-design/icons": "4.8.0",
        "@grafana/data": "^8.4.3",
        "@monaco-editor/react": "^4.3.1",
-        "@testing-library/jest-dom": "^5.11.4",
-        "@testing-library/react": "^11.1.0",
-        "@testing-library/user-event": "^12.1.10",
        "@welldone-software/why-did-you-render": "^6.2.1",
        "@xstate/react": "^3.0.0",
-        "antd": "4.19.2",
+        "ansi-to-html": "0.7.2",
+        "antd": "5.0.5",
        "antd-table-saveas-excel": "2.2.1",
        "axios": "^0.21.0",
        "babel-eslint": "^10.1.0",
        "babel-jest": "^26.6.0",
@@ -44,21 +42,19 @@
        "babel-plugin-named-asset-import": "^0.3.7",
        "babel-preset-minify": "^0.5.1",
        "babel-preset-react-app": "^10.0.0",
-        "chart.js": "^3.4.0",
+        "chart.js": "3.9.1",
        "chartjs-adapter-date-fns": "^2.0.0",
        "chartjs-plugin-annotation": "^1.4.0",
        "color": "^4.2.1",
        "cross-env": "^7.0.3",
        "css-loader": "4.3.0",
        "css-minimizer-webpack-plugin": "^3.2.0",
-        "d3": "^6.2.0",
-        "d3-flame-graph": "^3.1.1",
-        "d3-tip": "^0.9.1",
        "dayjs": "^1.10.7",
+        "dompurify": "3.0.0",
        "dotenv": "8.2.0",
        "event-source-polyfill": "1.0.31",
        "file-loader": "6.1.1",
        "flat": "^5.0.2",
        "fontfaceobserver": "2.3.0",
        "history": "4.10.1",
        "html-webpack-plugin": "5.1.0",
        "i18next": "^21.6.12",
@@ -70,17 +66,19 @@
        "less-loader": "^10.2.0",
        "lodash-es": "^4.17.21",
        "mini-css-extract-plugin": "2.4.5",
-        "react": "17.0.0",
-        "react-dom": "17.0.0",
+        "papaparse": "5.4.1",
+        "react": "18.2.0",
+        "react-dom": "18.2.0",
        "react-drag-listview": "2.0.0",
        "react-force-graph": "^1.41.0",
        "react-graph-vis": "^1.0.5",
        "react-grid-layout": "^1.3.4",
        "react-i18next": "^11.16.1",
+        "react-intersection-observer": "9.4.1",
        "react-query": "^3.34.19",
        "react-redux": "^7.2.2",
        "react-router-dom": "^5.2.0",
        "react-use": "^17.3.2",
        "react-vis": "^1.11.7",
+        "react-virtuoso": "4.0.3",
        "redux": "^4.0.5",
        "redux-thunk": "^2.3.0",
        "stream": "^0.0.2",
@@ -120,31 +118,33 @@
        "@commitlint/config-conventional": "^16.2.4",
        "@jest/globals": "^27.5.1",
        "@playwright/test": "^1.22.0",
-        "@testing-library/react-hooks": "^7.0.2",
+        "@testing-library/jest-dom": "5.16.5",
+        "@testing-library/react": "13.4.0",
+        "@testing-library/user-event": "14.4.3",
        "@types/color": "^3.0.3",
        "@types/compression-webpack-plugin": "^9.0.0",
        "@types/copy-webpack-plugin": "^8.0.1",
        "@types/d3": "^6.2.0",
        "@types/d3-tip": "^3.5.5",
        "@types/dompurify": "^2.4.0",
        "@types/event-source-polyfill": "^1.0.0",
        "@types/flat": "^5.0.2",
        "@types/fontfaceobserver": "2.1.0",
        "@types/jest": "^27.5.1",
        "@types/lodash-es": "^4.17.4",
        "@types/mini-css-extract-plugin": "^2.5.1",
        "@types/node": "^16.10.3",
-        "@types/react": "^17.0.0",
-        "@types/react-dom": "^16.9.9",
+        "@types/papaparse": "5.3.7",
+        "@types/react": "18.0.26",
+        "@types/react-dom": "18.0.10",
        "@types/react-grid-layout": "^1.1.2",
        "@types/react-redux": "^7.1.11",
        "@types/react-resizable": "3.0.3",
        "@types/react-router-dom": "^5.1.6",
        "@types/redux": "^3.6.0",
        "@types/styled-components": "^5.1.4",
        "@types/uuid": "^8.3.1",
        "@types/vis": "^4.21.21",
        "@types/webpack": "^5.28.0",
        "@types/webpack-dev-server": "^4.3.0",
        "@typescript-eslint/eslint-plugin": "^4.28.2",
        "@typescript-eslint/parser": "^4.28.2",
        "@welldone-software/why-did-you-render": "6.2.1",
        "autoprefixer": "^9.0.0",
        "babel-plugin-styled-components": "^1.12.0",
        "compression-webpack-plugin": "9.0.0",
@@ -169,11 +169,12 @@
        "is-ci": "^3.0.1",
        "jest-playwright-preset": "^1.7.0",
        "jest-styled-components": "^7.0.8",
        "less-plugin-npm-import": "^2.1.0",
        "lint-staged": "^12.3.7",
        "portfinder-sync": "^0.0.2",
        "prettier": "2.2.1",
        "react-hooks-testing-library": "0.6.0",
        "react-hot-loader": "^4.13.0",
        "react-resizable": "3.0.4",
        "ts-jest": "^27.1.4",
        "ts-node": "^10.2.1",
        "typescript-plugin-css-modules": "^3.4.0",
@@ -186,7 +187,7 @@
    ]
},
"resolutions": {
-    "@types/react": "17.0.0",
-    "@types/react-dom": "17.0.0"
+    "@types/react": "18.0.26",
+    "@types/react-dom": "18.0.10"
}
}
frontend/public/css/antd.dark.min.css (vendored, 10 lines)
File diff suppressed because one or more lines are too long

frontend/public/css/antd.min.css (vendored, 10 lines)
File diff suppressed because one or more lines are too long
frontend/public/locales/en-GB/trace.json (new file, 11 lines)
@@ -0,0 +1,11 @@
{
    "options_menu": {
        "options": "Options",
        "format": "Format",
        "raw": "Raw",
        "default": "Default",
        "column": "Column",
        "maxLines": "Max lines per Row",
        "addColumn": "Add a column"
    }
}

frontend/public/locales/en-GB/traceDetails.json (new file, 3 lines)
@@ -0,0 +1,3 @@
{
    "search_tags": "Search Tag Names"
}

frontend/public/locales/en/trace.json (new file, 11 lines)
@@ -0,0 +1,11 @@
{
    "options_menu": {
        "options": "Options",
        "format": "Format",
        "raw": "Raw",
        "default": "Default",
        "column": "Column",
        "maxLines": "Max lines per Row",
        "addColumn": "Add a column"
    }
}

frontend/public/locales/en/traceDetails.json (new file, 3 lines)
@@ -0,0 +1,3 @@
{
    "search_tags": "Search Tag Names"
}
@@ -6,7 +6,7 @@
    "release_notes": "Release Notes",
    "read_how_to_upgrade": "Read instructions on how to upgrade",
    "latest_version_signoz": "You are running the latest version of SigNoz.",
-    "stale_version": "You are on an older version and may be losing out on the latest features we have shipped. We recommend to upgrade to the latest version",
+    "stale_version": "You are on an older version and may be missing out on the latest features we have shipped. We recommend to upgrade to the latest version",
    "oops_something_went_wrong_version": "Oops.. facing issues with fetching updated version information",
    "n_a": "N/A",
    "routes": {
@@ -1,13 +1,13 @@
/* eslint-disable react-hooks/exhaustive-deps */
-import { notification } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import loginApi from 'api/user/login';
import { Logout } from 'api/utils';
import Spinner from 'components/Spinner';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';
+import { useNotifications } from 'hooks/useNotifications';
import history from 'lib/history';
-import React, { useEffect, useMemo } from 'react';
+import { ReactChild, useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useDispatch, useSelector } from 'react-redux';
import { matchPath, Redirect, useLocation } from 'react-router-dom';
@@ -47,6 +47,8 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {

    const dispatch = useDispatch<Dispatch<AppActions>>();

+    const { notifications } = useNotifications();
+
    const currentRoute = mapRoutes.get('current');

    const navigateToLoginIfNotLoggedIn = (isLoggedIn = isLoggedInState): void => {
@@ -106,7 +108,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
    } else {
        Logout();

-        notification.error({
+        notifications.error({
            message: response.error || t('something_went_wrong'),
        });
    }
@@ -159,7 +161,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
}

interface PrivateRouteProps {
-    children: React.ReactChild;
+    children: ReactChild;
}

export default PrivateRoute;
Some files were not shown because too many files have changed in this diff.