Compare commits
1070 commits, from 622e1765cf through 30124de409.
.dockerignore (new file, 6 lines)

```
@@ -0,0 +1,6 @@
.git
.github
.vscode
README.md
deploy
sample-apps
```
.editorconfig (new file, 33 lines)

```
@@ -0,0 +1,33 @@
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true

# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,py}]
charset = utf-8

# 4 space indentation
[*.py]
indent_style = space
indent_size = 4

# Tab indentation (no size specified)
[Makefile]
indent_style = tab

# Indentation override for all JS under lib directory
[lib/**.js]
indent_style = space
indent_size = 2

# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2
```
.github/CODEOWNERS (vendored, 5 changed lines)

```
@@ -2,5 +2,6 @@
# Owners are automatically requested for review for PRs that changes code
# that they own.
* @ankitnayan
/frontend/ @palash-signoz
/deploy/ @prashant-shahi
/frontend/ @palashgdev @pranshuchittora
/deploy/ @prashant-shahi
/pkg/query-service/ @srikanthccv
```
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 1 changed line)

```
@@ -26,6 +26,7 @@ assignees: ''
* **Signoz version**:
* **Browser version**:
* **Your OS and version**:
* **Your CPU Architecture**(ARM/Intel):

## Additional context
```
.github/workflows/build.yaml (vendored, 57 changed lines)

```
@@ -1,50 +1,27 @@
name: build-pipeline

on:
pull_request:
branches:
- develop
- main
- v*
paths:
- 'pkg/**'
- 'frontend/**'
- release/v*

jobs:
get_filters:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
frontend: ${{ steps.filter.outputs.frontend }}
query-service: ${{ steps.filter.outputs.query-service }}
flattener: ${{ steps.filter.outputs.flattener }}
steps:
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@v2
id: filter
with:
filters: |
frontend:
- 'frontend/**'
query-service:
- 'pkg/query-service/**'
flattener:
- 'pkg/processors/flattener/**'

build-frontend:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.frontend == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install dependencies
run: cd frontend && yarn install
- name: Run Prettier
run: cd frontend && npm run prettify
continue-on-error: true
- name: Run ESLint
run: cd frontend && npm run lint
continue-on-error: true
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
shell: bash
run: |
@@ -52,26 +29,24 @@ jobs:

build-query-service:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.query-service == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build query-service image
- name: Run tests
shell: bash
run: |
make test
- name: Build query-service image
shell: bash
run: |
make build-query-service-amd64

build-flattener:
build-ee-query-service:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.flattener == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build flattener docker image
- name: Build EE query-service image
shell: bash
run: |
make build-flattener-amd64
make build-ee-query-service-amd64
```
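For reference, a sketch of how the Make targets invoked by the updated workflow can be exercised locally. The target names are taken from the diffs on this page; everything else (a local signoz checkout, Docker available) is assumed.

```bash
# Run the same targets the CI jobs call (assumes a local signoz checkout and Docker).
make test                           # query-service tests (the new "Run tests" step)
make build-query-service-amd64      # OSS query-service image
make build-ee-query-service-amd64   # EE query-service image (replaces the flattener build)
make build-frontend-amd64           # frontend image
```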
.github/workflows/codeball.yml (vendored, new file, 17 lines)

```
@@ -0,0 +1,17 @@
name: Codeball
on: [pull_request]

jobs:
codeball_job:
runs-on: ubuntu-latest
name: Codeball
steps:
# Run Codeball on all new Pull Requests 🚀
# For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
- name: Codeball
uses: sturdy-dev/codeball-action@v2
with:
approvePullRequests: "true"
labelPullRequestsWhenApproved: "true"
labelPullRequestsWhenReviewNeeded: "false"
failJobsWhenReviewNeeded: "false"
```
.github/workflows/create-issue-on-pr-merge.yml (vendored, new file, 27 lines)

```
@@ -0,0 +1,27 @@
on:
pull_request_target:
types:
- closed

env:
GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
PR_NUMBER: ${{ github.event.number }}
jobs:
create_issue_on_merge:
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest
steps:
- name: Checkout Codebase
uses: actions/checkout@v2
with:
repository: signoz/gh-bot
- name: Use Node v16
uses: actions/setup-node@v2
with:
node-version: 16
- name: Setup Cache & Install Dependencies
uses: bahmutov/npm-install@v1
with:
install-command: yarn --frozen-lockfile
- name: Comment on PR
run: node create-issue.js
```
.github/workflows/dependency-review.yml (vendored, new file, 22 lines)

```
@@ -0,0 +1,22 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency Review'
on: [pull_request]

permissions:
contents: read

jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v3
- name: 'Dependency Review'
with:
fail-on-severity: high
uses: actions/dependency-review-action@v2
```
.github/workflows/e2e-k3s.yaml (vendored, 9 changed lines)

```
@@ -16,7 +16,9 @@ jobs:
uses: actions/checkout@v2

- name: Build query-service image
run: make build-query-service-amd64
env:
DEV_BUILD: 1
run: make build-ee-query-service-amd64

- name: Build frontend image
run: make build-frontend-amd64
@@ -52,14 +54,11 @@ jobs:
helm install my-release signoz/signoz -n platform \
--wait \
--timeout 10m0s \
--set cloud=null \
--set frontend.service.type=LoadBalancer \
--set query-service.image.tag=$DOCKER_TAG \
--set queryService.image.tag=$DOCKER_TAG \
--set frontend.image.tag=$DOCKER_TAG

# get pods, services and the container images
kubectl describe deploy/my-release-frontend -n platform | grep Image
kubectl describe statefulset/my-release-query-service -n platform | grep Image
kubectl get pods -n platform
kubectl get svc -n platform
```
.github/workflows/playwright.yaml (vendored, new file, 24 lines)

```
@@ -0,0 +1,24 @@
name: Playwright Tests
on: [pull_request]

jobs:
playwright:
defaults:
run:
working-directory: frontend
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
node-version: "16.x"
- name: Install dependencies
run: CI=1 yarn install
- name: Install Playwright
run: npx playwright install --with-deps
- name: Run Playwright tests
run: yarn playwright
env:
# This might depend on your test-runner/language binding
PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}
```
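A sketch of running the same Playwright steps locally from `frontend/`, mirroring the workflow above; the base URL value below is only an assumed example, since the workflow takes it from the `PLAYWRIGHT_TEST_BASE_URL` secret.

```bash
cd frontend
CI=1 yarn install                    # same install step as the workflow
npx playwright install --with-deps   # install browsers and OS dependencies
PLAYWRIGHT_TEST_BASE_URL="http://localhost:3301" yarn playwright   # assumed local URL
```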
.github/workflows/pr_verify_linked_issue.yml (vendored, new file, 20 lines)

```
@@ -0,0 +1,20 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue

on:
pull_request:
types: [edited, synchronize, opened, reopened]
check_run:

jobs:
verify_linked_issue:
runs-on: ubuntu-latest
name: Ensure Pull Request has a linked issue.
steps:
- name: Verify Linked Issue
uses: hattan/verify-linked-issue-action@v1.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/push.yaml (vendored, 37 changed lines)

```
@@ -11,6 +11,41 @@ on:
jobs:

image-build-and-push-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v1.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-query-service

image-build-and-push-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -43,7 +78,7 @@ jobs:
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-query-service
run: make build-push-ee-query-service

image-build-and-push-frontend:
runs-on: ubuntu-latest
```
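A minimal standalone sketch of the tag-derivation logic in the workflow above, rewritten as a plain shell function. `is_tag` and `ref` stand in for the `tj-actions/branch-names` outputs; the function itself is illustrative, not part of the repository.

```bash
#!/usr/bin/env bash
# Sketch of the DOCKER_TAG derivation used in push.yaml above.
derive_docker_tag() {
  local is_tag="$1" ref="$2"
  if [ "$is_tag" == "true" ]; then
    local tag="${ref:1}"          # strip the leading "v" from e.g. v0.11.0
    echo "${tag}-oss"
  elif [ "$ref" == "main" ]; then
    echo "latest-oss"
  else
    echo "${ref}-oss"             # branch builds are tagged <branch>-oss
  fi
}

derive_docker_tag true  v0.11.0   # -> 0.11.0-oss
derive_docker_tag false main      # -> latest-oss
derive_docker_tag false develop   # -> develop-oss
```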
.github/workflows/remove-label.yaml (vendored, 2 changed lines)

```
@@ -11,6 +11,6 @@ jobs:
- name: Remove label
uses: buildsville/add-remove-label@v1
with:
label: ok-to-test
label: ok-to-test,testing-deploy
type: remove
token: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/staging-deployment.yaml (vendored, new file, 38 lines)

```
@@ -0,0 +1,38 @@
name: staging-deployment
# Trigger deployment only on push to develop branch
on:
push:
branches:
- develop
jobs:
deploy:
name: Deploy latest develop branch to staging
runs-on: ubuntu-latest
environment: staging
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.6
env:
GITHUB_BRANCH: develop
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.EC2_SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
docker system prune --force
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make run-signoz
```
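The deploy script above tags images with the short commit SHA. A small sketch of that substring expansion, using a placeholder SHA value purely for illustration:

```bash
# Sketch: how the deploy script derives its image tag from the commit SHA.
GITHUB_SHA="0123456789abcdef0123456789abcdef01234567"   # placeholder value only
export DOCKER_TAG="${GITHUB_SHA:0:7}"                    # first 7 characters
echo "$DOCKER_TAG"                                       # -> 0123456
```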
.github/workflows/testing-deployment.yaml (vendored, new file, 39 lines)

```
@@ -0,0 +1,39 @@
name: testing-deployment
# Trigger deployment only on testing-deploy label on pull request
on:
pull_request:
types: [labeled]
jobs:
deploy:
name: Deploy PR branch to testing
runs-on: ubuntu-latest
environment: testing
if: ${{ github.event.label.name == 'testing-deploy' }}
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.6
env:
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.EC2_SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export DEV_BUILD="1"
docker system prune --force
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make run-signoz
```
.gitignore (vendored, 18 changed lines)

```
@@ -1,3 +1,4 @@

node_modules
yarn.lock
package.json
@@ -5,6 +6,7 @@ package.json
deploy/docker/environment_tiny/common_test
frontend/node_modules
frontend/.pnp
frontend/i18n-translations-hash.json
*.pnp.js

# testing
@@ -15,6 +17,7 @@ frontend/build
frontend/.vscode
frontend/.yarnclean
frontend/.temp_cache
frontend/test-results

# misc
.DS_Store
@@ -27,10 +30,6 @@ frontend/npm-debug.log*
frontend/yarn-debug.log*
frontend/yarn-error.log*
frontend/src/constants/env.ts
frontend/cypress/**/*.mp4

# env file for cypress
frontend/cypress.env.json

.idea

@@ -42,4 +41,15 @@ frontend/cypress.env.json

frontend/*.env
pkg/query-service/signoz.db

pkg/query-service/tests/test-deploy/data/

ee/query-service/signoz.db

ee/query-service/tests/test-deploy/data/

# local data
*.db
/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
bin/
```
Next file (name not shown in this capture; the content matches `.gitpod.yml`)

```
@@ -11,7 +11,7 @@ tasks:
- name: Run Docker Images
init: |
cd ./deploy
sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
# command:

- name: Run Frontend
@@ -22,7 +22,7 @@ tasks:
yarn dev

ports:
- port: 3000
- port: 3301
onOpen: open-browser
- port: 8080
onOpen: ignore
```
Next file (name not shown in this capture; this is likely `.scripts/commentLinesForSetup.sh`, the setup script referenced in CONTRIBUTING.md)

```
@@ -4,4 +4,4 @@
# Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml chnages.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz

sed -i 38,70's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
```
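A short sketch of the sed idiom the script uses: an address range followed by a substitution that prefixes each matched line with `# `, commenting out those lines in place (GNU sed `-i` syntax, as in the script).

```bash
# Comment out a line range in place, as commentLinesForSetup.sh does for docker-compose.yaml.
printf 'one\ntwo\nthree\n' > /tmp/demo.txt
sed -i 1,2's/.*/# &/' /tmp/demo.txt   # "1,2" is the line range, "&" is the matched line
cat /tmp/demo.txt                      # -> "# one", "# two", "three"
```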
416
CONTRIBUTING.md
416
CONTRIBUTING.md
@@ -1,127 +1,385 @@
|
||||
# How to Contribute
|
||||
# Contributing Guidelines
|
||||
|
||||
There are primarily 2 areas in which you can contribute in SigNoz
|
||||
## Welcome to SigNoz Contributing section 🎉
|
||||
|
||||
- Frontend ( written in Typescript, React)
|
||||
- Backend - ( Query Service - written in Go)
|
||||
Hi there! We're thrilled that you'd like to contribute to this project, thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
|
||||
|
||||
Depending upon your area of expertise & interest, you can chose one or more to contribute. Below are detailed instructions to contribute in each area
|
||||
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.
|
||||
|
||||
> Please note: If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
|
||||
- We accept contributions made to the [SigNoz `develop` branch]()
|
||||
- Find all SigNoz Docker Hub images here
|
||||
- [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
|
||||
- [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
|
||||
- [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)
|
||||
|
||||
> If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
|
||||
## Finding contributions to work on 💬
|
||||
|
||||
# Develop Frontend
|
||||
Looking at the existing issues is a great way to find something to contribute on.
|
||||
Also, have a look at these [good first issues label](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with.
|
||||
|
||||
Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)
|
||||
|
||||
### Contribute to Frontend with Docker installation of SigNoz
|
||||
## Sections:
|
||||
- [General Instructions](#1-general-instructions-)
|
||||
- [For Creating Issue(s)](#11-for-creating-issues)
|
||||
- [For Pull Requests(s)](#12-for-pull-requests)
|
||||
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
|
||||
- [Develop Frontend](#3-develop-frontend-)
|
||||
- [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
|
||||
- [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
|
||||
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
|
||||
- [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
|
||||
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
|
||||
- [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
|
||||
- [Other Ways to Contribute](#other-ways-to-contribute)
|
||||
|
||||
- `git clone https://github.com/SigNoz/signoz.git && cd signoz`
|
||||
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L59`
|
||||
- run `cd deploy` to move to deploy directory
|
||||
- Install signoz locally without the frontend
|
||||
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d`
|
||||
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d`
|
||||
- `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
|
||||
- `yarn install`
|
||||
- `yarn dev`
|
||||
# 1. General Instructions 📝
|
||||
|
||||
> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
|
||||
## 1.1 For Creating Issue(s)
|
||||
Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.
|
||||
|
||||
### Contribute to Frontend without installing SigNoz backend
|
||||
**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
|
||||
|
||||
If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in #contributing channel in our [slack community](https://signoz.io/slack) and we will DM you with `<test environment URL>`
|
||||
#### Details like these are incredibly useful:
|
||||
|
||||
- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
|
||||
- Create a file `.env` with `FRONTEND_API_ENDPOINT=<test environment URL>`
|
||||
- `yarn install`
|
||||
- `yarn dev`
|
||||
- **Requirement** - what kind of use case are you trying to solve?
|
||||
- **Proposal** - what do you suggest to solve the problem or improve the existing
|
||||
situation?
|
||||
- Any open questions to address❓
|
||||
|
||||
**_Frontend should now be accessible at `http://localhost:3301/application`_**
|
||||
#### If you are reporting a bug, details like these are incredibly useful:
|
||||
|
||||
# Contribute to Query-Service
|
||||
- A reproducible test case or series of steps.
|
||||
- The version of our code being used.
|
||||
- Any modifications you've made relevant to the bug🐞.
|
||||
- Anything unusual about your environment or deployment.
|
||||
|
||||
Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)
|
||||
Discussing your proposed changes ahead of time will make the contribution
|
||||
process smooth for everyone 🙌.
|
||||
|
||||
### To run ClickHouse setup (recommended for local development)
|
||||
**[`^top^`](#)**
|
||||
|
||||
<hr>
|
||||
|
||||
- git clone https://github.com/SigNoz/signoz.git
|
||||
- run `sudo make dev-setup` to configure local setup to run query-service
|
||||
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L59`
|
||||
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
|
||||
- Install signoz locally without the frontend and query-service
|
||||
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo make run-x86`
|
||||
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo make run-arm`
|
||||
## 1.2 For Pull Request(s)
|
||||
|
||||
> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
|
||||
Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a Pull Request(s).
|
||||
Before sending us a pull request, please ensure that,
|
||||
|
||||
**_Query Service should now be available at `http://localhost:8080`_**
|
||||
- Fork the SigNoz repo on GitHub, clone it on your machine.
|
||||
- Create a branch with your changes.
|
||||
- You are working against the latest source on the `develop` branch.
|
||||
- Modify the source; please focus only on the specific change you are contributing.
|
||||
- Ensure local tests pass.
|
||||
- Commit to your fork using clear commit messages.
|
||||
- Send us a pull request, answering any default questions in the pull request interface.
|
||||
- Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation
|
||||
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, on your computer, merge main into your branch, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push up those changes.
|
||||
- Once the change has been approved and merged, we will inform you in a comment.
|
||||
|
||||
> If you want to see how, frontend plays with query service, you can run frontend also in you local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts` as the query-service is now running at port `8080`
|
||||
|
||||
---
|
||||
Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
|
||||
GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
|
||||
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
|
||||
|
||||
**Note:** Unless your change is small, **please** consider submitting different Pull Rrequest(s):
|
||||
|
||||
* 1️⃣ First PR should include the overall structure of the new component:
|
||||
* Readme, configuration, interfaces or base classes, etc...
|
||||
* This PR is usually trivial to review, so the size limit does not apply to
|
||||
it.
|
||||
* 2️⃣ Second PR should include the concrete implementation of the component. If the
|
||||
size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into
|
||||
multiple PRs.
|
||||
* If there are multiple sub-component then ideally each one should be implemented as
|
||||
a **separate** pull request.
|
||||
* Last PR should include changes to **any user-facing documentation.** And should include
|
||||
end-to-end tests if applicable. The component must be enabled
|
||||
only after sufficient testing, and there is enough confidence in the
|
||||
stability and quality of the component.
|
||||
|
||||
|
||||
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [SLACK](https://signoz.io/slack).
|
||||
|
||||
### Pointers:
|
||||
- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
|
||||
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
|
||||
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
|
||||
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
|
||||
|
||||
<hr>
|
||||
|
||||
### Conventions to follow when submitting Commits and Pull Request(s).
|
||||
|
||||
We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/), more specifically the commits and PRs **should have type specifiers** prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.
|
||||
|
||||
e.g. If you are submitting a fix for an issue in frontend, the PR name should be prefixed with **`fix(FE):`**
|
||||
|
||||
- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.
|
||||
|
||||
- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
|
||||
|
||||
**[`^top^`](#)**
|
||||
|
||||
<hr>
|
||||
|
||||
# 2. How to Contribute 🙋🏻♂️
|
||||
|
||||
#### There are primarily 2 areas in which you can contribute to SigNoz
|
||||
|
||||
- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
|
||||
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
|
||||
|
||||
Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.
|
||||
|
||||
**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
|
||||
|
||||
⚠️ If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
|
||||
|
||||
**[`^top^`](#)**
|
||||
|
||||
<hr>
|
||||
|
||||
# 3. Develop Frontend 🌚
|
||||
|
||||
**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**
|
||||
|
||||
Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
|
||||
|
||||
## 3.1 Contribute to Frontend with Docker installation of SigNoz
|
||||
|
||||
- Clone the SigNoz repository and cd into signoz directory,
|
||||
```
|
||||
git clone https://github.com/SigNoz/signoz.git && cd signoz
|
||||
```
|
||||
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
|
||||
|
||||

|
||||
|
||||
|
||||
- run `cd deploy` to move to deploy directory,
|
||||
- Install signoz locally **without** the frontend,
|
||||
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
|
||||
```
|
||||
ports:
|
||||
- "8080:8080"
|
||||
```
|
||||
<img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">
|
||||
|
||||
- Next run,
|
||||
```
|
||||
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
|
||||
```
|
||||
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
|
||||
|
||||
If you have backend api exposed via frontend nginx:
|
||||
```
|
||||
FRONTEND_API_ENDPOINT=http://localhost:3301
|
||||
```
|
||||
If not:
|
||||
```
|
||||
FRONTEND_API_ENDPOINT=http://localhost:8080
|
||||
```
|
||||
|
||||
- Next,
|
||||
```
|
||||
yarn install
|
||||
yarn dev
|
||||
```
|
||||
|
||||
### Important Notes:
|
||||
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
|
||||
|
||||
**[`^top^`](#)**
|
||||
|
||||
## 3.2 Contribute to Frontend without installing SigNoz backend
|
||||
|
||||
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
|
||||
|
||||
- Clone the SigNoz repository and cd into signoz/frontend directory,
|
||||
```
|
||||
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
|
||||
````
|
||||
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
|
||||
- Next,
|
||||
```
|
||||
yarn install
|
||||
yarn dev
|
||||
```
|
||||
|
||||
Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `<test environment URL>`.
|
||||
|
||||
**Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)
|
||||
|
||||
**[`^top^`](#)**
|
||||
|
||||
<hr>
|
||||
|
||||
# 4. Contribute to Backend (Query-Service) 🌑
|
||||
|
||||
**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
|
||||
|
||||
## 4.1 Prerequisites
|
||||
|
||||
### 4.1.1 Install SQLite3
|
||||
|
||||
- Run `sqlite3` command to check if you already have SQLite3 installed on your machine.
|
||||
|
||||
- If not installed already, Install using below command
|
||||
- on Linux
|
||||
- on Debian / Ubuntu
|
||||
```
|
||||
sudo apt install sqlite3
|
||||
```
|
||||
- on CentOS / Fedora / RedHat
|
||||
```
|
||||
sudo yum install sqlite3
|
||||
```
|
||||
|
||||
## 4.2 To run ClickHouse setup (recommended for local development)
|
||||
|
||||
- Clone the SigNoz repository and cd into signoz directory,
|
||||
```
|
||||
git clone https://github.com/SigNoz/signoz.git && cd signoz
|
||||
```
|
||||
- run `sudo make dev-setup` to configure local setup to run query-service,
|
||||
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
|
||||
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">
|
||||
|
||||
- Comment out the `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)

<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">

- Add the configuration below to the `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)

```
ports:
  - 9001:9000
```

<img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">
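After commenting out those sections and adding the port mapping, you can optionally verify that the modified compose file still parses cleanly. This is only a convenience check, not an official setup step:

```sh
# validate the edited compose file without starting anything
docker-compose -f deploy/docker/clickhouse-setup/docker-compose.yaml config --quiet
```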
- run `cd pkg/query-service/` to move to the `query-service` directory,

- Then, create a `.env` file with the following environment variable

```
SIGNOZ_LOCAL_DB_PATH="./signoz.db"
```

to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38)

- Now, install SigNoz locally **without** the `frontend` and `query-service`,
  - If you are using `x86_64` processors (all Intel/AMD processors), run `sudo make run-x86`
  - If you are on `arm64` processors (Apple M1 Macs), run `sudo make run-arm`
#### Run locally

```
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```

#### Build and run locally

```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```
#### Docker Images

The docker images of query-service are available at https://hub.docker.com/r/signoz/query-service

```
docker pull signoz/query-service
docker pull signoz/query-service:latest
docker pull signoz/query-service:develop
```
### Important Note:

Maintainers / contributors who change the line numbers referenced above for `Frontend` & `Query-Service` should also update them in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh).

**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
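To quickly confirm that the query service is responding, you can hit the version endpoint that the Docker healthchecks in this repository use. This is a convenience check rather than an official step:

```sh
curl http://localhost:8080/api/v1/version
```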
If you want to see how the frontend plays with the query service, you can also run the frontend in your local environment with the `baseURL` changed to `http://localhost:8080` in [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts), as the `query-service` is now running on port `8080`.
<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.

Click the button below. A workspace with all required environments will be created.

[](https://gitpod.io/#https://github.com/SigNoz/signoz)

> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->
**[`^top^`](#)**

<hr>

# 5. Contribute to SigNoz Helm Chart 📊

**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**

## 5.1 To run helm chart for local development

- Clone the SigNoz charts repository and cd into the `charts` directory,

```
git clone https://github.com/SigNoz/charts.git && cd charts
```

- It is recommended to use a lightweight Kubernetes (k8s) cluster for local development:
  - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
  - [k3d](https://k3d.io/#installation)
  - [minikube](https://minikube.sigs.k8s.io/docs/start/)
- Create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster,
- Run `make dev-install` to install the SigNoz chart with the `my-release` release name in the `platform` namespace,
- Next, run

```
kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
```

to make the SigNoz UI available at [localhost:3301](http://localhost:3301)
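If the UI is not reachable, a quick way to check whether all pods of the release are ready (assuming the `platform` namespace and `my-release` release name used above):

```sh
kubectl -n platform get pods
```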
**5.1.1 To install the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
  | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```

**5.1.2 To load data with the HotROD sample app:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
  'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```
**5.1.3 To stop the load generation:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl \
  http://locust-master:8089/stop
```

**5.1.4 To delete the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
  | HOTROD_NAMESPACE=sample-application bash
```

**[`^top^`](#)**

---
## Other Ways to Contribute

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).

There are many other ways to get involved with the community and to participate in this project:

- If you find any bugs, please create an issue
- If you find anything missing in documentation, you can create an issue with the label `documentation`
- If you want to build any new feature, please create an issue with the label `enhancement`
- If you want to discuss something about the product, start a new [discussion](https://github.com/SigNoz/signoz/discussions)
- Use the product, submitting GitHub issues when a problem is found.
- Help code review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and the [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.
### Conventions to follow when submitting commits, PRs

1. We try to follow https://www.conventionalcommits.org/en/v1.0.0/

   More specifically, commits and PRs should have type specifiers prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.

   e.g. If you are submitting a fix for an issue in the frontend, the PR name should be prefixed with `fix(FE):` (a couple of hypothetical examples are shown after this list).

2. Follow the [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.

3. Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
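For illustration only, hypothetical commit messages following this convention might look like the ones below (the scope names are just examples, not an official list):

```
fix(FE): align service map tooltip with the selected time range
feat(QS): add pagination to the error list API
```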
Thank You!
LICENSE
@@ -1,6 +1,10 @@
|
||||
MIT License
|
||||
Copyright (c) 2020-present SigNoz Inc.
|
||||
|
||||
Copyright (c) 2021 SigNoz
|
||||
Portions of this software are licensed as follows:
|
||||
|
||||
* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
|
||||
* All third party components incorporated into the SigNoz Software are licensed under the original license provided by the owner of the applicable component.
|
||||
* Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
Makefile
@@ -7,29 +7,36 @@ BUILD_VERSION ?= $(shell git describe --always --tags)
|
||||
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
|
||||
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
|
||||
|
||||
# Internal variables or constants.
|
||||
FRONTEND_DIRECTORY ?= frontend
|
||||
FLATTENER_DIRECTORY ?= pkg/processors/flattener
|
||||
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
|
||||
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
|
||||
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
|
||||
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
|
||||
LOCAL_GOOS ?= $(shell go env GOOS)
|
||||
LOCAL_GOARCH ?= $(shell go env GOARCH)
|
||||
|
||||
REPONAME ?= signoz
|
||||
DOCKER_TAG ?= latest
|
||||
|
||||
FRONTEND_DOCKER_IMAGE ?= frontend
|
||||
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
|
||||
FLATTERNER_DOCKER_IMAGE ?= flattener-processor
|
||||
DEV_BUILD ?= ""
|
||||
|
||||
# Build-time Go variables
|
||||
PACKAGE?=go.signoz.io/query-service
|
||||
buildVersion=${PACKAGE}/version.buildVersion
|
||||
buildHash=${PACKAGE}/version.buildHash
|
||||
buildTime=${PACKAGE}/version.buildTime
|
||||
gitBranch=${PACKAGE}/version.gitBranch
|
||||
PACKAGE?=go.signoz.io/signoz
|
||||
buildVersion=${PACKAGE}/pkg/query-service/version.buildVersion
|
||||
buildHash=${PACKAGE}/pkg/query-service/version.buildHash
|
||||
buildTime=${PACKAGE}/pkg/query-service/version.buildTime
|
||||
gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
|
||||
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
|
||||
|
||||
LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"
|
||||
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
|
||||
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
|
||||
|
||||
all: build-push-frontend build-push-query-service build-push-flattener
|
||||
all: build-push-frontend build-push-query-service
|
||||
# Steps to build and push docker image of frontend
|
||||
.PHONY: build-frontend-amd64 build-push-frontend
|
||||
# Step to build docker image of frontend in amd64 (used in build pipeline)
|
||||
@@ -38,7 +45,8 @@ build-frontend-amd64:
|
||||
@echo "--> Building frontend docker image for amd64"
|
||||
@echo "------------------"
|
||||
@cd $(FRONTEND_DIRECTORY) && \
|
||||
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) --build-arg TARGETPLATFORM="linux/amd64" .
|
||||
docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" .
|
||||
|
||||
# Step to build and push docker image of frontend(used in push pipeline)
|
||||
build-push-frontend:
|
||||
@@ -46,7 +54,8 @@ build-push-frontend:
|
||||
@echo "--> Building and pushing frontend docker image"
|
||||
@echo "------------------"
|
||||
@cd $(FRONTEND_DIRECTORY) && \
|
||||
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
docker buildx build --file Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 \
|
||||
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
|
||||
# Steps to build and push docker image of query service
|
||||
.PHONY: build-query-service-amd64 build-push-query-service
|
||||
@@ -55,34 +64,42 @@ build-query-service-amd64:
|
||||
@echo "------------------"
|
||||
@echo "--> Building query-service docker image for amd64"
|
||||
@echo "------------------"
|
||||
@cd $(QUERY_SERVICE_DIRECTORY) && \
|
||||
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS)
|
||||
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
|
||||
|
||||
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
|
||||
build-push-query-service:
|
||||
@echo "------------------"
|
||||
@echo "--> Building and pushing query-service docker image"
|
||||
@echo "------------------"
|
||||
@cd $(QUERY_SERVICE_DIRECTORY) && \
|
||||
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane \
|
||||
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
|
||||
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
|
||||
# Steps to build and push docker image of flattener
|
||||
.PHONY: build-flattener-amd64 build-push-flattener
|
||||
# Step to build docker image of flattener in amd64 (used in build pipeline)
|
||||
build-flattener-amd64:
|
||||
# Step to build EE docker image of query service in amd64 (used in build pipeline)
|
||||
build-ee-query-service-amd64:
|
||||
@echo "------------------"
|
||||
@echo "--> Building flattener docker image for amd64"
|
||||
@echo "--> Building query-service docker image for amd64"
|
||||
@echo "------------------"
|
||||
@cd $(FLATTENER_DIRECTORY) && \
|
||||
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
|
||||
@if [ $(DEV_BUILD) != "" ]; then \
|
||||
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
|
||||
else \
|
||||
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
|
||||
fi
|
||||
|
||||
# Step to build and push docker image of flattener in amd64 (used in push pipeline)
|
||||
build-push-flattener:
|
||||
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
|
||||
build-push-ee-query-service:
|
||||
@echo "------------------"
|
||||
@echo "--> Building and pushing flattener docker image"
|
||||
@echo "--> Building and pushing query-service docker image"
|
||||
@echo "------------------"
|
||||
@cd $(FLATTENER_DIRECTORY) && \
|
||||
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||
--progress plane --push --platform linux/arm64,linux/amd64 \
|
||||
--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
|
||||
dev-setup:
|
||||
mkdir -p /var/lib/signoz
|
||||
@@ -92,8 +109,32 @@ dev-setup:
|
||||
@echo "--> Local Setup completed"
|
||||
@echo "------------------"
|
||||
|
||||
run-x86:
|
||||
@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d
|
||||
run-local:
|
||||
@LOCAL_GOOS=$(LOCAL_GOOS) LOCAL_GOARCH=$(LOCAL_GOARCH) docker-compose -f \
|
||||
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
|
||||
up --build -d
|
||||
|
||||
run-arm:
|
||||
@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.arm.yaml up -d
|
||||
down-local:
|
||||
@docker-compose -f \
|
||||
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
|
||||
down -v
|
||||
|
||||
pull-signoz:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
|
||||
|
||||
run-signoz:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
|
||||
|
||||
down-signoz:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
|
||||
|
||||
clear-standalone-data:
|
||||
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
|
||||
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
|
||||
|
||||
clear-swarm-data:
|
||||
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
|
||||
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
|
||||
|
||||
test:
|
||||
go test ./pkg/query-service/app/metrics/...
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img alt="Lizenz" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
|
||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||
@@ -15,10 +14,10 @@
|
||||
|
||||
<h3 align="center">
|
||||
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
|
||||
<a href="https://signoz.io/slack"><b>Slack Community</b></a> •
|
||||
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
|
||||
<a href="https://twitter.com/SigNozHQ"><b>Twitter</b></a>
|
||||
</h3>
|
||||
|
||||
##
|
||||
|
||||
README.md
@@ -5,8 +5,7 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
|
||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
|
||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
||||
@@ -15,16 +14,18 @@
|
||||
|
||||
<h3 align="center">
|
||||
<a href="https://signoz.io/docs"><b>Documentation</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>ReadMe in German</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
|
||||
<a href="https://signoz.io/slack"><b>Slack Community</b></a> •
|
||||
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
|
||||
</h3>
|
||||
|
||||
##
|
||||
|
||||
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
|
||||
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can:
|
||||
|
||||
👉 Visualise Metrics, Traces and Logs in a single pane of glass
|
||||
|
||||
👉 You can see metrics like p99 latency, error rates for your services, external API calls and individual end points.
|
||||
|
||||
@@ -32,10 +33,17 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
|
||||
|
||||
👉 Run aggregates on trace data to get business relevant metrics
|
||||
|
||||
👉 Filter and query logs, build dashboards and alerts based on attributes in logs
|
||||
|
||||

|
||||
<br />
|
||||

|
||||
<br />
|
||||

|
||||
<br />
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -51,12 +59,12 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
|
||||
|
||||
## Features:
|
||||
|
||||
- Unified UI for metrics, traces and logs. No need to switch from Prometheus to Jaeger to debug issues, or use a logs tool like Elastic separate from your metrics and traces stack.
|
||||
- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
|
||||
- Slowest endpoints in your application
|
||||
- See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
|
||||
- Filter traces by service name, operation, latency, error, tags/annotations.
|
||||
- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
|
||||
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.
|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -78,6 +86,12 @@ We support [OpenTelemetry](https://opentelemetry.io) as the library which you ca
|
||||
- Python
|
||||
- NodeJS
|
||||
- Go
|
||||
- PHP
|
||||
- .NET
|
||||
- Ruby
|
||||
- Elixir
|
||||
- Rust
|
||||
|
||||
|
||||
You can find the complete list of languages here - https://opentelemetry.io/docs/
|
||||
|
||||
@@ -86,8 +100,7 @@ You can find the complete list of languages here - https://opentelemetry.io/docs
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
|
||||
|
||||
## Getting Started
|
||||
|
||||
|
||||
|
||||
### Deploy using Docker
|
||||
|
||||
Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using docker
|
||||
@@ -100,7 +113,6 @@ The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshoo
|
||||
### Deploy in Kubernetes using Helm
|
||||
|
||||
Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts
|
||||
|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -110,7 +122,7 @@ Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_cha
|
||||
|
||||
### SigNoz vs Prometheus
|
||||
|
||||
Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great.
|
||||
Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great.
|
||||
|
||||
Our goal is to provide an integrated UI between metrics & traces - similar to what SaaS vendors like Datadog provides - and give advanced filtering and aggregation over traces, something which Jaeger currently lack.
|
||||
|
||||
@@ -118,24 +130,55 @@ Our goal is to provide an integrated UI between metrics & traces - similar to wh
|
||||
|
||||
### SigNoz vs Jaeger
|
||||
|
||||
Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management in our roadmap.
|
||||
Jaeger only does distributed tracing. SigNoz supports metrics, traces and logs - all the 3 pillars of observability.
|
||||
|
||||
Moreover, SigNoz has few more advanced features wrt Jaeger:
|
||||
|
||||
- Jaegar UI doesn’t show any metrics on traces or on filtered traces
|
||||
- Jaeger can’t get aggregates on filtered traces. For example, p99 latency of requests which have tag - customer_type='premium'. This can be done easily on SigNoz
|
||||
|
||||
<p>  </p>
|
||||
|
||||
### SigNoz vs Elastic
|
||||
|
||||
- SigNoz Logs management are based on ClickHouse, a columnar OLAP datastore which makes aggregate log analytics queries much more efficient
|
||||
- 50% lower resource requirement compared to Elastic during ingestion
|
||||
|
||||
<p>  </p>
|
||||
|
||||
### SigNoz vs Loki
|
||||
|
||||
- SigNoz supports aggregations on high-cardinality data over a huge volume while loki doesn’t.
|
||||
- SigNoz supports indexes over high cardinality data and has no limitations on the number of indexes, while Loki reaches max streams with a few indexes added to it.
|
||||
- Searching over a huge volume of data is difficult and slow in Loki compared to SigNoz
|
||||
|
||||
<br /><br />
|
||||
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
|
||||
|
||||
## Contributing
|
||||
|
||||
|
||||
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
|
||||
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
|
||||
|
||||
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)
|
||||
|
||||
### Project maintainers
|
||||
|
||||
#### Backend
|
||||
|
||||
- [Ankit Nayan](https://github.com/ankitnayan)
|
||||
- [Nityananda Gohain](https://github.com/nityanandagohain)
|
||||
- [Srikanth Chekuri](https://github.com/srikanthccv)
|
||||
- [Vishal Sharma](https://github.com/makeavish)
|
||||
|
||||
#### Frontend
|
||||
|
||||
- [Palash Gupta](https://github.com/palashgdev)
|
||||
|
||||
#### DevOps
|
||||
|
||||
- [Prashant Shahi](https://github.com/prashant-shahi)
|
||||
|
||||
<br /><br />
|
||||
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
|
||||
@@ -154,11 +197,8 @@ Join the [slack community](https://signoz.io/slack) to know more about distribut
|
||||
|
||||
If you have any ideas, questions, or any feedback, please share on our [Github Discussions](https://github.com/SigNoz/signoz/discussions)
|
||||
|
||||
As always, thanks to our amazing contributors!
|
||||
As always, thanks to our amazing contributors!
|
||||
|
||||
<a href="https://github.com/signoz/signoz/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
|
||||
</a>
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
|
||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
|
||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||
@@ -14,14 +13,19 @@
|
||||
|
||||
##
|
||||
|
||||
SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNoz使用分布式跟踪来增加软件技术栈的可见性。
|
||||
SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNoz使用分布式追踪来增加软件技术栈的可见性。
|
||||
|
||||
👉 你能看到一些性能矩阵,服务、外部api调用、每个终端(endpoint)的p99延迟和错误率。
|
||||
👉 你能看到一些性能指标,服务、外部api调用、每个终端(endpoint)的p99延迟和错误率。
|
||||
|
||||
👉 通过准确的跟踪来确定是什么引起了问题,并且可以看到每个独立请求的帧图(framegraph),这样你就能找到根本原因。
|
||||
👉 通过准确的追踪来确定是什么引起了问题,并且可以看到每个独立请求的帧图(framegraph),这样你就能找到根本原因。
|
||||
|
||||
👉 聚合trace数据来获得业务相关指标。
|
||||
|
||||

|
||||

|
||||
<br />
|
||||

|
||||
<br />
|
||||

|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -37,12 +41,12 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
|
||||
|
||||
## 功能:
|
||||
|
||||
- 应用总览矩阵(matrix),如RPS, 50/90/99百分比延迟率,错误率
|
||||
- 应用概览指标(metrics),如RPS, p50/p90/p99延迟率分位值,错误率等。
|
||||
- 应用中最慢的终端(endpoint)
|
||||
- 查看准确的网络请求跟踪来分析下游服务问题、慢数据库查询问题 及调用第三方服务如支付网关的问题
|
||||
- 通过服务名称、操作、延迟、错误、标签来过滤跟踪
|
||||
- 对过滤后的跟踪数据做矩阵聚合。比如,获得过滤条件`customer_type: gold` or `deployment_version: v2` or `external_call: paypal`的错误率和p99延迟
|
||||
- 整合的矩阵和跟踪用户界面。不需要像从Prometheus切换到Jaeger才能调试问题
|
||||
- 查看特定请求的trace数据来分析下游服务问题、慢数据库查询问题 及调用第三方服务如支付网关的问题
|
||||
- 通过服务名称、操作、延迟、错误、标签来过滤traces。
|
||||
- 聚合trace数据(events/spans)来得到业务相关指标。比如,你可以通过过滤条件`customer_type: gold` or `deployment_version: v2` or `external_call: paypal` 来获取指定业务的错误率和p99延迟
|
||||
- 为metrics和trace提供统一的UI。排查问题不需要在Prometheus和Jaeger之间切换。
|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -54,7 +58,7 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
|
||||
|
||||
我们想做一个自服务的开源版本的工具,类似于DataDog和NewRelic,用于那些对客户数据流入第三方有隐私和安全担忧的厂商。
|
||||
|
||||
开源也让你对配置、采样和上线率有完整的控制,你可以在SigNoz基础上构建模块来满足特定的商业需求。
|
||||
开源也让你对配置、采样和正常运行时间有完整的控制,你可以在SigNoz基础上构建模块来满足特定的商业需求。
|
||||
|
||||
### 语言支持
|
||||
|
||||
@@ -72,8 +76,8 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
|
||||
|
||||
## 入门
|
||||
|
||||
|
||||
|
||||
|
||||
### 使用Docker部署
|
||||
|
||||
请按照[这里](https://signoz.io/docs/deployment/docker/)列出的步骤使用Docker来安装
|
||||
@@ -81,35 +85,34 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
|
||||
如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/deployment/troubleshooting)会对你有帮助。
|
||||
|
||||
<p>  </p>
|
||||
|
||||
|
||||
|
||||
|
||||
### 使用Helm在Kubernetes上部署
|
||||
|
||||
请跟着[这里](https://signoz.io/docs/deployment/helm_chart)的步骤使用helm charts安装
|
||||
|
||||
|
||||
<br /><br />
|
||||
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
|
||||
|
||||
## Comparisons to Familiar Tools
|
||||
## 与其他方案的比较
|
||||
|
||||
### SigNoz vs Prometheus
|
||||
|
||||
如果你只是需要矩阵,那Prometheus是不错的,但如果你要无缝的在矩阵和跟踪之间切换,那目前把Prometheus & Jaeger串起来的体验并不好。
|
||||
如果你只是需要监控指标(metrics),那Prometheus是不错的,但如果你要无缝的在metrics和traces之间切换,那目前把Prometheus & Jaeger串起来的体验并不好。
|
||||
|
||||
我们的目标是在矩阵和跟踪之间提供整合的UI - 类似于Datadog这样的Saas厂提供的方案,能够对跟踪进行过滤和聚合,这是目前Jaeger缺失的功能。
|
||||
我们的目标是为metrics和traces提供统一的UI - 类似于Datadog这样的Saas厂提供的方案。并且能够对trace进行过滤和聚合,这是目前Jaeger缺失的功能。
|
||||
|
||||
<p>  </p>
|
||||
|
||||
### SigNoz vs Jaeger
|
||||
|
||||
Jaeger只做分布式跟踪,SigNoz则是做了矩阵和跟踪两块,我们在计划中也有日志管理功能。
|
||||
Jaeger只做分布式追踪(distributed tracing),SigNoz则支持metrics,traces,logs ,即可视化的三大支柱。
|
||||
|
||||
并且SigNoz有一些Jaeger没有的高级功能:
|
||||
|
||||
- Jaegar UI无法在跟踪或过滤的跟踪基础上展示矩阵。
|
||||
- Jaeger不能在过滤的跟踪上进行聚合操作。例如,拥有tag为customer_type='premium'的所有请求的p99延迟,在SigNoz里这很容易实现。
|
||||
- Jaegar UI无法在traces或过滤的traces上展示metrics。
|
||||
- Jaeger不能对过滤的traces做聚合操作。例如,拥有tag为customer_type='premium'的所有请求的p99延迟。而这个功能在SigNoz这儿是很容易实现。
|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -122,6 +125,23 @@ Jaeger只做分布式跟踪,SigNoz则是做了矩阵和跟踪两块,我们
|
||||
|
||||
还不清楚怎么开始? 只需在[slack社区](https://signoz.io/slack)的`#contributing`频道里ping我们。
|
||||
|
||||
### Project maintainers
|
||||
|
||||
#### Backend
|
||||
|
||||
- [Ankit Nayan](https://github.com/ankitnayan)
|
||||
- [Nityananda Gohain](https://github.com/nityanandagohain)
|
||||
- [Srikanth Chekuri](https://github.com/srikanthccv)
|
||||
- [Vishal Sharma](https://github.com/makeavish)
|
||||
|
||||
#### Frontend
|
||||
|
||||
- [Palash Gupta](https://github.com/palashgdev)
|
||||
|
||||
#### DevOps
|
||||
|
||||
- [Prashant Shahi](https://github.com/prashant-shahi)
|
||||
|
||||
<br /><br />
|
||||
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
|
||||
|
||||
SECURITY.md (new file)
@@ -0,0 +1,18 @@
|
||||
# Security Policy
|
||||
|
||||
SigNoz is looking forward to working with security researchers across the world to keep SigNoz and our users safe. If you have found an issue in our systems/applications, please reach out to us.
|
||||
|
||||
## Supported Versions
|
||||
We always recommend using the latest version of SigNoz to ensure you get all security updates
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
If you believe you have found a security vulnerability within SigNoz, please let us know right away. We'll try and fix the problem as soon as possible.
|
||||
|
||||
**Do not report vulnerabilities using public GitHub issues**. Instead, email <security@signoz.io> with a detailed account of the issue. Please submit one issue per email, this helps us triage vulnerabilities.
|
||||
|
||||
Once we've received your email we'll keep you updated as we fix the vulnerability.
|
||||
|
||||
## Thanks
|
||||
|
||||
Thank you for keeping SigNoz and our users safe. 🙇
|
||||
deploy/docker-swarm/clickhouse-setup/alertmanager.yml (new file)
@@ -0,0 +1,35 @@
|
||||
global:
|
||||
resolve_timeout: 1m
|
||||
slack_api_url: 'https://hooks.slack.com/services/xxx'
|
||||
|
||||
route:
|
||||
receiver: 'slack-notifications'
|
||||
|
||||
receivers:
|
||||
- name: 'slack-notifications'
|
||||
slack_configs:
|
||||
- channel: '#alerts'
|
||||
send_resolved: true
|
||||
icon_url: https://avatars3.githubusercontent.com/u/3380462
|
||||
title: |-
|
||||
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
|
||||
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
|
||||
{{" "}}(
|
||||
{{- with .CommonLabels.Remove .GroupLabels.Names }}
|
||||
{{- range $index, $label := .SortedPairs -}}
|
||||
{{ if $index }}, {{ end }}
|
||||
{{- $label.Name }}="{{ $label.Value -}}"
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
)
|
||||
{{- end }}
|
||||
text: >-
|
||||
{{ range .Alerts -}}
|
||||
*Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
|
||||
|
||||
*Description:* {{ .Annotations.description }}
|
||||
|
||||
*Details:*
|
||||
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
deploy/docker-swarm/clickhouse-setup/alerts.yml (new file)
@@ -0,0 +1,11 @@
|
||||
groups:
|
||||
- name: ExampleCPULoadGroup
|
||||
rules:
|
||||
- alert: HighCpuLoad
|
||||
expr: system_cpu_load_average_1m > 0.1
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: High CPU load
|
||||
description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
deploy/docker-swarm/clickhouse-setup/clickhouse-cluster.xml (new file)
@@ -0,0 +1,75 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
|
||||
Optional. If you don't use replicated tables, you could omit that.
|
||||
|
||||
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
|
||||
-->
|
||||
<zookeeper>
|
||||
<node index="1">
|
||||
<host>zookeeper-1</host>
|
||||
<port>2181</port>
|
||||
</node>
|
||||
<!-- <node index="2">
|
||||
<host>zookeeper-2</host>
|
||||
<port>2181</port>
|
||||
</node>
|
||||
<node index="3">
|
||||
<host>zookeeper-3</host>
|
||||
<port>2181</port>
|
||||
</node> -->
|
||||
</zookeeper>
|
||||
|
||||
<!-- Configuration of clusters that could be used in Distributed tables.
|
||||
https://clickhouse.com/docs/en/operations/table_engines/distributed/
|
||||
-->
|
||||
<remote_servers>
|
||||
<cluster>
|
||||
<!-- Inter-server per-cluster secret for Distributed queries
|
||||
default: no secret (no authentication will be performed)
|
||||
|
||||
If set, then Distributed queries will be validated on shards, so at least:
|
||||
- such cluster should exist on the shard,
|
||||
- such cluster should have the same secret.
|
||||
|
||||
And also (and which is more important), the initial_user will
|
||||
be used as current user for the query.
|
||||
|
||||
Right now the protocol is pretty simple and it only takes into account:
|
||||
- cluster name
|
||||
- query
|
||||
|
||||
Also it will be nice if the following will be implemented:
|
||||
- source hostname (see interserver_http_host), but then it will depends from DNS,
|
||||
it can use IP address instead, but then the you need to get correct on the initiator node.
|
||||
- target hostname / ip address (same notes as for source hostname)
|
||||
- time-based security tokens
|
||||
-->
|
||||
<!-- <secret></secret> -->
|
||||
<shard>
|
||||
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
|
||||
<!-- <internal_replication>false</internal_replication> -->
|
||||
<!-- Optional. Shard weight when writing data. Default: 1. -->
|
||||
<!-- <weight>1</weight> -->
|
||||
<replica>
|
||||
<host>clickhouse</host>
|
||||
<port>9000</port>
|
||||
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
|
||||
<!-- <priority>1</priority> -->
|
||||
</replica>
|
||||
</shard>
|
||||
<!-- <shard>
|
||||
<replica>
|
||||
<host>clickhouse-2</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>clickhouse-3</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard> -->
|
||||
</cluster>
|
||||
</remote_servers>
|
||||
</clickhouse>
|
||||
(File diff suppressed because it is too large.)
deploy/docker-swarm/clickhouse-setup/clickhouse-storage.xml (new file)
@@ -0,0 +1,29 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<default>
|
||||
<keep_free_space_bytes>10485760</keep_free_space_bytes>
|
||||
</default>
|
||||
<s3>
|
||||
<type>s3</type>
|
||||
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
|
||||
<access_key_id>ACCESS-KEY-ID</access_key_id>
|
||||
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
|
||||
</s3>
|
||||
</disks>
|
||||
<policies>
|
||||
<tiered>
|
||||
<volumes>
|
||||
<default>
|
||||
<disk>default</disk>
|
||||
</default>
|
||||
<s3>
|
||||
<disk>s3</disk>
|
||||
<perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
|
||||
</s3>
|
||||
</volumes>
|
||||
</tiered>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
</clickhouse>
|
||||
deploy/docker-swarm/clickhouse-setup/clickhouse-users.xml (new file)
@@ -0,0 +1,123 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<!-- See also the files in users.d directory where the settings can be overridden. -->
|
||||
|
||||
<!-- Profiles of settings. -->
|
||||
<profiles>
|
||||
<!-- Default settings. -->
|
||||
<default>
|
||||
<!-- Maximum memory usage for processing single query, in bytes. -->
|
||||
<max_memory_usage>10000000000</max_memory_usage>
|
||||
|
||||
<!-- How to choose between replicas during distributed query processing.
|
||||
random - choose random replica from set of replicas with minimum number of errors
|
||||
nearest_hostname - from set of replicas with minimum number of errors, choose replica
|
||||
with minimum number of different symbols between replica's hostname and local hostname
|
||||
(Hamming distance).
|
||||
in_order - first live replica is chosen in specified order.
|
||||
first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
|
||||
-->
|
||||
<load_balancing>random</load_balancing>
|
||||
</default>
|
||||
|
||||
<!-- Profile that allows only read queries. -->
|
||||
<readonly>
|
||||
<readonly>1</readonly>
|
||||
</readonly>
|
||||
</profiles>
|
||||
|
||||
<!-- Users and ACL. -->
|
||||
<users>
|
||||
<!-- If user name was not specified, 'default' user is used. -->
|
||||
<default>
|
||||
<!-- See also the files in users.d directory where the password can be overridden.
|
||||
|
||||
Password could be specified in plaintext or in SHA256 (in hex format).
|
||||
|
||||
If you want to specify password in plaintext (not recommended), place it in 'password' element.
|
||||
Example: <password>qwerty</password>.
|
||||
Password could be empty.
|
||||
|
||||
If you want to specify SHA256, place it in 'password_sha256_hex' element.
|
||||
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
|
||||
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
|
||||
|
||||
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
|
||||
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
|
||||
|
||||
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
|
||||
place its name in 'server' element inside 'ldap' element.
|
||||
Example: <ldap><server>my_ldap_server</server></ldap>
|
||||
|
||||
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
|
||||
place 'kerberos' element instead of 'password' (and similar) elements.
|
||||
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
|
||||
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
|
||||
whose initiator's realm matches it.
|
||||
Example: <kerberos />
|
||||
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
|
||||
|
||||
How to generate decent password:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
|
||||
In first line will be password and in second - corresponding SHA256.
|
||||
|
||||
How to generate double SHA1:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
|
||||
In first line will be password and in second - corresponding double SHA1.
|
||||
-->
|
||||
<password></password>
|
||||
|
||||
<!-- List of networks with open access.
|
||||
|
||||
To open access from everywhere, specify:
|
||||
<ip>::/0</ip>
|
||||
|
||||
To open access only from localhost, specify:
|
||||
<ip>::1</ip>
|
||||
<ip>127.0.0.1</ip>
|
||||
|
||||
Each element of list has one of the following forms:
|
||||
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
|
||||
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
|
||||
<host> Hostname. Example: server01.clickhouse.com.
|
||||
To check access, DNS query is performed, and all received addresses compared to peer address.
|
||||
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
|
||||
To check access, DNS PTR query is performed for peer address and then regexp is applied.
|
||||
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
|
||||
Strongly recommended that regexp is ends with $
|
||||
All results of DNS requests are cached till server restart.
|
||||
-->
|
||||
<networks>
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
|
||||
<!-- Settings profile for user. -->
|
||||
<profile>default</profile>
|
||||
|
||||
<!-- Quota for user. -->
|
||||
<quota>default</quota>
|
||||
|
||||
<!-- User can create other users and grant rights to them. -->
|
||||
<!-- <access_management>1</access_management> -->
|
||||
</default>
|
||||
</users>
|
||||
|
||||
<!-- Quotas. -->
|
||||
<quotas>
|
||||
<!-- Name of quota. -->
|
||||
<default>
|
||||
<!-- Limits for time interval. You could specify many intervals with different limits. -->
|
||||
<interval>
|
||||
<!-- Length of interval. -->
|
||||
<duration>3600</duration>
|
||||
|
||||
<!-- No limits. Just calculate resource usage for time interval. -->
|
||||
<queries>0</queries>
|
||||
<errors>0</errors>
|
||||
<result_rows>0</result_rows>
|
||||
<read_rows>0</read_rows>
|
||||
<execution_time>0</execution_time>
|
||||
</interval>
|
||||
</default>
|
||||
</quotas>
|
||||
</clickhouse>
|
||||
@@ -1,106 +1,239 @@
|
||||
version: "3"
|
||||
version: "3.9"
|
||||
|
||||
x-clickhouse-defaults: &clickhouse-defaults
|
||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
||||
tty: true
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
depends_on:
|
||||
- zookeeper-1
|
||||
# - zookeeper-2
|
||||
# - zookeeper-3
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
|
||||
x-clickhouse-depend: &clickhouse-depend
|
||||
depends_on:
|
||||
- clickhouse
|
||||
# - clickhouse-2
|
||||
# - clickhouse-3
|
||||
|
||||
services:
|
||||
clickhouse:
|
||||
image: yandex/clickhouse-server
|
||||
expose:
|
||||
- 8123
|
||||
- 9000
|
||||
ports:
|
||||
- 9001:9000
|
||||
- 8123:8123
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
zookeeper-1:
|
||||
image: bitnami/zookeeper:3.7.0
|
||||
hostname: zookeeper-1
|
||||
user: root
|
||||
ports:
|
||||
- "2181:2181"
|
||||
- "2888:2888"
|
||||
- "3888:3888"
|
||||
volumes:
|
||||
- ./data/zookeeper-1:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=1
|
||||
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
# zookeeper-2:
|
||||
# image: bitnami/zookeeper:3.7.0
|
||||
# hostname: zookeeper-2
|
||||
# user: root
|
||||
# ports:
|
||||
# - "2182:2181"
|
||||
# - "2889:2888"
|
||||
# - "3889:3888"
|
||||
# volumes:
|
||||
# - ./data/zookeeper-2:/bitnami/zookeeper
|
||||
# environment:
|
||||
# - ZOO_SERVER_ID=2
|
||||
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
|
||||
# - ALLOW_ANONYMOUS_LOGIN=yes
|
||||
# - ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
# zookeeper-3:
|
||||
# image: bitnami/zookeeper:3.7.0
|
||||
# hostname: zookeeper-3
|
||||
# user: root
|
||||
# ports:
|
||||
# - "2183:2181"
|
||||
# - "2890:2888"
|
||||
# - "3890:3888"
|
||||
# volumes:
|
||||
# - ./data/zookeeper-3:/bitnami/zookeeper
|
||||
# environment:
|
||||
# - ZOO_SERVER_ID=3
|
||||
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
|
||||
# - ALLOW_ANONYMOUS_LOGIN=yes
|
||||
# - ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
clickhouse:
|
||||
<<: *clickhouse-defaults
|
||||
hostname: clickhouse
|
||||
# ports:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
# - "9181:9181"
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
|
||||
# clickhouse-2:
|
||||
# <<: *clickhouse-defaults
|
||||
# hostname: clickhouse-2
|
||||
# ports:
|
||||
# - "9001:9000"
|
||||
# - "8124:8123"
|
||||
# - "9182:9181"
|
||||
# volumes:
|
||||
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
# - ./data/clickhouse-2/:/var/lib/clickhouse/
|
||||
|
||||
# clickhouse-3:
|
||||
# <<: *clickhouse-defaults
|
||||
# hostname: clickhouse-3
|
||||
# ports:
|
||||
# - "9002:9000"
|
||||
# - "8125:8123"
|
||||
# - "9183:9181"
|
||||
# volumes:
|
||||
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
# - ./data/clickhouse-3/:/var/lib/clickhouse/
|
||||
|
||||
alertmanager:
|
||||
image: signoz/alertmanager:0.23.0-0.2
|
||||
volumes:
|
||||
- ./data/alertmanager:/data
|
||||
command:
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
depends_on:
|
||||
- query-service
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
query-service:
|
||||
image: signoz/query-service:0.4.1
|
||||
container_name: query-service
|
||||
restart: always
|
||||
image: signoz/query-service:0.15.0
|
||||
command: ["-config=/root/config/prometheus.yml"]
|
||||
ports:
|
||||
- "8080:8080"
|
||||
# ports:
|
||||
# - "6060:6060" # pprof port
|
||||
# - "8080:8080" # query-service port
|
||||
volumes:
|
||||
- ./prometheus.yml:/root/config/prometheus.yml
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
|
||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
|
||||
- GODEBUG=netdns=go
|
||||
depends_on:
|
||||
- clickhouse
|
||||
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-swarm
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
<<: *clickhouse-depend
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.4.1
|
||||
container_name: frontend
|
||||
|
||||
image: signoz/frontend:0.15.0
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
depends_on:
|
||||
- alertmanager
|
||||
- query-service
|
||||
links:
|
||||
- "query-service"
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
|
||||
otel-collector:
|
||||
image: signoz/otelcontribcol:0.4.0
|
||||
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=2000"]
|
||||
image: signoz/signoz-otel-collector:0.66.3
|
||||
command: ["--config=/etc/otel-collector-config.yaml"]
|
||||
user: root # required for reading docker container logs
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
|
||||
- DOCKER_MULTI_NODE_CLUSTER=false
|
||||
ports:
|
||||
- "1777:1777" # pprof extension
|
||||
- "8887:8888" # Prometheus metrics exposed by the agent
|
||||
- "14268:14268" # Jaeger receiver
|
||||
- "55678" # OpenCensus receiver
|
||||
- "55680:55680" # OTLP HTTP/2.0 legacy port
|
||||
- "55681:55681" # OTLP HTTP/1.0 receiver
|
||||
- "4317:4317" # OTLP GRPC receiver
|
||||
- "55679:55679" # zpages extension
|
||||
- "13133" # health_check
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
||||
# - "9411:9411" # Zipkin port
|
||||
# - "13133:13133" # Health check extension
|
||||
# - "14250:14250" # Jaeger gRPC
|
||||
# - "14268:14268" # Jaeger thrift HTTP
|
||||
# - "55678:55678" # OpenCensus receiver
|
||||
# - "55679:55679" # zPages extension
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 3
|
||||
depends_on:
|
||||
- clickhouse
|
||||
mode: global
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
<<: *clickhouse-depend
|
||||
|
||||
otel-collector-hostmetrics:
|
||||
image: signoz/otelcontribcol:0.4.0
|
||||
command: ["--config=/etc/otel-collector-config-hostmetrics.yaml", "--mem-ballast-size-mib=683"]
|
||||
otel-collector-metrics:
|
||||
image: signoz/signoz-otel-collector:0.66.3
|
||||
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
|
||||
volumes:
|
||||
- ./otel-collector-config-hostmetrics.yaml:/etc/otel-collector-config-hostmetrics.yaml
|
||||
depends_on:
|
||||
- clickhouse
|
||||
|
||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||
# ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "13133:13133" # Health check extension
|
||||
# - "55679:55679" # zPages extension
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
<<: *clickhouse-depend
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:latest
|
||||
container_name: hotrod
|
||||
ports:
|
||||
- "9000:8080"
|
||||
image: jaegertracing/example-hotrod:1.30
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
container_name: load-hotrod
|
||||
hostname: load-hotrod
|
||||
ports:
|
||||
- "8089:8089"
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
@@ -110,4 +243,4 @@ services:
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ../common/locust-scripts:/locust
|
||||
- ../common/locust-scripts:/locust
|
||||
|
||||
@@ -1,72 +0,0 @@
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
http:
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
thrift_http:
|
||||
|
||||
hostmetrics:
|
||||
collection_interval: 60s
|
||||
scrapers:
|
||||
cpu:
|
||||
load:
|
||||
memory:
|
||||
disk:
|
||||
filesystem:
|
||||
network:
|
||||
|
||||
# Data sources: metrics
|
||||
prometheus:
|
||||
config:
|
||||
scrape_configs:
|
||||
- job_name: "otel-collector"
|
||||
dns_sd_configs:
|
||||
- names:
|
||||
- 'tasks.signoz_otel-collector'
|
||||
type: 'A'
|
||||
port: 8888
|
||||
- job_name: "otel-collector-hostmetrics"
|
||||
scrape_interval: 10s
|
||||
static_configs:
|
||||
- targets: ["otel-collector-hostmetrics:8888"]
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 1000
|
||||
timeout: 10s
|
||||
memory_limiter:
|
||||
# Same as --mem-ballast-size-mib CLI argument
|
||||
ballast_size_mib: 683
|
||||
# 80% of maximum memory up to 2G
|
||||
limit_mib: 1500
|
||||
# 25% of limit up to 2G
|
||||
spike_limit_mib: 512
|
||||
check_interval: 5s
|
||||
# queued_retry:
|
||||
# num_workers: 4
|
||||
# queue_size: 100
|
||||
# retry_on_failure: true
|
||||
extensions:
|
||||
health_check: {}
|
||||
zpages: {}
|
||||
exporters:
|
||||
clickhouse:
|
||||
datasource: tcp://clickhouse:9000
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true
|
||||
|
||||
service:
|
||||
extensions: [health_check, zpages]
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [jaeger, otlp]
|
||||
processors: [batch]
|
||||
exporters: [clickhouse]
|
||||
metrics:
|
||||
receivers: [otlp, prometheus, hostmetrics]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
@@ -1,47 +1,170 @@
|
||||
receivers:
|
||||
filelog/dockercontainers:
|
||||
include: [ "/var/lib/docker/containers/*/*.log" ]
|
||||
start_at: end
|
||||
include_file_path: true
|
||||
include_file_name: false
|
||||
operators:
|
||||
- type: json_parser
|
||||
id: parser-docker
|
||||
output: extract_metadata_from_filepath
|
||||
timestamp:
|
||||
parse_from: attributes.time
|
||||
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
||||
- type: regex_parser
|
||||
id: extract_metadata_from_filepath
|
||||
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
|
||||
parse_from: attributes["log.file.path"]
|
||||
output: parse_body
|
||||
- type: move
|
||||
id: parse_body
|
||||
from: attributes.log
|
||||
to: body
|
||||
output: time
|
||||
- type: remove
|
||||
id: time
|
||||
field: attributes.time
|
||||
opencensus:
|
||||
endpoint: 0.0.0.0:55678
|
||||
otlp/spanmetrics:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: localhost:12345
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:14250
|
||||
thrift_http:
|
||||
endpoint: 0.0.0.0:14268
|
||||
# thrift_compact:
|
||||
# endpoint: 0.0.0.0:6831
|
||||
# thrift_binary:
|
||||
# endpoint: 0.0.0.0:6832
|
||||
hostmetrics:
|
||||
collection_interval: 30s
|
||||
scrapers:
|
||||
cpu: {}
|
||||
load: {}
|
||||
memory: {}
|
||||
disk: {}
|
||||
filesystem: {}
|
||||
network: {}
|
||||
prometheus:
|
||||
config:
|
||||
global:
|
||||
scrape_interval: 60s
|
||||
scrape_configs:
|
||||
# otel-collector internal metrics
|
||||
- job_name: otel-collector
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost:8888
|
||||
labels:
|
||||
job_name: otel-collector
|
||||
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 1000
|
||||
send_batch_size: 10000
|
||||
send_batch_max_size: 11000
|
||||
timeout: 10s
|
||||
memory_limiter:
|
||||
# Same as --mem-ballast-size-mib CLI argument
|
||||
ballast_size_mib: 683
|
||||
# 80% of maximum memory up to 2G
|
||||
limit_mib: 1500
|
||||
# 25% of limit up to 2G
|
||||
spike_limit_mib: 512
|
||||
check_interval: 5s
|
||||
resourcedetection:
|
||||
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
||||
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
|
||||
timeout: 2s
|
||||
signozspanmetrics/prometheus:
|
||||
metrics_exporter: prometheus
|
||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||
dimensions_cache_size: 100000
|
||||
dimensions:
|
||||
- name: service.namespace
|
||||
default: default
|
||||
- name: deployment.environment
|
||||
default: default
|
||||
# This is added to ensure the uniqueness of the timeseries
|
||||
# Otherwise, identical timeseries produced by multiple replicas of
|
||||
# collectors result in incorrect APM metrics
|
||||
- name: 'signoz.collector.id'
|
||||
# memory_limiter:
|
||||
# # 80% of maximum memory up to 2G
|
||||
# limit_mib: 1500
|
||||
# # 25% of limit up to 2G
|
||||
# spike_limit_mib: 512
|
||||
# check_interval: 5s
|
||||
#
|
||||
# # 50% of the maximum memory
|
||||
# limit_percentage: 50
|
||||
# # 20% of max memory usage spike expected
|
||||
# spike_limit_percentage: 20
|
||||
# queued_retry:
|
||||
# num_workers: 4
|
||||
# queue_size: 100
|
||||
# retry_on_failure: true
|
||||
extensions:
|
||||
health_check: {}
|
||||
zpages: {}
|
||||
|
||||
exporters:
|
||||
clickhouse:
|
||||
datasource: tcp://clickhouse:9000
|
||||
clickhousetraces:
|
||||
datasource: tcp://clickhouse:9000/?database=signoz_traces
|
||||
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true
|
||||
clickhousemetricswrite/prometheus:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
prometheus:
|
||||
endpoint: 0.0.0.0:8889
|
||||
# logging: {}
|
||||
clickhouselogsexporter:
|
||||
dsn: tcp://clickhouse:9000/
|
||||
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
||||
timeout: 5s
|
||||
sending_queue:
|
||||
queue_size: 100
|
||||
retry_on_failure:
|
||||
enabled: true
|
||||
initial_interval: 5s
|
||||
max_interval: 30s
|
||||
max_elapsed_time: 300s
|
||||
|
||||
extensions:
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1777
|
||||
|
||||
service:
|
||||
extensions: [health_check, zpages]
|
||||
telemetry:
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions: [health_check, zpages, pprof]
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [jaeger, otlp]
|
||||
processors: [batch]
|
||||
exporters: [clickhouse]
|
||||
processors: [signozspanmetrics/prometheus, batch]
|
||||
exporters: [clickhousetraces]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
exporters: [clickhousemetricswrite]
|
||||
metrics/generic:
|
||||
receivers: [hostmetrics]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
metrics/prometheus:
|
||||
receivers: [prometheus]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite/prometheus]
|
||||
metrics/spanmetrics:
|
||||
receivers: [otlp/spanmetrics]
|
||||
exporters: [prometheus]
|
||||
logs:
|
||||
receivers: [otlp, filelog/dockercontainers]
|
||||
processors: [batch]
|
||||
exporters: [clickhouselogsexporter]
|
||||
|
||||
@@ -0,0 +1,64 @@
|
||||
receivers:
|
||||
prometheus:
|
||||
config:
|
||||
scrape_configs:
|
||||
# otel-collector-metrics internal metrics
|
||||
- job_name: otel-collector-metrics
|
||||
scrape_interval: 60s
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost:8888
|
||||
labels:
|
||||
job_name: otel-collector-metrics
|
||||
# SigNoz span metrics
|
||||
- job_name: signozspanmetrics-collector
|
||||
scrape_interval: 60s
|
||||
dns_sd_configs:
|
||||
- names:
|
||||
- tasks.otel-collector
|
||||
type: A
|
||||
port: 8889
|
||||
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 10000
|
||||
send_batch_max_size: 11000
|
||||
timeout: 10s
|
||||
# memory_limiter:
|
||||
# # 80% of maximum memory up to 2G
|
||||
# limit_mib: 1500
|
||||
# # 25% of limit up to 2G
|
||||
# spike_limit_mib: 512
|
||||
# check_interval: 5s
|
||||
#
|
||||
# # 50% of the maximum memory
|
||||
# limit_percentage: 50
|
||||
# # 20% of max memory usage spike expected
|
||||
# spike_limit_percentage: 20
|
||||
# queued_retry:
|
||||
# num_workers: 4
|
||||
# queue_size: 100
|
||||
# retry_on_failure: true
|
||||
|
||||
exporters:
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
|
||||
extensions:
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1777
|
||||
|
||||
service:
|
||||
telemetry:
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions: [health_check, zpages, pprof]
|
||||
pipelines:
|
||||
metrics:
|
||||
receivers: [prometheus]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
@@ -9,17 +9,17 @@ alerting:
|
||||
alertmanagers:
|
||||
- static_configs:
|
||||
- targets:
|
||||
# - alertmanager:9093
|
||||
- alertmanager:9093
|
||||
|
||||
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
|
||||
rule_files:
|
||||
# - "first_rules.yml"
|
||||
# - "second_rules.yml"
|
||||
- 'alerts.yml'
|
||||
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
# Here it's Prometheus itself.
|
||||
scrape_configs:
|
||||
|
||||
scrape_configs: []
|
||||
|
||||
remote_read:
|
||||
- url: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
|
||||
@@ -1,30 +1,43 @@
|
||||
server {
|
||||
listen 3301;
|
||||
server_name _;
|
||||
|
||||
|
||||
gzip on;
|
||||
gzip_static on;
|
||||
gzip_static on;
|
||||
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
|
||||
gzip_proxied any;
|
||||
gzip_vary on;
|
||||
gzip_comp_level 6;
|
||||
gzip_buffers 16 8k;
|
||||
gzip_http_version 1.1;
|
||||
gzip_http_version 1.1;
|
||||
|
||||
# to handle uri issue 414 from nginx
|
||||
client_max_body_size 24M;
|
||||
large_client_header_buffers 8 128k;
|
||||
|
||||
location / {
|
||||
root /usr/share/nginx/html;
|
||||
index index.html index.htm;
|
||||
try_files $uri $uri/ /index.html;
|
||||
if ( $uri = '/index.html' ) {
|
||||
add_header Cache-Control no-store always;
|
||||
}
|
||||
root /usr/share/nginx/html;
|
||||
index index.html index.htm;
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
|
||||
location /api/alertmanager {
|
||||
proxy_pass http://alertmanager:9093/api/v2;
|
||||
}
|
||||
|
||||
location /api {
|
||||
proxy_pass http://query-service:8080/api;
|
||||
|
||||
proxy_pass http://query-service:8080/api;
|
||||
# connection will be closed if no data is read for 600s between successive read operations
|
||||
proxy_read_timeout 600s;
|
||||
}
|
||||
|
||||
# redirect server error pages to the static page /50x.html
|
||||
#
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
root /usr/share/nginx/html;
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
}
|
||||
75  deploy/docker/clickhouse-setup/clickhouse-cluster.xml  (new normal file)
@@ -0,0 +1,75 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
|
||||
Optional. If you don't use replicated tables, you could omit that.
|
||||
|
||||
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
|
||||
-->
|
||||
<zookeeper>
|
||||
<node index="1">
|
||||
<host>zookeeper-1</host>
|
||||
<port>2181</port>
|
||||
</node>
|
||||
<!-- <node index="2">
|
||||
<host>zookeeper-2</host>
|
||||
<port>2181</port>
|
||||
</node>
|
||||
<node index="3">
|
||||
<host>zookeeper-3</host>
|
||||
<port>2181</port>
|
||||
</node> -->
|
||||
</zookeeper>
|
||||
|
||||
<!-- Configuration of clusters that could be used in Distributed tables.
|
||||
https://clickhouse.com/docs/en/operations/table_engines/distributed/
|
||||
-->
|
||||
<remote_servers>
|
||||
<cluster>
|
||||
<!-- Inter-server per-cluster secret for Distributed queries
|
||||
default: no secret (no authentication will be performed)
|
||||
|
||||
If set, then Distributed queries will be validated on shards, so at least:
|
||||
- such cluster should exist on the shard,
|
||||
- such cluster should have the same secret.
|
||||
|
||||
Also (and more importantly), the initial_user will
|
||||
be used as current user for the query.
|
||||
|
||||
Right now the protocol is pretty simple and it only takes into account:
|
||||
- cluster name
|
||||
- query
|
||||
|
||||
Also, it would be nice if the following were implemented:
|
||||
- source hostname (see interserver_http_host), but then it will depend on DNS,
|
||||
it can use an IP address instead, but then you need to get it correct on the initiator node.
|
||||
- target hostname / ip address (same notes as for source hostname)
|
||||
- time-based security tokens
|
||||
-->
|
||||
<!-- <secret></secret> -->
|
||||
<shard>
|
||||
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
|
||||
<!-- <internal_replication>false</internal_replication> -->
|
||||
<!-- Optional. Shard weight when writing data. Default: 1. -->
|
||||
<!-- <weight>1</weight> -->
|
||||
<replica>
|
||||
<host>clickhouse</host>
|
||||
<port>9000</port>
|
||||
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
|
||||
<!-- <priority>1</priority> -->
|
||||
</replica>
|
||||
</shard>
|
||||
<!-- <shard>
|
||||
<replica>
|
||||
<host>clickhouse-2</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>clickhouse-3</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard> -->
|
||||
</cluster>
|
||||
</remote_servers>
|
||||
</clickhouse>
|
||||
File diff suppressed because it is too large
29  deploy/docker/clickhouse-setup/clickhouse-storage.xml  (new normal file)
@@ -0,0 +1,29 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<default>
|
||||
<keep_free_space_bytes>10485760</keep_free_space_bytes>
|
||||
</default>
|
||||
<s3>
|
||||
<type>s3</type>
|
||||
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
|
||||
<access_key_id>ACCESS-KEY-ID</access_key_id>
|
||||
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
|
||||
</s3>
|
||||
</disks>
|
||||
<policies>
|
||||
<tiered>
|
||||
<volumes>
|
||||
<default>
|
||||
<disk>default</disk>
|
||||
</default>
|
||||
<s3>
|
||||
<disk>s3</disk>
|
||||
<perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
|
||||
</s3>
|
||||
</volumes>
|
||||
</tiered>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
</clickhouse>
|
||||
123  deploy/docker/clickhouse-setup/clickhouse-users.xml  (new normal file)
@@ -0,0 +1,123 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<!-- See also the files in users.d directory where the settings can be overridden. -->
|
||||
|
||||
<!-- Profiles of settings. -->
|
||||
<profiles>
|
||||
<!-- Default settings. -->
|
||||
<default>
|
||||
<!-- Maximum memory usage for processing single query, in bytes. -->
|
||||
<max_memory_usage>10000000000</max_memory_usage>
|
||||
|
||||
<!-- How to choose between replicas during distributed query processing.
|
||||
random - choose random replica from set of replicas with minimum number of errors
|
||||
nearest_hostname - from set of replicas with minimum number of errors, choose replica
|
||||
with minimum number of different symbols between replica's hostname and local hostname
|
||||
(Hamming distance).
|
||||
in_order - first live replica is chosen in specified order.
|
||||
first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
|
||||
-->
|
||||
<load_balancing>random</load_balancing>
|
||||
</default>
|
||||
|
||||
<!-- Profile that allows only read queries. -->
|
||||
<readonly>
|
||||
<readonly>1</readonly>
|
||||
</readonly>
|
||||
</profiles>
|
||||
|
||||
<!-- Users and ACL. -->
|
||||
<users>
|
||||
<!-- If user name was not specified, 'default' user is used. -->
|
||||
<default>
|
||||
<!-- See also the files in users.d directory where the password can be overridden.
|
||||
|
||||
Password could be specified in plaintext or in SHA256 (in hex format).
|
||||
|
||||
If you want to specify password in plaintext (not recommended), place it in 'password' element.
|
||||
Example: <password>qwerty</password>.
|
||||
Password could be empty.
|
||||
|
||||
If you want to specify SHA256, place it in 'password_sha256_hex' element.
|
||||
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
|
||||
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
|
||||
|
||||
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
|
||||
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
|
||||
|
||||
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
|
||||
place its name in 'server' element inside 'ldap' element.
|
||||
Example: <ldap><server>my_ldap_server</server></ldap>
|
||||
|
||||
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
|
||||
place 'kerberos' element instead of 'password' (and similar) elements.
|
||||
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
|
||||
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
|
||||
whose initiator's realm matches it.
|
||||
Example: <kerberos />
|
||||
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
|
||||
|
||||
How to generate decent password:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
|
||||
The first line is the password and the second is the corresponding SHA256.
|
||||
|
||||
How to generate double SHA1:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
|
||||
The first line is the password and the second is the corresponding double SHA1.
|
||||
-->
|
||||
<password></password>
|
||||
|
||||
<!-- List of networks with open access.
|
||||
|
||||
To open access from everywhere, specify:
|
||||
<ip>::/0</ip>
|
||||
|
||||
To open access only from localhost, specify:
|
||||
<ip>::1</ip>
|
||||
<ip>127.0.0.1</ip>
|
||||
|
||||
Each element of list has one of the following forms:
|
||||
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
|
||||
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
|
||||
<host> Hostname. Example: server01.clickhouse.com.
|
||||
To check access, DNS query is performed, and all received addresses compared to peer address.
|
||||
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
|
||||
To check access, DNS PTR query is performed for peer address and then regexp is applied.
|
||||
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
|
||||
It is strongly recommended that the regexp ends with $
|
||||
All results of DNS requests are cached till server restart.
|
||||
-->
|
||||
<networks>
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
|
||||
<!-- Settings profile for user. -->
|
||||
<profile>default</profile>
|
||||
|
||||
<!-- Quota for user. -->
|
||||
<quota>default</quota>
|
||||
|
||||
<!-- User can create other users and grant rights to them. -->
|
||||
<!-- <access_management>1</access_management> -->
|
||||
</default>
|
||||
</users>
|
||||
|
||||
<!-- Quotas. -->
|
||||
<quotas>
|
||||
<!-- Name of quota. -->
|
||||
<default>
|
||||
<!-- Limits for time interval. You could specify many intervals with different limits. -->
|
||||
<interval>
|
||||
<!-- Length of interval. -->
|
||||
<duration>3600</duration>
|
||||
|
||||
<!-- No limits. Just calculate resource usage for time interval. -->
|
||||
<queries>0</queries>
|
||||
<errors>0</errors>
|
||||
<result_rows>0</result_rows>
|
||||
<read_rows>0</read_rows>
|
||||
<execution_time>0</execution_time>
|
||||
</interval>
|
||||
</default>
|
||||
</quotas>
|
||||
</clickhouse>
|
||||
21  deploy/docker/clickhouse-setup/custom-function.xml  (new normal file)
@@ -0,0 +1,21 @@
|
||||
<functions>
|
||||
<function>
|
||||
<type>executable</type>
|
||||
<name>histogramQuantile</name>
|
||||
<return_type>Float64</return_type>
|
||||
<argument>
|
||||
<type>Array(Float64)</type>
|
||||
<name>buckets</name>
|
||||
</argument>
|
||||
<argument>
|
||||
<type>Array(Float64)</type>
|
||||
<name>counts</name>
|
||||
</argument>
|
||||
<argument>
|
||||
<type>Float64</type>
|
||||
<name>quantile</name>
|
||||
</argument>
|
||||
<format>CSV</format>
|
||||
<command>./histogramQuantile</command>
|
||||
</function>
|
||||
</functions>
|
||||
0  deploy/docker/clickhouse-setup/data/signoz/.gitkeep  (new normal file)
108  deploy/docker/clickhouse-setup/docker-compose-core.yaml  (new normal file)
@@ -0,0 +1,108 @@
|
||||
version: "2.4"
|
||||
|
||||
services:
|
||||
clickhouse:
|
||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
||||
container_name: clickhouse
|
||||
# ports:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
tty: true
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
restart: on-failure
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
alertmanager:
|
||||
container_name: alertmanager
|
||||
image: signoz/alertmanager:0.23.0-0.2
|
||||
volumes:
|
||||
- ./data/alertmanager:/data
|
||||
depends_on:
|
||||
query-service:
|
||||
condition: service_healthy
|
||||
restart: on-failure
|
||||
command:
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
|
||||
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
||||
otel-collector:
|
||||
container_name: otel-collector
|
||||
image: signoz/signoz-otel-collector:0.66.3
|
||||
command: ["--config=/etc/otel-collector-config.yaml"]
|
||||
# user: root # required for reading docker container logs
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
||||
# - "9411:9411" # Zipkin port
|
||||
# - "13133:13133" # health check extension
|
||||
# - "14250:14250" # Jaeger gRPC
|
||||
# - "14268:14268" # Jaeger thrift HTTP
|
||||
# - "55678:55678" # OpenCensus receiver
|
||||
# - "55679:55679" # zPages extension
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
otel-collector-metrics:
|
||||
container_name: otel-collector-metrics
|
||||
image: signoz/signoz-otel-collector:0.66.3
|
||||
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
|
||||
volumes:
|
||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||
# ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "13133:13133" # Health check extension
|
||||
# - "55679:55679" # zPages extension
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:1.30
|
||||
container_name: hotrod
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
container_name: load-hotrod
|
||||
hostname: load-hotrod
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
NO_PROXY: standalone
|
||||
TASK_DELAY_FROM: 5
|
||||
TASK_DELAY_TO: 30
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ../common/locust-scripts:/locust
|
||||
56  deploy/docker/clickhouse-setup/docker-compose-local.yaml  (new normal file)
@@ -0,0 +1,56 @@
|
||||
version: "2.4"
|
||||
|
||||
services:
|
||||
query-service:
|
||||
hostname: query-service
|
||||
build:
|
||||
context: "../../../pkg/query-service"
|
||||
dockerfile: "./Dockerfile"
|
||||
args:
|
||||
LDFLAGS: ""
|
||||
TARGETPLATFORM: "${LOCAL_GOOS}/${LOCAL_GOARCH}"
|
||||
container_name: query-service
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
volumes:
|
||||
- ./prometheus.yml:/root/config/prometheus.yml
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
command: ["-config=/root/config/prometheus.yml"]
|
||||
ports:
|
||||
- "6060:6060"
|
||||
- "8080:8080"
|
||||
restart: on-failure
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
frontend:
|
||||
build:
|
||||
context: "../../../frontend"
|
||||
dockerfile: "./Dockerfile"
|
||||
args:
|
||||
TARGETOS: "${LOCAL_GOOS}"
|
||||
TARGETPLATFORM: "${LOCAL_GOARCH}"
|
||||
container_name: frontend
|
||||
environment:
|
||||
- FRONTEND_API_ENDPOINT=http://query-service:8080
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
- alertmanager
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
@@ -1,98 +0,0 @@
|
||||
version: "2.4"
|
||||
|
||||
services:
|
||||
clickhouse:
|
||||
image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
alertmanager:
|
||||
image: signoz/alertmanager:0.5.0
|
||||
volumes:
|
||||
- ./alertmanager.yml:/prometheus/alertmanager.yml
|
||||
- ./data/alertmanager:/data
|
||||
command:
|
||||
- '--config.file=/prometheus/alertmanager.yml'
|
||||
- '--storage.path=/data'
|
||||
|
||||
query-service:
|
||||
image: signoz/query-service:0.6.1
|
||||
container_name: query-service
|
||||
command: ["-config=/root/config/prometheus.yml"]
|
||||
volumes:
|
||||
- ./prometheus.yml:/root/config/prometheus.yml
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.6.1
|
||||
container_name: frontend
|
||||
depends_on:
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
otel-collector:
|
||||
image: signoz/otelcontribcol:0.5.0
|
||||
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
ports:
|
||||
- "4317:4317" # OTLP GRPC receiver
|
||||
mem_limit: 2000m
|
||||
restart: always
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
otel-collector-metrics:
|
||||
image: signoz/otelcontribcol:0.5.0
|
||||
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
|
||||
volumes:
|
||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:1.30
|
||||
container_name: hotrod
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
container_name: load-hotrod
|
||||
hostname: load-hotrod
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
NO_PROXY: standalone
|
||||
TASK_DELAY_FROM: 5
|
||||
TASK_DELAY_TO: 30
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ../common/locust-scripts:/locust
|
||||
@@ -1,51 +1,191 @@
|
||||
version: "2.4"
|
||||
|
||||
x-clickhouse-defaults: &clickhouse-defaults
|
||||
restart: on-failure
|
||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
||||
tty: true
|
||||
depends_on:
|
||||
- zookeeper-1
|
||||
# - zookeeper-2
|
||||
# - zookeeper-3
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
|
||||
x-clickhouse-depend: &clickhouse-depend
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
# clickhouse-2:
|
||||
# condition: service_healthy
|
||||
# clickhouse-3:
|
||||
# condition: service_healthy
|
||||
|
||||
services:
|
||||
|
||||
zookeeper-1:
|
||||
image: bitnami/zookeeper:3.7.0
|
||||
container_name: zookeeper-1
|
||||
hostname: zookeeper-1
|
||||
user: root
|
||||
ports:
|
||||
- "2181:2181"
|
||||
- "2888:2888"
|
||||
- "3888:3888"
|
||||
volumes:
|
||||
- ./data/zookeeper-1:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=1
|
||||
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
# zookeeper-2:
|
||||
# image: bitnami/zookeeper:3.7.0
|
||||
# container_name: zookeeper-2
|
||||
# hostname: zookeeper-2
|
||||
# user: root
|
||||
# ports:
|
||||
# - "2182:2181"
|
||||
# - "2889:2888"
|
||||
# - "3889:3888"
|
||||
# volumes:
|
||||
# - ./data/zookeeper-2:/bitnami/zookeeper
|
||||
# environment:
|
||||
# - ZOO_SERVER_ID=2
|
||||
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
|
||||
# - ALLOW_ANONYMOUS_LOGIN=yes
|
||||
# - ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
# zookeeper-3:
|
||||
# image: bitnami/zookeeper:3.7.0
|
||||
# container_name: zookeeper-3
|
||||
# hostname: zookeeper-3
|
||||
# user: root
|
||||
# ports:
|
||||
# - "2183:2181"
|
||||
# - "2890:2888"
|
||||
# - "3890:3888"
|
||||
# volumes:
|
||||
# - ./data/zookeeper-3:/bitnami/zookeeper
|
||||
# environment:
|
||||
# - ZOO_SERVER_ID=3
|
||||
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
|
||||
# - ALLOW_ANONYMOUS_LOGIN=yes
|
||||
# - ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
clickhouse:
|
||||
image: yandex/clickhouse-server:21.12.3.32
|
||||
<<: *clickhouse-defaults
|
||||
container_name: clickhouse
|
||||
hostname: clickhouse
|
||||
ports:
|
||||
- "9000:9000"
|
||||
- "8123:8123"
|
||||
- "9181:9181"
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
- ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
|
||||
# clickhouse-2:
|
||||
# <<: *clickhouse-defaults
|
||||
# container_name: clickhouse-2
|
||||
# hostname: clickhouse-2
|
||||
# ports:
|
||||
# - "9001:9000"
|
||||
# - "8124:8123"
|
||||
# - "9182:9181"
|
||||
# volumes:
|
||||
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
# - ./data/clickhouse-2/:/var/lib/clickhouse/
|
||||
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
|
||||
|
||||
# clickhouse-3:
|
||||
# <<: *clickhouse-defaults
|
||||
# container_name: clickhouse-3
|
||||
# hostname: clickhouse-3
|
||||
# ports:
|
||||
# - "9002:9000"
|
||||
# - "8125:8123"
|
||||
# - "9183:9181"
|
||||
# volumes:
|
||||
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
# - ./data/clickhouse-3/:/var/lib/clickhouse/
|
||||
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
|
||||
alertmanager:
|
||||
image: signoz/alertmanager:0.5.0
|
||||
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.0-0.2}
|
||||
volumes:
|
||||
- ./alertmanager.yml:/prometheus/alertmanager.yml
|
||||
- ./data/alertmanager:/data
|
||||
depends_on:
|
||||
query-service:
|
||||
condition: service_healthy
|
||||
restart: on-failure
|
||||
command:
|
||||
- '--config.file=/prometheus/alertmanager.yml'
|
||||
- '--storage.path=/data'
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
|
||||
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
||||
|
||||
|
||||
query-service:
|
||||
image: signoz/query-service:0.6.1
|
||||
image: signoz/query-service:${DOCKER_TAG:-0.15.0}
|
||||
container_name: query-service
|
||||
command: ["-config=/root/config/prometheus.yml"]
|
||||
# ports:
|
||||
# - "6060:6060" # pprof port
|
||||
# - "8080:8080" # query-service port
|
||||
volumes:
|
||||
- ./prometheus.yml:/root/config/prometheus.yml
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
|
||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
- DEPLOYMENT_TYPE=docker-standalone-amd
|
||||
restart: on-failure
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
<<: *clickhouse-depend
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.6.1
|
||||
image: signoz/frontend:${DOCKER_TAG:-0.15.0}
|
||||
container_name: frontend
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
- alertmanager
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
@@ -53,37 +193,53 @@ services:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
otel-collector:
|
||||
image: signoz/otelcontribcol:0.5.0
|
||||
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.3}
|
||||
command: ["--config=/etc/otel-collector-config.yaml"]
|
||||
user: root # required for reading docker container logs
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||
- DOCKER_MULTI_NODE_CLUSTER=false
|
||||
ports:
|
||||
- "4317:4317" # OTLP GRPC receiver
|
||||
mem_limit: 2000m
|
||||
restart: always
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
||||
# - "9411:9411" # Zipkin port
|
||||
# - "13133:13133" # health check extension
|
||||
# - "14250:14250" # Jaeger gRPC
|
||||
# - "14268:14268" # Jaeger thrift HTTP
|
||||
# - "55678:55678" # OpenCensus receiver
|
||||
# - "55679:55679" # zPages extension
|
||||
restart: on-failure
|
||||
<<: *clickhouse-depend
|
||||
|
||||
otel-collector-metrics:
|
||||
image: signoz/otelcontribcol:0.5.0
|
||||
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.3}
|
||||
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
|
||||
volumes:
|
||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
# ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "13133:13133" # Health check extension
|
||||
# - "55679:55679" # zPages extension
|
||||
restart: on-failure
|
||||
<<: *clickhouse-depend
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:1.30
|
||||
container_name: hotrod
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
image: jaegertracing/example-hotrod:1.30
|
||||
container_name: hotrod
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
|
||||
@@ -1,67 +1,175 @@
|
||||
receivers:
|
||||
filelog/dockercontainers:
|
||||
include: [ "/var/lib/docker/containers/*/*.log" ]
|
||||
start_at: end
|
||||
include_file_path: true
|
||||
include_file_name: false
|
||||
operators:
|
||||
- type: json_parser
|
||||
id: parser-docker
|
||||
output: extract_metadata_from_filepath
|
||||
timestamp:
|
||||
parse_from: attributes.time
|
||||
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
||||
- type: regex_parser
|
||||
id: extract_metadata_from_filepath
|
||||
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
|
||||
parse_from: attributes["log.file.path"]
|
||||
output: parse_body
|
||||
- type: move
|
||||
id: parse_body
|
||||
from: attributes.log
|
||||
to: body
|
||||
output: time
|
||||
- type: remove
|
||||
id: time
|
||||
field: attributes.time
|
||||
opencensus:
|
||||
endpoint: 0.0.0.0:55678
|
||||
otlp/spanmetrics:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: "localhost:12345"
|
||||
endpoint: localhost:12345
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:14250
|
||||
thrift_http:
|
||||
endpoint: 0.0.0.0:14268
|
||||
# thrift_compact:
|
||||
# endpoint: 0.0.0.0:6831
|
||||
# thrift_binary:
|
||||
# endpoint: 0.0.0.0:6832
|
||||
hostmetrics:
|
||||
collection_interval: 30s
|
||||
scrapers:
|
||||
cpu:
|
||||
load:
|
||||
memory:
|
||||
disk:
|
||||
filesystem:
|
||||
network:
|
||||
cpu: {}
|
||||
load: {}
|
||||
memory: {}
|
||||
disk: {}
|
||||
filesystem: {}
|
||||
network: {}
|
||||
prometheus:
|
||||
config:
|
||||
global:
|
||||
scrape_interval: 60s
|
||||
scrape_configs:
|
||||
# otel-collector internal metrics
|
||||
- job_name: otel-collector
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost:8888
|
||||
labels:
|
||||
job_name: otel-collector
|
||||
|
||||
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 1000
|
||||
send_batch_size: 10000
|
||||
send_batch_max_size: 11000
|
||||
timeout: 10s
|
||||
signozspanmetrics/prometheus:
|
||||
metrics_exporter: prometheus
|
||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||
memory_limiter:
|
||||
# Same as --mem-ballast-size-mib CLI argument
|
||||
ballast_size_mib: 683
|
||||
# 80% of maximum memory up to 2G
|
||||
limit_mib: 1500
|
||||
# 25% of limit up to 2G
|
||||
spike_limit_mib: 512
|
||||
check_interval: 5s
|
||||
dimensions_cache_size: 100000
|
||||
dimensions:
|
||||
- name: service.namespace
|
||||
default: default
|
||||
- name: deployment.environment
|
||||
default: default
|
||||
# This is added to ensure the uniqueness of the timeseries
|
||||
# Otherwise, identical timeseries produced by multiple replicas of
|
||||
# collectors result in incorrect APM metrics
|
||||
- name: 'signoz.collector.id'
|
||||
# memory_limiter:
|
||||
# # 80% of maximum memory up to 2G
|
||||
# limit_mib: 1500
|
||||
# # 25% of limit up to 2G
|
||||
# spike_limit_mib: 512
|
||||
# check_interval: 5s
|
||||
#
|
||||
# # 50% of the maximum memory
|
||||
# limit_percentage: 50
|
||||
# # 20% of max memory usage spike expected
|
||||
# spike_limit_percentage: 20
|
||||
# queued_retry:
|
||||
# num_workers: 4
|
||||
# queue_size: 100
|
||||
# retry_on_failure: true
|
||||
resourcedetection:
|
||||
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
||||
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
|
||||
timeout: 2s
|
||||
|
||||
extensions:
|
||||
health_check: {}
|
||||
zpages: {}
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1777
|
||||
|
||||
exporters:
|
||||
clickhouse:
|
||||
datasource: tcp://clickhouse:9000
|
||||
clickhousetraces:
|
||||
datasource: tcp://clickhouse:9000/?database=signoz_traces
|
||||
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true
|
||||
clickhousemetricswrite/prometheus:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
prometheus:
|
||||
endpoint: "0.0.0.0:8889"
|
||||
endpoint: 0.0.0.0:8889
|
||||
# logging: {}
|
||||
|
||||
clickhouselogsexporter:
|
||||
dsn: tcp://clickhouse:9000/
|
||||
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
|
||||
timeout: 5s
|
||||
sending_queue:
|
||||
queue_size: 100
|
||||
retry_on_failure:
|
||||
enabled: true
|
||||
initial_interval: 5s
|
||||
max_interval: 30s
|
||||
max_elapsed_time: 300s
|
||||
|
||||
service:
|
||||
extensions: [health_check, zpages]
|
||||
telemetry:
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions:
|
||||
- health_check
|
||||
- zpages
|
||||
- pprof
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [jaeger, otlp]
|
||||
processors: [signozspanmetrics/prometheus, batch]
|
||||
exporters: [clickhouse]
|
||||
exporters: [clickhousetraces]
|
||||
metrics:
|
||||
receivers: [otlp, hostmetrics]
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
metrics/generic:
|
||||
receivers: [hostmetrics]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
metrics/prometheus:
|
||||
receivers: [prometheus]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite/prometheus]
|
||||
metrics/spanmetrics:
|
||||
receivers: [otlp/spanmetrics]
|
||||
exporters: [prometheus]
|
||||
exporters: [prometheus]
|
||||
logs:
|
||||
receivers: [otlp, filelog/dockercontainers]
|
||||
processors: [batch]
|
||||
exporters: [clickhouselogsexporter]
|
||||
@@ -3,42 +3,67 @@ receivers:
|
||||
protocols:
|
||||
grpc:
|
||||
http:
|
||||
|
||||
# Data sources: metrics
|
||||
prometheus:
|
||||
config:
|
||||
scrape_configs:
|
||||
- job_name: "otel-collector"
|
||||
scrape_interval: 30s
|
||||
# otel-collector-metrics internal metrics
|
||||
- job_name: otel-collector-metrics
|
||||
scrape_interval: 60s
|
||||
static_configs:
|
||||
- targets: ["otel-collector:8889"]
|
||||
- targets:
|
||||
- localhost:8888
|
||||
labels:
|
||||
job_name: otel-collector-metrics
|
||||
# SigNoz span metrics
|
||||
- job_name: signozspanmetrics-collector
|
||||
scrape_interval: 60s
|
||||
static_configs:
|
||||
- targets:
|
||||
- otel-collector:8889
|
||||
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 1000
|
||||
send_batch_size: 10000
|
||||
send_batch_max_size: 11000
|
||||
timeout: 10s
|
||||
memory_limiter:
|
||||
# Same as --mem-ballast-size-mib CLI argument
|
||||
ballast_size_mib: 683
|
||||
# 80% of maximum memory up to 2G
|
||||
limit_mib: 1500
|
||||
# 25% of limit up to 2G
|
||||
spike_limit_mib: 512
|
||||
check_interval: 5s
|
||||
# memory_limiter:
|
||||
# # 80% of maximum memory up to 2G
|
||||
# limit_mib: 1500
|
||||
# # 25% of limit up to 2G
|
||||
# spike_limit_mib: 512
|
||||
# check_interval: 5s
|
||||
#
|
||||
# # 50% of the maximum memory
|
||||
# limit_percentage: 50
|
||||
# # 20% of max memory usage spike expected
|
||||
# spike_limit_percentage: 20
|
||||
# queued_retry:
|
||||
# num_workers: 4
|
||||
# queue_size: 100
|
||||
# retry_on_failure: true
|
||||
|
||||
extensions:
|
||||
health_check: {}
|
||||
zpages: {}
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1777
|
||||
|
||||
exporters:
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
|
||||
service:
|
||||
extensions: [health_check, zpages]
|
||||
telemetry:
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions:
|
||||
- health_check
|
||||
- zpages
|
||||
- pprof
|
||||
pipelines:
|
||||
metrics:
|
||||
receivers: [otlp, prometheus]
|
||||
receivers: [prometheus]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
exporters: [clickhousemetricswrite]
|
||||
|
||||
@@ -19,8 +19,7 @@ rule_files:
|
||||
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
# Here it's Prometheus itself.
|
||||
scrape_configs:
|
||||
|
||||
scrape_configs: []
|
||||
|
||||
remote_read:
|
||||
- url: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
|
||||
BIN  deploy/docker/clickhouse-setup/user_scripts/histogramQuantile  (new executable file, binary not shown)
237  deploy/docker/clickhouse-setup/user_scripts/histogramQuantile.go  (new normal file)
@@ -0,0 +1,237 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NOTE: executable must be built with target OS and architecture set to linux/amd64
|
||||
// env GOOS=linux GOARCH=amd64 go build -o histogramQuantile histogramQuantile.go
|
||||
|
||||
// The following code is adapted from the following source:
|
||||
// https://github.com/prometheus/prometheus/blob/main/promql/quantile.go
|
||||
|
||||
type bucket struct {
|
||||
upperBound float64
|
||||
count float64
|
||||
}
|
||||
|
||||
// buckets implements sort.Interface.
|
||||
type buckets []bucket
|
||||
|
||||
func (b buckets) Len() int { return len(b) }
|
||||
func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }
|
||||
|
||||
// bucketQuantile calculates the quantile 'q' based on the given buckets. The
|
||||
// buckets will be sorted by upperBound by this function (i.e. no sorting
|
||||
// needed before calling this function). The quantile value is interpolated
|
||||
// assuming a linear distribution within a bucket. However, if the quantile
|
||||
// falls into the highest bucket, the upper bound of the 2nd highest bucket is
|
||||
// returned. A natural lower bound of 0 is assumed if the upper bound of the
|
||||
// lowest bucket is greater 0. In that case, interpolation in the lowest bucket
|
||||
// happens linearly between 0 and the upper bound of the lowest bucket.
|
||||
// However, if the lowest bucket has an upper bound less or equal 0, this upper
|
||||
// bound is returned if the quantile falls into the lowest bucket.
|
||||
//
|
||||
// There are a number of special cases (once we have a way to report errors
|
||||
// happening during evaluations of AST functions, we should report those
|
||||
// explicitly):
|
||||
//
|
||||
// If 'buckets' has 0 observations, NaN is returned.
|
||||
//
|
||||
// If 'buckets' has fewer than 2 elements, NaN is returned.
|
||||
//
|
||||
// If the highest bucket is not +Inf, NaN is returned.
|
||||
//
|
||||
// If q==NaN, NaN is returned.
|
||||
//
|
||||
// If q<0, -Inf is returned.
|
||||
//
|
||||
// If q>1, +Inf is returned.
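//
// Worked example (illustrative, not part of the upstream comment): given the
// cumulative buckets {0.1: 5, 0.5: 9, +Inf: 10} and q=0.7, the rank is
// 0.7*10 = 7, which falls into the 0.5 bucket, so the result is interpolated
// as 0.1 + (0.5-0.1)*((7-5)/(9-5)) = 0.3.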
|
||||
func bucketQuantile(q float64, buckets buckets) float64 {
|
||||
if math.IsNaN(q) {
|
||||
return math.NaN()
|
||||
}
|
||||
if q < 0 {
|
||||
return math.Inf(-1)
|
||||
}
|
||||
if q > 1 {
|
||||
return math.Inf(+1)
|
||||
}
|
||||
sort.Sort(buckets)
|
||||
if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
|
||||
return math.NaN()
|
||||
}
|
||||
|
||||
buckets = coalesceBuckets(buckets)
|
||||
ensureMonotonic(buckets)
|
||||
|
||||
if len(buckets) < 2 {
|
||||
return math.NaN()
|
||||
}
|
||||
observations := buckets[len(buckets)-1].count
|
||||
if observations == 0 {
|
||||
return math.NaN()
|
||||
}
|
||||
rank := q * observations
|
||||
b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })
|
||||
|
||||
if b == len(buckets)-1 {
|
||||
return buckets[len(buckets)-2].upperBound
|
||||
}
|
||||
if b == 0 && buckets[0].upperBound <= 0 {
|
||||
return buckets[0].upperBound
|
||||
}
|
||||
var (
|
||||
bucketStart float64
|
||||
bucketEnd = buckets[b].upperBound
|
||||
count = buckets[b].count
|
||||
)
|
||||
if b > 0 {
|
||||
bucketStart = buckets[b-1].upperBound
|
||||
count -= buckets[b-1].count
|
||||
rank -= buckets[b-1].count
|
||||
}
|
||||
return bucketStart + (bucketEnd-bucketStart)*(rank/count)
|
||||
}
|
||||
|
||||
// coalesceBuckets merges buckets with the same upper bound.
|
||||
//
|
||||
// The input buckets must be sorted.
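// For example (illustrative): [{1, 2}, {1, 3}, {2, 5}] -> [{1, 5}, {2, 5}].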
|
||||
func coalesceBuckets(buckets buckets) buckets {
|
||||
last := buckets[0]
|
||||
i := 0
|
||||
for _, b := range buckets[1:] {
|
||||
if b.upperBound == last.upperBound {
|
||||
last.count += b.count
|
||||
} else {
|
||||
buckets[i] = last
|
||||
last = b
|
||||
i++
|
||||
}
|
||||
}
|
||||
buckets[i] = last
|
||||
return buckets[:i+1]
|
||||
}
|
||||
|
||||
// The assumption that bucket counts increase monotonically with increasing
|
||||
// upperBound may be violated during:
|
||||
//
|
||||
// * Recording rule evaluation of histogram_quantile, especially when rate()
|
||||
// has been applied to the underlying bucket timeseries.
|
||||
// * Evaluation of histogram_quantile computed over federated bucket
|
||||
// timeseries, especially when rate() has been applied.
|
||||
//
|
||||
// This is because scraped data is not made available to rule evaluation or
|
||||
// federation atomically, so some buckets are computed with data from the
|
||||
// most recent scrapes, but the other buckets are missing data from the most
|
||||
// recent scrape.
|
||||
//
|
||||
// Monotonicity is usually guaranteed because if a bucket with upper bound
|
||||
// u1 has count c1, then any bucket with a higher upper bound u > u1 must
|
||||
// have counted all c1 observations and perhaps more, so that c >= c1.
|
||||
//
|
||||
// Randomly interspersed partial sampling breaks that guarantee, and rate()
|
||||
// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
|
||||
// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
|
||||
// monotonicity is broken. It is exacerbated by rate() because under normal
|
||||
// operation, cumulative counting of buckets will cause the bucket counts to
|
||||
// diverge such that small differences from missing samples are not a problem.
|
||||
// rate() removes this divergence.
|
||||
//
|
||||
// bucketQuantile depends on that monotonicity to do a binary search for the
|
||||
// bucket with the φ-quantile count, so breaking the monotonicity
|
||||
// guarantee causes bucketQuantile() to return undefined (nonsense) results.
|
||||
//
|
||||
// As a somewhat hacky solution until ingestion is atomic per scrape, we
|
||||
// calculate the "envelope" of the histogram buckets, essentially removing
|
||||
// any decreases in the count between successive buckets.
|
||||
|
||||
func ensureMonotonic(buckets buckets) {
|
||||
max := buckets[0].count
|
||||
for i := 1; i < len(buckets); i++ {
|
||||
switch {
|
||||
case buckets[i].count > max:
|
||||
max = buckets[i].count
|
||||
case buckets[i].count < max:
|
||||
buckets[i].count = max
|
||||
}
|
||||
}
|
||||
}
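
// Illustrative example (not in the copied code): if the cumulative counts
// arrive as [10, 7, 12] because of a partial scrape, ensureMonotonic rewrites
// them in place to the non-decreasing envelope [10, 10, 12], which keeps the
// binary search in bucketQuantile well defined.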

// End of copied code.

func readLines() []string {
	r := bufio.NewReader(os.Stdin)
	buf := []byte{}
	lines := []string{}
	for {
		line, isPrefix, err := r.ReadLine()
		if err != nil {
			break
		}
		buf = append(buf, line...)
		if !isPrefix {
			str := strings.TrimSpace(string(buf))
			if len(str) > 0 {
				lines = append(lines, str)
			}
			buf = []byte{}
		}
	}
	if len(buf) > 0 {
		lines = append(lines, string(buf))
	}
	return lines
}

func main() {
	lines := readLines()
	for _, text := range lines {
		// Example input (bounds - counts - quantile):
		//   "[1, 2, 4, 8, 16]", "[1, 5, 8, 10, 14]", 0.9
		parts := strings.Split(text, "\",")
		if len(parts) < 3 {
			continue
		}

		// Parse the bucket bounds, stripping the surrounding quotes and brackets.
		var bucketNumbers []float64
		bounds := strings.Trim(strings.TrimSpace(parts[0]), "\"[]")
		for _, num := range strings.Split(bounds, ",") {
			number, err := strconv.ParseFloat(strings.TrimSpace(num), 64)
			if err == nil {
				bucketNumbers = append(bucketNumbers, number)
			}
		}

		// Parse the bucket counts the same way.
		var bucketCounts []float64
		counts := strings.Trim(strings.TrimSpace(parts[1]), "\"[]")
		for _, num := range strings.Split(counts, ",") {
			number, err := strconv.ParseFloat(strings.TrimSpace(num), 64)
			if err == nil {
				bucketCounts = append(bucketCounts, number)
			}
		}

		// Parse the quantile, trimming surrounding whitespace and any stray quote.
		q, err := strconv.ParseFloat(strings.Trim(strings.TrimSpace(parts[2]), "\""), 64)

		var b buckets
		if err == nil {
			for i := 0; i < len(bucketNumbers) && i < len(bucketCounts); i++ {
				b = append(b, bucket{upperBound: bucketNumbers[i], count: bucketCounts[i]})
			}
		}
		fmt.Println(bucketQuantile(q, b))
	}
}
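
For reference, a minimal sketch (not part of the original patch) of calling the copied bucketQuantile directly; the bucket values here are invented for illustration, and note that the guard above requires the highest bucket's upper bound to be +Inf, otherwise NaN is returned:

	// Hypothetical helper, assuming the bucket/buckets types and
	// bucketQuantile above are in scope.
	func exampleQuantile() {
		b := buckets{
			{upperBound: 1, count: 1},
			{upperBound: 2, count: 5},
			{upperBound: 4, count: 8},
			{upperBound: math.Inf(+1), count: 14},
		}
		// rank = 0.5 * 14 = 7 falls in the (2, 4] bucket, whose own count is
		// 8 - 5 = 3 and whose local rank is 7 - 5 = 2, so this prints
		// 2 + (4-2)*(2/3) ≈ 3.33.
		fmt.Println(bucketQuantile(0.5, b))
	}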
@@ -1,33 +1,43 @@
server {
    listen 3301;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    # to handle uri issue 414 from nginx
    client_max_body_size 24M;
    large_client_header_buffers 8 128k;

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
    }

    location /api/alertmanager {
        proxy_pass http://alertmanager:9093/api/v2;
    }

    location /api {
        proxy_pass http://query-service:8080/api;
        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;
    }

    # redirect server error pages to the static page /50x.html
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
@@ -1,273 +0,0 @@
|
||||
version: "2.4"
|
||||
|
||||
volumes:
|
||||
metadata_data: {}
|
||||
middle_var: {}
|
||||
historical_var: {}
|
||||
broker_var: {}
|
||||
coordinator_var: {}
|
||||
router_var: {}
|
||||
|
||||
# If able to connect to kafka but not able to write to topic otlp_spans look into below link
|
||||
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
|
||||
|
||||
services:
|
||||
|
||||
zookeeper:
|
||||
image: bitnami/zookeeper:3.6.2-debian-10-r100
|
||||
ports:
|
||||
- "2181:2181"
|
||||
environment:
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
|
||||
|
||||
kafka:
|
||||
# image: wurstmeister/kafka
|
||||
image: bitnami/kafka:2.7.0-debian-10-r1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
hostname: kafka
|
||||
environment:
|
||||
KAFKA_ADVERTISED_HOST_NAME: kafka
|
||||
KAFKA_ADVERTISED_PORT: 9092
|
||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
|
||||
ALLOW_PLAINTEXT_LISTENER: 'yes'
|
||||
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
|
||||
|
||||
healthcheck:
|
||||
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
|
||||
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 10
|
||||
depends_on:
|
||||
- zookeeper
|
||||
|
||||
postgres:
|
||||
container_name: postgres
|
||||
image: postgres:latest
|
||||
volumes:
|
||||
- metadata_data:/var/lib/postgresql/data
|
||||
environment:
|
||||
- POSTGRES_PASSWORD=FoolishPassword
|
||||
- POSTGRES_USER=druid
|
||||
- POSTGRES_DB=druid
|
||||
|
||||
coordinator:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: coordinator
|
||||
volumes:
|
||||
- ./storage:/opt/data
|
||||
- coordinator_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
ports:
|
||||
- "8081:8081"
|
||||
command:
|
||||
- coordinator
|
||||
env_file:
|
||||
- environment_tiny/coordinator
|
||||
- environment_tiny/common
|
||||
|
||||
broker:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: broker
|
||||
volumes:
|
||||
- broker_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8082:8082"
|
||||
command:
|
||||
- broker
|
||||
env_file:
|
||||
- environment_tiny/broker
|
||||
- environment_tiny/common
|
||||
|
||||
historical:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: historical
|
||||
volumes:
|
||||
- ./storage:/opt/data
|
||||
- historical_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8083:8083"
|
||||
command:
|
||||
- historical
|
||||
env_file:
|
||||
- environment_tiny/historical
|
||||
- environment_tiny/common
|
||||
|
||||
middlemanager:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: middlemanager
|
||||
volumes:
|
||||
- ./storage:/opt/data
|
||||
- middle_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8091:8091"
|
||||
command:
|
||||
- middleManager
|
||||
env_file:
|
||||
- environment_tiny/middlemanager
|
||||
- environment_tiny/common
|
||||
|
||||
router:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: router
|
||||
volumes:
|
||||
- router_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8888:8888"
|
||||
command:
|
||||
- router
|
||||
env_file:
|
||||
- environment_tiny/router
|
||||
- environment_tiny/common
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
flatten-processor:
|
||||
image: signoz/flattener-processor:0.4.0
|
||||
container_name: flattener-processor
|
||||
|
||||
depends_on:
|
||||
- kafka
|
||||
- otel-collector
|
||||
ports:
|
||||
- "8000:8000"
|
||||
|
||||
environment:
|
||||
- KAFKA_BROKER=kafka:9092
|
||||
- KAFKA_INPUT_TOPIC=otlp_spans
|
||||
- KAFKA_OUTPUT_TOPIC=flattened_spans
|
||||
|
||||
|
||||
query-service:
|
||||
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
|
||||
container_name: query-service
|
||||
|
||||
depends_on:
|
||||
router:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- DruidClientUrl=http://router:8888
|
||||
- DruidDatasource=flattened_spans
|
||||
- STORAGE=druid
|
||||
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
|
||||
- GODEBUG=netdns=go
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.4.1
|
||||
container_name: frontend
|
||||
|
||||
depends_on:
|
||||
- query-service
|
||||
links:
|
||||
- "query-service"
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
create-supervisor:
|
||||
image: theithollow/hollowapp-blog:curl
|
||||
container_name: create-supervisor
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
|
||||
|
||||
depends_on:
|
||||
- router
|
||||
restart: on-failure:6
|
||||
|
||||
volumes:
|
||||
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
|
||||
|
||||
|
||||
set-retention:
|
||||
image: theithollow/hollowapp-blog:curl
|
||||
container_name: set-retention
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
|
||||
|
||||
depends_on:
|
||||
- router
|
||||
restart: on-failure:6
|
||||
volumes:
|
||||
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
|
||||
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector:0.18.0
|
||||
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
ports:
|
||||
- "1777:1777" # pprof extension
|
||||
- "8887:8888" # Prometheus metrics exposed by the agent
|
||||
- "14268:14268" # Jaeger receiver
|
||||
- "55678" # OpenCensus receiver
|
||||
- "55680:55680" # OTLP HTTP/2.0 legacy port
|
||||
- "55681:55681" # OTLP HTTP/1.0 receiver
|
||||
- "4317:4317" # OTLP GRPC receiver
|
||||
- "55679:55679" # zpages extension
|
||||
- "13133" # health_check
|
||||
depends_on:
|
||||
kafka:
|
||||
condition: service_healthy
|
||||
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:latest
|
||||
container_name: hotrod
|
||||
ports:
|
||||
- "9000:8080"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
container_name: load-hotrod
|
||||
hostname: load-hotrod
|
||||
ports:
|
||||
- "8089:8089"
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
NO_PROXY: standalone
|
||||
TASK_DELAY_FROM: 5
|
||||
TASK_DELAY_TO: 30
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ../common/locust-scripts:/locust
|
||||
|
||||
@@ -1,269 +0,0 @@
|
||||
version: "2.4"
|
||||
|
||||
volumes:
|
||||
metadata_data: {}
|
||||
middle_var: {}
|
||||
historical_var: {}
|
||||
broker_var: {}
|
||||
coordinator_var: {}
|
||||
router_var: {}
|
||||
|
||||
# If able to connect to kafka but not able to write to topic otlp_spans look into below link
|
||||
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
|
||||
|
||||
services:
|
||||
|
||||
zookeeper:
|
||||
image: bitnami/zookeeper:3.6.2-debian-10-r100
|
||||
ports:
|
||||
- "2181:2181"
|
||||
environment:
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
|
||||
|
||||
kafka:
|
||||
# image: wurstmeister/kafka
|
||||
image: bitnami/kafka:2.7.0-debian-10-r1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
hostname: kafka
|
||||
environment:
|
||||
KAFKA_ADVERTISED_HOST_NAME: kafka
|
||||
KAFKA_ADVERTISED_PORT: 9092
|
||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
|
||||
ALLOW_PLAINTEXT_LISTENER: 'yes'
|
||||
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
|
||||
|
||||
healthcheck:
|
||||
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
|
||||
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 10
|
||||
depends_on:
|
||||
- zookeeper
|
||||
|
||||
postgres:
|
||||
container_name: postgres
|
||||
image: postgres:latest
|
||||
volumes:
|
||||
- metadata_data:/var/lib/postgresql/data
|
||||
environment:
|
||||
- POSTGRES_PASSWORD=FoolishPassword
|
||||
- POSTGRES_USER=druid
|
||||
- POSTGRES_DB=druid
|
||||
|
||||
coordinator:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: coordinator
|
||||
volumes:
|
||||
- ./storage:/opt/druid/deepStorage
|
||||
- coordinator_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
ports:
|
||||
- "8081:8081"
|
||||
command:
|
||||
- coordinator
|
||||
env_file:
|
||||
- environment_small/coordinator
|
||||
|
||||
broker:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: broker
|
||||
volumes:
|
||||
- broker_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8082:8082"
|
||||
command:
|
||||
- broker
|
||||
env_file:
|
||||
- environment_small/broker
|
||||
|
||||
historical:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: historical
|
||||
volumes:
|
||||
- ./storage:/opt/druid/deepStorage
|
||||
- historical_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8083:8083"
|
||||
command:
|
||||
- historical
|
||||
env_file:
|
||||
- environment_small/historical
|
||||
|
||||
middlemanager:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: middlemanager
|
||||
volumes:
|
||||
- ./storage:/opt/druid/deepStorage
|
||||
- middle_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8091:8091"
|
||||
command:
|
||||
- middleManager
|
||||
env_file:
|
||||
- environment_small/middlemanager
|
||||
|
||||
router:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: router
|
||||
volumes:
|
||||
- router_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8888:8888"
|
||||
command:
|
||||
- router
|
||||
env_file:
|
||||
- environment_small/router
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
flatten-processor:
|
||||
image: signoz/flattener-processor:0.4.0
|
||||
container_name: flattener-processor
|
||||
|
||||
depends_on:
|
||||
- kafka
|
||||
- otel-collector
|
||||
ports:
|
||||
- "8000:8000"
|
||||
|
||||
environment:
|
||||
- KAFKA_BROKER=kafka:9092
|
||||
- KAFKA_INPUT_TOPIC=otlp_spans
|
||||
- KAFKA_OUTPUT_TOPIC=flattened_spans
|
||||
|
||||
|
||||
query-service:
|
||||
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
|
||||
container_name: query-service
|
||||
|
||||
depends_on:
|
||||
router:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "8080:8080"
|
||||
|
||||
volumes:
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- DruidClientUrl=http://router:8888
|
||||
- DruidDatasource=flattened_spans
|
||||
- STORAGE=druid
|
||||
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
|
||||
- GODEBUG=netdns=go
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.4.1
|
||||
container_name: frontend
|
||||
|
||||
depends_on:
|
||||
- query-service
|
||||
links:
|
||||
- "query-service"
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ./nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
create-supervisor:
|
||||
image: theithollow/hollowapp-blog:curl
|
||||
container_name: create-supervisor
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
|
||||
|
||||
depends_on:
|
||||
- router
|
||||
restart: on-failure:6
|
||||
|
||||
volumes:
|
||||
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
|
||||
|
||||
|
||||
set-retention:
|
||||
image: theithollow/hollowapp-blog:curl
|
||||
container_name: set-retention
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
|
||||
|
||||
depends_on:
|
||||
- router
|
||||
restart: on-failure:6
|
||||
volumes:
|
||||
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
|
||||
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector:0.18.0
|
||||
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
ports:
|
||||
- "1777:1777" # pprof extension
|
||||
- "8887:8888" # Prometheus metrics exposed by the agent
|
||||
- "14268:14268" # Jaeger receiver
|
||||
- "55678" # OpenCensus receiver
|
||||
- "55680:55680" # OTLP HTTP/2.0 leagcy grpc receiver
|
||||
- "55681:55681" # OTLP HTTP/1.0 receiver
|
||||
- "4317:4317" # OTLP GRPC receiver
|
||||
- "55679:55679" # zpages extension
|
||||
- "13133" # health_check
|
||||
depends_on:
|
||||
kafka:
|
||||
condition: service_healthy
|
||||
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:latest
|
||||
container_name: hotrod
|
||||
ports:
|
||||
- "9000:8080"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
container_name: load-hotrod
|
||||
hostname: load-hotrod
|
||||
ports:
|
||||
- "8089:8089"
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
NO_PROXY: standalone
|
||||
TASK_DELAY_FROM: 5
|
||||
TASK_DELAY_TO: 30
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ./locust-scripts:/locust
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
|
||||
@@ -1,69 +0,0 @@
|
||||
{
|
||||
"type": "kafka",
|
||||
"dataSchema": {
|
||||
"dataSource": "flattened_spans",
|
||||
"parser": {
|
||||
"type": "string",
|
||||
"parseSpec": {
|
||||
"format": "json",
|
||||
"timestampSpec": {
|
||||
"column": "StartTimeUnixNano",
|
||||
"format": "nano"
|
||||
},
|
||||
"dimensionsSpec": {
|
||||
"dimensions": [
|
||||
"TraceId",
|
||||
"SpanId",
|
||||
"ParentSpanId",
|
||||
"Name",
|
||||
"ServiceName",
|
||||
"References",
|
||||
"Tags",
|
||||
"ExternalHttpMethod",
|
||||
"ExternalHttpUrl",
|
||||
"Component",
|
||||
"DBSystem",
|
||||
"DBName",
|
||||
"DBOperation",
|
||||
"PeerService",
|
||||
{
|
||||
"type": "string",
|
||||
"name": "TagsKeys",
|
||||
"multiValueHandling": "ARRAY"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "TagsValues",
|
||||
"multiValueHandling": "ARRAY"
|
||||
},
|
||||
{ "name": "DurationNano", "type": "Long" },
|
||||
{ "name": "Kind", "type": "int" },
|
||||
{ "name": "StatusCode", "type": "int" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"metricsSpec" : [
|
||||
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
|
||||
],
|
||||
"granularitySpec": {
|
||||
"type": "uniform",
|
||||
"segmentGranularity": "DAY",
|
||||
"queryGranularity": "NONE",
|
||||
"rollup": false
|
||||
}
|
||||
},
|
||||
"tuningConfig": {
|
||||
"type": "kafka",
|
||||
"reportParseExceptions": true
|
||||
},
|
||||
"ioConfig": {
|
||||
"topic": "flattened_spans",
|
||||
"replicas": 1,
|
||||
"taskDuration": "PT20M",
|
||||
"completionTimeout": "PT30M",
|
||||
"consumerProperties": {
|
||||
"bootstrap.servers": "kafka:9092"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=512m
|
||||
DRUID_XMS=512m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=768m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=100MiB
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,52 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=64m
|
||||
DRUID_XMS=64m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,53 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=512m
|
||||
DRUID_XMS=512m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=1280m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=200MiB
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=2
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,53 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=1g
|
||||
DRUID_XMS=1g
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=2g
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=200MiB
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=2
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,52 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=128m
|
||||
DRUID_XMS=128m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=128m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,52 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=512m
|
||||
DRUID_XMS=512m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=50MiB
|
||||
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,26 +0,0 @@
|
||||
# For S3 storage
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
|
||||
|
||||
|
||||
# druid_storage_type=s3
|
||||
# druid_storage_bucket=<s3-bucket-name>
|
||||
# druid_storage_baseKey=druid/segments
|
||||
|
||||
# AWS_ACCESS_KEY_ID=<s3-access-id>
|
||||
# AWS_SECRET_ACCESS_KEY=<s3-access-key>
|
||||
# AWS_REGION=<s3-aws-region>
|
||||
|
||||
# druid_indexer_logs_type=s3
|
||||
# druid_indexer_logs_s3Bucket=<s3-bucket-name>
|
||||
# druid_indexer_logs_s3Prefix=druid/indexing-logs
|
||||
|
||||
# -----------------------------------------------------------
|
||||
# For local storage
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/data/segments
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/data/indexing-logs
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=64m
|
||||
DRUID_XMS=64m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,49 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=512m
|
||||
DRUID_XMS=512m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=50MiB
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,50 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=64m
|
||||
DRUID_XMS=64m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,49 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=64m
|
||||
DRUID_XMS=64m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=128m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,51 +0,0 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  queued_retry:
    num_workers: 4
    queue_size: 100
    retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  kafka/traces:
    brokers:
      - kafka:9092
    topic: 'otlp_spans'
    protocol_version: 2.0.0

  kafka/metrics:
    brokers:
      - kafka:9092
    topic: 'otlp_metrics'
    protocol_version: 2.0.0
service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [memory_limiter, batch, queued_retry]
      exporters: [kafka/traces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [kafka/metrics]
@@ -36,9 +36,9 @@ is_mac() {
|
||||
[[ $OSTYPE == darwin* ]]
|
||||
}
|
||||
|
||||
is_arm64(){
|
||||
[[ `uname -m` == 'arm64' ]]
|
||||
}
|
||||
# is_arm64(){
|
||||
# [[ `uname -m` == 'arm64' ]]
|
||||
# }
|
||||
|
||||
check_os() {
|
||||
if is_mac; then
|
||||
@@ -102,7 +102,7 @@ check_os() {
|
||||
# The script should error out in case they aren't available
|
||||
check_ports_occupied() {
|
||||
local port_check_output
|
||||
local ports_pattern="80|3301|8080"
|
||||
local ports_pattern="3301|4317"
|
||||
|
||||
if is_mac; then
|
||||
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
|
||||
@@ -119,7 +119,7 @@ check_ports_occupied() {
|
||||
send_event "port_not_available"
|
||||
|
||||
echo "+++++++++++ ERROR ++++++++++++++++++++++"
|
||||
echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports."
|
||||
echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
|
||||
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
|
||||
echo "++++++++++++++++++++++++++++++++++++++++"
|
||||
echo ""
|
||||
@@ -133,58 +133,44 @@ install_docker() {
|
||||
|
||||
|
||||
if [[ $package_manager == apt-get ]]; then
|
||||
apt_cmd="sudo apt-get --yes --quiet"
|
||||
apt_cmd="$sudo_cmd apt-get --yes --quiet"
|
||||
$apt_cmd update
|
||||
$apt_cmd install software-properties-common gnupg-agent
|
||||
curl -fsSL "https://download.docker.com/linux/$os/gpg" | sudo apt-key add -
|
||||
sudo add-apt-repository \
|
||||
curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
|
||||
$sudo_cmd add-apt-repository \
|
||||
"deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
|
||||
$apt_cmd update
|
||||
echo "Installing docker"
|
||||
$apt_cmd install docker-ce docker-ce-cli containerd.io
|
||||
elif [[ $package_manager == zypper ]]; then
|
||||
zypper_cmd="sudo zypper --quiet --no-gpg-checks --non-interactive"
|
||||
zypper_cmd="$sudo_cmd zypper --quiet --no-gpg-checks --non-interactive"
|
||||
echo "Installing docker"
|
||||
if [[ $os == sles ]]; then
|
||||
os_sp="$(cat /etc/*-release | awk -F= '$1 == "VERSION_ID" { gsub(/"/, ""); print $2; exit }')"
|
||||
os_arch="$(uname -i)"
|
||||
sudo SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
|
||||
SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
|
||||
fi
|
||||
$zypper_cmd install docker docker-runc containerd
|
||||
sudo systemctl enable docker.service
|
||||
$sudo_cmd systemctl enable docker.service
|
||||
elif [[ $package_manager == yum && $os == 'amazon linux' ]]; then
|
||||
echo
|
||||
echo "Amazon Linux detected ... "
|
||||
echo
|
||||
# sudo yum install docker
|
||||
# sudo service docker start
|
||||
sudo amazon-linux-extras install docker
|
||||
# yum install docker
|
||||
# service docker start
|
||||
$sudo_cmd yum install -y amazon-linux-extras
|
||||
$sudo_cmd amazon-linux-extras enable docker
|
||||
$sudo_cmd yum install -y docker
|
||||
else
|
||||
|
||||
yum_cmd="sudo yum --assumeyes --quiet"
|
||||
yum_cmd="$sudo_cmd yum --assumeyes --quiet"
|
||||
$yum_cmd install yum-utils
|
||||
sudo yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
|
||||
$sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
|
||||
echo "Installing docker"
|
||||
$yum_cmd install docker-ce docker-ce-cli containerd.io
|
||||
|
||||
fi
|
||||
|
||||
}
|
||||
install_docker_machine() {
|
||||
|
||||
echo "\nInstalling docker machine ..."
|
||||
|
||||
if [[ $os == "Mac" ]];then
|
||||
curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine
|
||||
chmod +x /usr/local/bin/docker-machine
|
||||
else
|
||||
curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine
|
||||
chmod +x /tmp/docker-machine
|
||||
sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
|
||||
|
||||
fi
|
||||
|
||||
|
||||
}
|
||||
|
||||
install_docker_compose() {
|
||||
@@ -192,9 +178,9 @@ install_docker_compose() {
|
||||
if [[ ! -f /usr/bin/docker-compose ]];then
|
||||
echo "++++++++++++++++++++++++"
|
||||
echo "Installing docker-compose"
|
||||
sudo curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
||||
sudo chmod +x /usr/local/bin/docker-compose
|
||||
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
|
||||
$sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
||||
$sudo_cmd chmod +x /usr/local/bin/docker-compose
|
||||
$sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
|
||||
echo "docker-compose installed!"
|
||||
echo ""
|
||||
fi
|
||||
@@ -210,16 +196,28 @@ install_docker_compose() {
|
||||
}
|
||||
|
||||
start_docker() {
|
||||
echo "Starting Docker ..."
|
||||
if [ $os = "Mac" ]; then
|
||||
echo -e "🐳 Starting Docker ...\n"
|
||||
if [[ $os == "Mac" ]]; then
|
||||
open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
|
||||
else
|
||||
if ! sudo systemctl is-active docker.service > /dev/null; then
|
||||
if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
|
||||
echo "Starting docker service"
|
||||
sudo systemctl start docker.service
|
||||
$sudo_cmd systemctl start docker.service
|
||||
fi
|
||||
# if [[ -z $sudo_cmd ]]; then
|
||||
# docker ps > /dev/null && true
|
||||
# if [[ $? -ne 0 ]]; then
|
||||
# request_sudo
|
||||
# fi
|
||||
# fi
|
||||
if [[ -z $sudo_cmd ]]; then
|
||||
if ! docker ps > /dev/null && true; then
|
||||
request_sudo
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
wait_for_containers_start() {
|
||||
local timeout=$1
|
||||
|
||||
@@ -229,16 +227,6 @@ wait_for_containers_start() {
|
||||
if [[ status_code -eq 200 ]]; then
|
||||
break
|
||||
else
|
||||
if [ $setup_type == 'druid' ]; then
|
||||
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
|
||||
LEN_SUPERVISORS="${#SUPERVISORS}"
|
||||
|
||||
if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
|
||||
echo -e "\n🟠 Supervisors taking time to start ⏳ ... let's wait for some more time ⏱️\n\n"
|
||||
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d
|
||||
fi
|
||||
fi
|
||||
|
||||
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
|
||||
fi
|
||||
((timeout--))
|
||||
@@ -249,31 +237,26 @@ wait_for_containers_start() {
|
||||
}
|
||||
|
||||
bye() { # Prints a friendly good bye message and exits the script.
|
||||
if [ "$?" -ne 0 ]; then
|
||||
if [[ "$?" -ne 0 ]]; then
|
||||
set +o errexit
|
||||
|
||||
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
|
||||
echo ""
|
||||
if [ $setup_type == 'clickhouse' ]; then
|
||||
if is_arm64; then
|
||||
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a"
|
||||
else
|
||||
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
|
||||
fi
|
||||
else
|
||||
echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
|
||||
fi
|
||||
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
|
||||
|
||||
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
|
||||
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
|
||||
echo "++++++++++++++++++++++++++++++++++++++++"
|
||||
|
||||
echo -e "\n📨 Please share your email to receive support with the installation"
|
||||
read -rp 'Email: ' email
|
||||
|
||||
while [[ $email == "" ]]
|
||||
do
|
||||
if [[ $email == "" ]]; then
|
||||
echo -e "\n📨 Please share your email to receive support with the installation"
|
||||
read -rp 'Email: ' email
|
||||
done
|
||||
|
||||
while [[ $email == "" ]]
|
||||
do
|
||||
read -rp 'Email: ' email
|
||||
done
|
||||
fi
|
||||
|
||||
send_event "installation_support"
|
||||
|
||||
@@ -284,33 +267,82 @@ bye() { # Prints a friendly good bye message and exits the script.
|
||||
fi
|
||||
}
|
||||
|
||||
request_sudo() {
|
||||
if hash sudo 2>/dev/null; then
|
||||
echo -e "\n\n🙇 We will need sudo access to complete the installation."
|
||||
if (( $EUID != 0 )); then
|
||||
sudo_cmd="sudo"
|
||||
echo -e "Please enter your sudo password, if prompt."
|
||||
# $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
|
||||
# if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
|
||||
# echo "Need sudo privileges to proceed with the installation."
|
||||
# exit 1;
|
||||
# fi
|
||||
if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
|
||||
echo "Need sudo privileges to proceed with the installation."
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
echo -e "Got it! Thanks!! 🙏\n"
|
||||
echo -e "Okay! We will bring up the SigNoz cluster from here 🚀\n"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
echo ""
|
||||
echo -e "👋 Thank you for trying out SigNoz! "
|
||||
echo ""
|
||||
|
||||
sudo_cmd=""
|
||||
|
||||
# Check sudo permissions
|
||||
if (( $EUID != 0 )); then
|
||||
echo "🟡 Running installer with non-sudo permissions."
|
||||
echo " In case of any failure or prompt, please consider running the script with sudo privileges."
|
||||
echo ""
|
||||
else
|
||||
sudo_cmd="sudo"
|
||||
fi
|
||||
|
||||
# Checking OS and assigning package manager
|
||||
desired_os=0
|
||||
os=""
|
||||
email=""
|
||||
echo -e "Detecting your OS ..."
|
||||
echo -e "🌏 Detecting your OS ...\n"
|
||||
check_os
|
||||
|
||||
# Obtain unique installation id
|
||||
sysinfo="$(uname -a)"
|
||||
if [ $? -ne 0 ]; then
|
||||
# sysinfo="$(uname -a)"
|
||||
# if [[ $? -ne 0 ]]; then
|
||||
# uuid="$(uuidgen)"
|
||||
# uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
|
||||
# sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
|
||||
# fi
|
||||
if ! sysinfo="$(uname -a)"; then
|
||||
uuid="$(uuidgen)"
|
||||
uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
|
||||
SIGNOZ_INSTALLATION_ID="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
|
||||
sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
|
||||
fi
|
||||
|
||||
digest_cmd=""
|
||||
if hash shasum 2>/dev/null; then
|
||||
digest_cmd="shasum -a 256"
|
||||
elif hash sha256sum 2>/dev/null; then
|
||||
digest_cmd="sha256sum"
|
||||
elif hash openssl 2>/dev/null; then
|
||||
digest_cmd="openssl dgst -sha256"
|
||||
fi
|
||||
|
||||
if [[ -z $digest_cmd ]]; then
|
||||
SIGNOZ_INSTALLATION_ID="$sysinfo"
|
||||
else
|
||||
SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | shasum | cut -d ' ' -f1)
|
||||
SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
|
||||
fi
|
||||
|
||||
# echo ""
|
||||
|
||||
# echo -e "👉 ${RED}Two ways to go forward\n"
|
||||
# echo -e "${RED}1) ClickHouse as database (default)\n"
|
||||
# echo -e "${RED}2) Kafka + Druid as datastore \n"
|
||||
# read -p "⚙️ Enter your preference (1/2):" choice_setup
|
||||
|
||||
# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
|
||||
@@ -323,8 +355,6 @@ fi
|
||||
|
||||
# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
|
||||
# setup_type='clickhouse'
|
||||
# else
|
||||
# setup_type='druid'
|
||||
# fi
|
||||
|
||||
setup_type='clickhouse'
|
||||
@@ -364,13 +394,7 @@ send_event() {
|
||||
'installation_error_checks')
|
||||
event="Installation Error - Checks"
|
||||
error="Containers not started"
|
||||
if [ $setup_type == 'clickhouse' ]; then
|
||||
others='"data": "some_checks",'
|
||||
else
|
||||
supervisors="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
|
||||
datasources="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
|
||||
others='"supervisors": "'"$supervisors"'", "datasources": "'"$datasources"'",'
|
||||
fi
|
||||
others='"data": "some_checks",'
|
||||
;;
|
||||
'installation_support')
|
||||
event="Installation Support"
|
||||
@@ -389,7 +413,7 @@ send_event() {
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "$error" != "" ]; then
|
||||
if [[ "$error" != "" ]]; then
|
||||
error='"error": "'"$error"'", '
|
||||
fi
|
||||
|
||||
@@ -412,15 +436,28 @@ fi
|
||||
|
||||
# Check if the Docker daemon is installed and available. If not, install and start Docker on Linux machines. We cannot automatically install Docker Desktop on Mac OS
|
||||
if ! is_command_present docker; then
|
||||
|
||||
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
|
||||
request_sudo
|
||||
install_docker
|
||||
else
|
||||
# enable docker without sudo from next reboot
|
||||
sudo usermod -aG docker "${USER}"
|
||||
elif is_mac; then
|
||||
echo ""
|
||||
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
|
||||
echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
|
||||
echo "https://docs.docker.com/docker-for-mac/install/"
|
||||
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
|
||||
|
||||
send_event "docker_not_installed"
|
||||
exit 1
|
||||
else
|
||||
echo ""
|
||||
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
|
||||
echo "Docker must be installed manually on your machine to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
|
||||
echo "https://docs.docker.com/get-docker/"
|
||||
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
|
||||
|
||||
send_event "docker_not_installed"
|
||||
exit 1
|
||||
fi
|
||||
@@ -428,43 +465,25 @@ fi
|
||||
|
||||
# Install docker-compose
|
||||
if ! is_command_present docker-compose; then
|
||||
request_sudo
|
||||
install_docker_compose
|
||||
fi
|
||||
|
||||
|
||||
start_docker
|
||||
|
||||
|
||||
# sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
|
||||
# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
|
||||
|
||||
|
||||
echo ""
|
||||
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
|
||||
if [ $setup_type == 'clickhouse' ]; then
|
||||
if is_arm64; then
|
||||
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull
|
||||
else
|
||||
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
|
||||
fi
|
||||
else
|
||||
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
|
||||
fi
|
||||
|
||||
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
|
||||
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
|
||||
|
||||
echo ""
|
||||
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
|
||||
echo
|
||||
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
|
||||
# script doesn't exit because this command looks like it failed to do its thing.
|
||||
if [ $setup_type == 'clickhouse' ]; then
|
||||
if is_arm64; then
|
||||
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true
|
||||
else
|
||||
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
|
||||
fi
|
||||
else
|
||||
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
|
||||
fi
|
||||
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
|
||||
|
||||
wait_for_containers_start 60
|
||||
echo ""
|
||||
@@ -473,11 +492,9 @@ if [[ $status_code -ne 200 ]]; then
|
||||
echo "+++++++++++ ERROR ++++++++++++++++++++++"
|
||||
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
|
||||
echo ""
|
||||
if [ $setup_type == 'clickhouse' ]; then
|
||||
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
|
||||
else
|
||||
echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
|
||||
fi
|
||||
|
||||
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
|
||||
|
||||
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
|
||||
echo "or reach us on SigNoz for support https://signoz.io/slack"
|
||||
echo "++++++++++++++++++++++++++++++++++++++++"
|
||||
@@ -494,21 +511,15 @@ else
|
||||
echo ""
|
||||
echo -e "🟢 Your frontend is running on http://localhost:3301"
|
||||
echo ""
|
||||
echo "ℹ️ By default, retention period is set to 7 days for logs and traces, and 30 days for metrics."
|
||||
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
|
||||
|
||||
if [ $setup_type == 'clickhouse' ]; then
|
||||
if is_arm64; then
|
||||
echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v"
|
||||
else
|
||||
echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
||||
fi
|
||||
else
|
||||
echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
|
||||
fi
|
||||
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
||||
|
||||
echo ""
|
||||
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
|
||||
echo ""
|
||||
echo "👉 Need help Getting Started?"
|
||||
echo "👉 Need help in Getting Started?"
|
||||
echo -e "Join us on Slack https://signoz.io/slack"
|
||||
echo ""
|
||||
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
|
||||
|
||||
37
ee/LICENSE
Normal file
@@ -0,0 +1,37 @@
|
||||
|
||||
The SigNoz Enterprise license (the "Enterprise License")
|
||||
Copyright (c) 2020 - present SigNoz Inc.
|
||||
|
||||
With regard to the SigNoz Software:
|
||||
|
||||
This software and associated documentation files (the "Software") may only be
|
||||
used in production, if you (and any entity that you represent) have agreed to,
|
||||
and are in compliance with, the SigNoz Subscription Terms of Service, available
|
||||
via email (hello@signoz.io) (the "Enterprise Terms"), or other
|
||||
agreement governing the use of the Software, as agreed by you and SigNoz,
|
||||
and otherwise have a valid SigNoz Enterprise license for the
|
||||
correct number of user seats. Subject to the foregoing sentence, you are free to
|
||||
modify this Software and publish patches to the Software. You agree that SigNoz
|
||||
and/or its licensors (as applicable) retain all right, title and interest in and
|
||||
to all such modifications and/or patches, and all such modifications and/or
|
||||
patches may only be used, copied, modified, displayed, distributed, or otherwise
|
||||
exploited with a valid SigNoz Enterprise license for the correct
|
||||
number of user seats. Notwithstanding the foregoing, you may copy and modify
|
||||
the Software for development and testing purposes, without requiring a
|
||||
subscription. You agree that SigNoz and/or its licensors (as applicable) retain
|
||||
all right, title and interest in and to all such modifications. You are not
|
||||
granted any other rights beyond what is expressly stated herein. Subject to the
|
||||
foregoing, it is forbidden to copy, merge, publish, distribute, sublicense,
|
||||
and/or sell the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
For all third party components incorporated into the SigNoz Software, those
|
||||
components are licensed under the original license provided by the owner of the
|
||||
applicable component.
|
||||
4
ee/query-service/.dockerignore
Normal file
@@ -0,0 +1,4 @@
|
||||
.vscode
|
||||
README.md
|
||||
signoz.db
|
||||
bin
|
||||
48
ee/query-service/Dockerfile
Normal file
@@ -0,0 +1,48 @@
|
||||
FROM golang:1.17-buster AS builder
|
||||
|
||||
# LD_FLAGS is passed as an argument from the Makefile. It will be empty if no argument is passed
|
||||
ARG LD_FLAGS
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
ENV CGO_ENABLED=1
|
||||
ENV GOPATH=/go
|
||||
|
||||
RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
|
||||
export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2)
|
||||
|
||||
# Prepare and enter src directory
|
||||
WORKDIR /go/src/github.com/signoz/signoz
|
||||
|
||||
# Add the sources and proceed with build
|
||||
ADD . .
|
||||
RUN cd ee/query-service \
|
||||
&& go build -tags timetzdata -a -o ./bin/query-service \
|
||||
-ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" \
|
||||
&& chmod +x ./bin/query-service
|
||||
|
||||
|
||||
# use a minimal alpine image
|
||||
FROM alpine:3.7
|
||||
|
||||
# Add Maintainer Info
|
||||
LABEL maintainer="signoz"
|
||||
|
||||
# add ca-certificates in case you need them
|
||||
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
|
||||
|
||||
# set working directory
|
||||
WORKDIR /root
|
||||
|
||||
# copy the binary from builder
|
||||
COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-service .
|
||||
|
||||
# copy prometheus YAML config
|
||||
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
|
||||
|
||||
# run the binary
|
||||
ENTRYPOINT ["./query-service"]
|
||||
|
||||
CMD ["-config", "../config/prometheus.yml"]
|
||||
# CMD ["./query-service -config /root/config/prometheus.yml"]
|
||||
|
||||
EXPOSE 8080
|
||||
131
ee/query-service/app/api/api.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"go.signoz.io/signoz/ee/query-service/dao"
|
||||
"go.signoz.io/signoz/ee/query-service/interfaces"
|
||||
"go.signoz.io/signoz/ee/query-service/license"
|
||||
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||
rules "go.signoz.io/signoz/pkg/query-service/rules"
|
||||
"go.signoz.io/signoz/pkg/query-service/version"
|
||||
)
|
||||
|
||||
type APIHandlerOptions struct {
|
||||
DataConnector interfaces.DataConnector
|
||||
AppDao dao.ModelDao
|
||||
RulesManager *rules.Manager
|
||||
FeatureFlags baseint.FeatureLookup
|
||||
LicenseManager *license.Manager
|
||||
}
|
||||
|
||||
type APIHandler struct {
|
||||
opts APIHandlerOptions
|
||||
baseapp.APIHandler
|
||||
}
|
||||
|
||||
// NewAPIHandler returns an APIHandler
|
||||
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
|
||||
|
||||
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
|
||||
Reader: opts.DataConnector,
|
||||
AppDao: opts.AppDao,
|
||||
RuleManager: opts.RulesManager,
|
||||
FeatureFlags: opts.FeatureFlags})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ah := &APIHandler{
|
||||
opts: opts,
|
||||
APIHandler: *baseHandler,
|
||||
}
|
||||
return ah, nil
|
||||
}
|
||||
|
||||
func (ah *APIHandler) FF() baseint.FeatureLookup {
|
||||
return ah.opts.FeatureFlags
|
||||
}
|
||||
|
||||
func (ah *APIHandler) RM() *rules.Manager {
|
||||
return ah.opts.RulesManager
|
||||
}
|
||||
|
||||
func (ah *APIHandler) LM() *license.Manager {
|
||||
return ah.opts.LicenseManager
|
||||
}
|
||||
|
||||
func (ah *APIHandler) AppDao() dao.ModelDao {
|
||||
return ah.opts.AppDao
|
||||
}
|
||||
|
||||
func (ah *APIHandler) CheckFeature(f string) bool {
|
||||
err := ah.FF().CheckFeature(f)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// RegisterRoutes registers routes for this handler on the given router
|
||||
func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
|
||||
// note: add ee override methods first
|
||||
|
||||
// routes available only in ee version
|
||||
router.HandleFunc("/api/v1/licenses",
|
||||
baseapp.AdminAccess(ah.listLicenses)).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
router.HandleFunc("/api/v1/licenses",
|
||||
baseapp.AdminAccess(ah.applyLicense)).
|
||||
Methods(http.MethodPost)
|
||||
|
||||
router.HandleFunc("/api/v1/featureFlags",
|
||||
baseapp.OpenAccess(ah.getFeatureFlags)).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
router.HandleFunc("/api/v1/loginPrecheck",
|
||||
baseapp.OpenAccess(ah.precheckLogin)).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
// paid plans specific routes
|
||||
router.HandleFunc("/api/v1/complete/saml",
|
||||
baseapp.OpenAccess(ah.receiveSAML)).
|
||||
Methods(http.MethodPost)
|
||||
|
||||
router.HandleFunc("/api/v1/complete/google",
|
||||
baseapp.OpenAccess(ah.receiveGoogleAuth)).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
router.HandleFunc("/api/v1/orgs/{orgId}/domains",
|
||||
baseapp.AdminAccess(ah.listDomainsByOrg)).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
router.HandleFunc("/api/v1/domains",
|
||||
baseapp.AdminAccess(ah.postDomain)).
|
||||
Methods(http.MethodPost)
|
||||
|
||||
router.HandleFunc("/api/v1/domains/{id}",
|
||||
baseapp.AdminAccess(ah.putDomain)).
|
||||
Methods(http.MethodPut)
|
||||
|
||||
router.HandleFunc("/api/v1/domains/{id}",
|
||||
baseapp.AdminAccess(ah.deleteDomain)).
|
||||
Methods(http.MethodDelete)
|
||||
|
||||
// base overrides
|
||||
router.HandleFunc("/api/v1/version", baseapp.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
|
||||
router.HandleFunc("/api/v1/invite/{token}", baseapp.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
|
||||
router.HandleFunc("/api/v1/register", baseapp.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
|
||||
router.HandleFunc("/api/v1/login", baseapp.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
|
||||
router.HandleFunc("/api/v1/traces/{traceId}", baseapp.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
|
||||
router.HandleFunc("/api/v2/metrics/query_range", baseapp.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)
|
||||
|
||||
ah.APIHandler.RegisterRoutes(router)
|
||||
|
||||
}
|
||||
|
||||
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
|
||||
version := version.GetVersion()
|
||||
ah.WriteJSON(w, r, map[string]string{"version": version, "ee": "Y"})
|
||||
}
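// Sketch only (not part of this change set): the EE override pattern used by
// several handlers registered above is to check a licensed feature via
// ah.CheckFeature and fall back to the community (base) handler when the
// feature is unavailable. The handler name, base method and feature flag below
// are hypothetical placeholders.
func (ah *APIHandler) someGatedHandler(w http.ResponseWriter, r *http.Request) {
	if !ah.CheckFeature("SOME_EE_FEATURE") { // hypothetical flag name
		ah.APIHandler.SomeBaseHandler(w, r) // delegate to the open-source handler
		return
	}
	// EE-only behaviour would go here.
	ah.Respond(w, map[string]string{"status": "ok"})
}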
|
||||
332
ee/query-service/app/api/auth.go
Normal file
@@ -0,0 +1,332 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"github.com/gorilla/mux"
|
||||
"go.signoz.io/signoz/ee/query-service/constants"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func parseRequest(r *http.Request, req interface{}) error {
|
||||
defer r.Body.Close()
|
||||
requestBody, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(requestBody, &req)
|
||||
return err
|
||||
}
|
||||
|
||||
// loginUser overrides base handler and considers SSO case.
|
||||
func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
req := basemodel.LoginRequest{}
|
||||
err := parseRequest(r, &req)
|
||||
if err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if req.Email != "" && ah.CheckFeature(model.SSO) {
|
||||
var apierr basemodel.BaseApiError
|
||||
_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
|
||||
if apierr != nil && !apierr.IsNil() {
|
||||
RespondError(w, apierr, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// if all looks good, call auth
|
||||
resp, err := auth.Login(ctx, &req)
|
||||
if ah.HandleError(w, err, http.StatusUnauthorized) {
|
||||
return
|
||||
}
|
||||
|
||||
ah.WriteJSON(w, r, resp)
|
||||
}
|
||||
|
||||
// registerUser registers a user and responds with a precheck
|
||||
// so the front-end can decide the login method
|
||||
func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if !ah.CheckFeature(model.SSO) {
|
||||
ah.APIHandler.Register(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
var req *baseauth.RegisterRequest
|
||||
|
||||
defer r.Body.Close()
|
||||
requestBody, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
zap.S().Errorf("received no input in api\n", err)
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal(requestBody, &req)
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("received invalid user registration request", zap.Error(err))
|
||||
RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
|
||||
return
|
||||
}
|
||||
|
||||
// get invite object
|
||||
invite, err := baseauth.ValidateInvite(ctx, req)
|
||||
if err != nil || invite == nil {
|
||||
zap.S().Errorf("failed to validate invite token", err)
|
||||
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
|
||||
}
|
||||
|
||||
// get auth domain from email domain
|
||||
domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
|
||||
if apierr != nil {
|
||||
zap.S().Errorf("failed to get domain from email", apierr)
|
||||
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
|
||||
}
|
||||
|
||||
precheckResp := &model.PrecheckResponse{
|
||||
SSO: false,
|
||||
IsUser: false,
|
||||
}
|
||||
|
||||
if domain != nil && domain.SsoEnabled {
|
||||
// sso is enabled, create user and respond with precheck data
|
||||
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
var precheckError basemodel.BaseApiError
|
||||
|
||||
precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
|
||||
if precheckError != nil {
|
||||
RespondError(w, precheckError, precheckResp)
|
||||
}
|
||||
|
||||
} else {
|
||||
// no-sso, validate password
|
||||
if err := auth.ValidatePassword(req.Password); err != nil {
|
||||
RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
|
||||
return
|
||||
}
|
||||
|
||||
_, registerError := baseauth.Register(ctx, req)
|
||||
if !registerError.IsNil() {
|
||||
RespondError(w, registerError, nil)
|
||||
return
|
||||
}
|
||||
|
||||
precheckResp.IsUser = true
|
||||
}
|
||||
|
||||
ah.Respond(w, precheckResp)
|
||||
}
|
||||
|
||||
// getInvite returns the invite object details for the given invite token. We do not need to
|
||||
// protect this API because invite token itself is meant to be private.
|
||||
func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
|
||||
token := mux.Vars(r)["token"]
|
||||
sourceUrl := r.URL.Query().Get("ref")
|
||||
ctx := context.Background()
|
||||
|
||||
inviteObject, err := baseauth.GetInvite(context.Background(), token)
|
||||
if err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
resp := model.GettableInvitation{
|
||||
InvitationResponseObject: inviteObject,
|
||||
}
|
||||
|
||||
precheck, apierr := ah.AppDao().PrecheckLogin(ctx, inviteObject.Email, sourceUrl)
|
||||
resp.Precheck = precheck
|
||||
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, resp)
|
||||
}
|
||||
|
||||
ah.WriteJSON(w, r, resp)
|
||||
}
|
||||
|
||||
// PrecheckLogin enables browser login page to display appropriate
|
||||
// login methods
|
||||
func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
|
||||
email := r.URL.Query().Get("email")
|
||||
sourceUrl := r.URL.Query().Get("ref")
|
||||
|
||||
resp, apierr := ah.AppDao().PrecheckLogin(ctx, email, sourceUrl)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, resp)
|
||||
}
|
||||
|
||||
ah.Respond(w, resp)
|
||||
}
|
||||
|
||||
func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
|
||||
ssoError := []byte("Login failed. Please contact your system administrator")
|
||||
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
|
||||
base64.StdEncoding.Encode(dst, ssoError)
|
||||
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
|
||||
}
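// Illustrative only (example URL assumed): with redirectURL
// "https://signoz.example.com/login", handleSsoError sends the browser to
//   https://signoz.example.com/login?ssoerror=<base64 of the message above>
// so the front-end can base64-decode the ssoerror query parameter and display it.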
|
||||
|
||||
// receiveGoogleAuth completes google OAuth response and forwards a request
|
||||
// to front-end to sign user in
|
||||
func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
|
||||
redirectUri := constants.GetDefaultSiteURL()
|
||||
ctx := context.Background()
|
||||
|
||||
if !ah.CheckFeature(model.SSO) {
|
||||
zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
|
||||
q := r.URL.Query()
|
||||
if errType := q.Get("error"); errType != "" {
|
||||
zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
|
||||
relayState := q.Get("state")
|
||||
zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
|
||||
|
||||
parsedState, err := url.Parse(relayState)
|
||||
if err != nil || relayState == "" {
|
||||
zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
// upgrade redirect url from the relay state for better accuracy
|
||||
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
|
||||
|
||||
// fetch domain by parsing relay state.
|
||||
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
|
||||
if err != nil {
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
// now that we have domain, use domain to fetch sso settings.
|
||||
// prepare google callback handler using parsedState -
|
||||
// which contains redirect URL (front-end endpoint)
|
||||
callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
|
||||
|
||||
identity, err := callbackHandler.HandleCallback(r)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
http.Redirect(w, r, nextPage, http.StatusSeeOther)
|
||||
}
|
||||
|
||||
|
||||
|
||||
// receiveSAML completes a SAML request and gets user logged in
|
||||
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
|
||||
// this is the source url that initiated the login request
|
||||
redirectUri := constants.GetDefaultSiteURL()
|
||||
ctx := context.Background()
|
||||
|
||||
|
||||
if !ah.CheckFeature(model.SSO) {
|
||||
zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
// the relay state is sent when a login request is submitted to
|
||||
// Idp.
|
||||
relayState := r.FormValue("RelayState")
|
||||
zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
|
||||
|
||||
parsedState, err := url.Parse(relayState)
|
||||
if err != nil || relayState == "" {
|
||||
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
// upgrade redirect url from the relay state for better accuracy
|
||||
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
|
||||
|
||||
// fetch domain by parsing relay state.
|
||||
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
|
||||
if err != nil {
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
sp, err := domain.PrepareSamlRequest(parsedState)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
|
||||
if err != nil {
|
||||
zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
if assertionInfo.WarningInfo.InvalidTime {
|
||||
zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
email := assertionInfo.NameID
|
||||
if email == "" {
|
||||
zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
|
||||
handleSsoError(w, r, redirectUri)
|
||||
return
|
||||
}
|
||||
|
||||
http.Redirect(w, r, nextPage, http.StatusSeeOther)
|
||||
}
|
||||
90
ee/query-service/app/api/domains.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/gorilla/mux"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
)
|
||||
|
||||
func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
|
||||
orgId := mux.Vars(r)["orgId"]
|
||||
domains, apierr := ah.AppDao().ListDomains(context.Background(), orgId)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, domains)
|
||||
return
|
||||
}
|
||||
ah.Respond(w, domains)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
|
||||
req := model.OrgDomain{}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if err := req.ValidNew(); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if apierr := ah.AppDao().CreateDomain(ctx, &req); apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
ah.Respond(w, &req)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
|
||||
domainIdStr := mux.Vars(r)["id"]
|
||||
domainId, err := uuid.Parse(domainIdStr)
|
||||
if err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
req := model.OrgDomain{Id: domainId}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
req.Id = domainId
|
||||
if err := req.Valid(nil); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
}
|
||||
|
||||
if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
ah.Respond(w, &req)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
|
||||
domainIdStr := mux.Vars(r)["id"]
|
||||
|
||||
domainId, err := uuid.Parse(domainIdStr)
|
||||
if err != nil {
|
||||
RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
|
||||
return
|
||||
}
|
||||
|
||||
apierr := ah.AppDao().DeleteDomain(context.Background(), domainId)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
ah.Respond(w, nil)
|
||||
}
|
||||
10
ee/query-service/app/api/featureFlags.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
|
||||
featureSet := ah.FF().GetFeatureFlags()
|
||||
ah.Respond(w, featureSet)
|
||||
}
|
||||
40
ee/query-service/app/api/license.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
|
||||
licenses, apiError := ah.LM().GetLicenses(context.Background())
|
||||
if apiError != nil {
|
||||
RespondError(w, apiError, nil)
|
||||
}
|
||||
ah.Respond(w, licenses)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
var l model.License
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if l.Key == "" {
|
||||
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
|
||||
return
|
||||
}
|
||||
|
||||
license, apiError := ah.LM().Activate(ctx, l.Key)
|
||||
if apiError != nil {
|
||||
RespondError(w, apiError, nil)
|
||||
return
|
||||
}
|
||||
|
||||
ah.Respond(w, license)
|
||||
}
|
||||
236
ee/query-service/app/api/metrics.go
Normal file
@@ -0,0 +1,236 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"go.signoz.io/signoz/pkg/query-service/app/metrics"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/parser"
|
||||
"go.signoz.io/signoz/pkg/query-service/constants"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
querytemplate "go.signoz.io/signoz/pkg/query-service/utils/queryTemplate"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) {
|
||||
if !ah.CheckFeature(basemodel.CustomMetricsFunction) {
|
||||
zap.S().Info("CustomMetricsFunction feature is not enabled in this plan")
|
||||
ah.APIHandler.QueryRangeMetricsV2(w, r)
|
||||
return
|
||||
}
|
||||
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
|
||||
|
||||
if apiErrorObj != nil {
|
||||
zap.S().Errorf(apiErrorObj.Err.Error())
|
||||
RespondError(w, apiErrorObj, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// prometheus instant query needs same timestamp
|
||||
if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
|
||||
metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.PROM {
|
||||
metricsQueryRangeParams.Start = metricsQueryRangeParams.End
|
||||
}
|
||||
|
||||
// round down the end to the nearest multiple of step
|
||||
if metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER {
|
||||
end := (metricsQueryRangeParams.End) / 1000
|
||||
step := metricsQueryRangeParams.Step
|
||||
metricsQueryRangeParams.End = (end / step * step) * 1000
|
||||
}
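// Illustrative only (values assumed): with End = 1700000123456 ms and Step = 60 s,
// end = 1700000123 and end/step*step = 1700000100, so End becomes 1700000100000 ms,
// i.e. the timestamp is floored to the previous step boundary before querying.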
|
||||
|
||||
type channelResult struct {
|
||||
Series []*basemodel.Series
|
||||
TableName string
|
||||
Err error
|
||||
Name string
|
||||
Query string
|
||||
}
|
||||
|
||||
execClickHouseQueries := func(queries map[string]string) ([]*basemodel.Series, []string, error, map[string]string) {
|
||||
var seriesList []*basemodel.Series
|
||||
var tableName []string
|
||||
ch := make(chan channelResult, len(queries))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for name, query := range queries {
|
||||
wg.Add(1)
|
||||
go func(name, query string) {
|
||||
defer wg.Done()
|
||||
seriesList, tableName, err := ah.opts.DataConnector.GetMetricResultEE(r.Context(), query)
|
||||
for _, series := range seriesList {
|
||||
series.QueryName = name
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
|
||||
return
|
||||
}
|
||||
ch <- channelResult{Series: seriesList, TableName: tableName}
|
||||
}(name, query)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
|
||||
var errs []error
|
||||
errQuriesByName := make(map[string]string)
|
||||
// read values from the channel
|
||||
for r := range ch {
|
||||
if r.Err != nil {
|
||||
errs = append(errs, r.Err)
|
||||
errQuriesByName[r.Name] = r.Query
|
||||
continue
|
||||
}
|
||||
seriesList = append(seriesList, r.Series...)
|
||||
tableName = append(tableName, r.TableName)
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return nil, nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
|
||||
}
|
||||
return seriesList, tableName, nil, nil
|
||||
}
|
||||
|
||||
execPromQueries := func(metricsQueryRangeParams *basemodel.QueryRangeParamsV2) ([]*basemodel.Series, error, map[string]string) {
|
||||
var seriesList []*basemodel.Series
|
||||
ch := make(chan channelResult, len(metricsQueryRangeParams.CompositeMetricQuery.PromQueries))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for name, query := range metricsQueryRangeParams.CompositeMetricQuery.PromQueries {
|
||||
if query.Disabled {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(name string, query *basemodel.PromQuery) {
|
||||
var seriesList []*basemodel.Series
|
||||
defer wg.Done()
|
||||
tmpl := template.New("promql-query")
|
||||
tmpl, tmplErr := tmpl.Parse(query.Query)
|
||||
if tmplErr != nil {
|
||||
ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
|
||||
return
|
||||
}
|
||||
var queryBuf bytes.Buffer
|
||||
tmplErr = tmpl.Execute(&queryBuf, metricsQueryRangeParams.Variables)
|
||||
if tmplErr != nil {
|
||||
ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
|
||||
return
|
||||
}
|
||||
query.Query = queryBuf.String()
|
||||
queryModel := basemodel.QueryRangeParams{
|
||||
Start: time.UnixMilli(metricsQueryRangeParams.Start),
|
||||
End: time.UnixMilli(metricsQueryRangeParams.End),
|
||||
Step: time.Duration(metricsQueryRangeParams.Step * int64(time.Second)),
|
||||
Query: query.Query,
|
||||
}
|
||||
promResult, _, err := ah.opts.DataConnector.GetQueryRangeResult(r.Context(), &queryModel)
|
||||
if err != nil {
|
||||
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query.Query}
|
||||
return
|
||||
}
|
||||
matrix, _ := promResult.Matrix()
|
||||
for _, v := range matrix {
|
||||
var s basemodel.Series
|
||||
s.QueryName = name
|
||||
s.Labels = v.Metric.Copy().Map()
|
||||
for _, p := range v.Points {
|
||||
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.V})
|
||||
}
|
||||
seriesList = append(seriesList, &s)
|
||||
}
|
||||
ch <- channelResult{Series: seriesList}
|
||||
}(name, query)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
|
||||
var errs []error
|
||||
errQuriesByName := make(map[string]string)
|
||||
// read values from the channel
|
||||
for r := range ch {
|
||||
if r.Err != nil {
|
||||
errs = append(errs, r.Err)
|
||||
errQuriesByName[r.Name] = r.Query
|
||||
continue
|
||||
}
|
||||
seriesList = append(seriesList, r.Series...)
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
|
||||
}
|
||||
return seriesList, nil, nil
|
||||
}
|
||||
|
||||
var seriesList []*basemodel.Series
|
||||
var tableName []string
|
||||
var err error
|
||||
var errQuriesByName map[string]string
|
||||
switch metricsQueryRangeParams.CompositeMetricQuery.QueryType {
|
||||
case basemodel.QUERY_BUILDER:
|
||||
runQueries := metrics.PrepareBuilderMetricQueries(metricsQueryRangeParams, constants.SIGNOZ_TIMESERIES_TABLENAME)
|
||||
if runQueries.Err != nil {
|
||||
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: runQueries.Err}, nil)
|
||||
return
|
||||
}
|
||||
seriesList, tableName, err, errQuriesByName = execClickHouseQueries(runQueries.Queries)
|
||||
|
||||
case basemodel.CLICKHOUSE:
|
||||
queries := make(map[string]string)
|
||||
|
||||
for name, chQuery := range metricsQueryRangeParams.CompositeMetricQuery.ClickHouseQueries {
|
||||
if chQuery.Disabled {
|
||||
continue
|
||||
}
|
||||
tmpl := template.New("clickhouse-query")
|
||||
tmpl, err := tmpl.Parse(chQuery.Query)
|
||||
if err != nil {
|
||||
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
|
||||
return
|
||||
}
|
||||
var query bytes.Buffer
|
||||
|
||||
// replace go template variables
|
||||
querytemplate.AssignReservedVars(metricsQueryRangeParams)
|
||||
|
||||
err = tmpl.Execute(&query, metricsQueryRangeParams.Variables)
|
||||
if err != nil {
|
||||
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
|
||||
return
|
||||
}
|
||||
queries[name] = query.String()
|
||||
}
|
||||
seriesList, tableName, err, errQuriesByName = execClickHouseQueries(queries)
|
||||
case basemodel.PROM:
|
||||
seriesList, err, errQuriesByName = execPromQueries(metricsQueryRangeParams)
|
||||
default:
|
||||
err = fmt.Errorf("invalid query type")
|
||||
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, errQuriesByName)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
apiErrObj := &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}
|
||||
RespondError(w, apiErrObj, errQuriesByName)
|
||||
return
|
||||
}
|
||||
if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
|
||||
len(seriesList) > 1 &&
|
||||
(metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER ||
|
||||
metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.CLICKHOUSE) {
|
||||
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: fmt.Errorf("invalid: query resulted in more than one series for value type")}, nil)
|
||||
return
|
||||
}
|
||||
|
||||
type ResponseFormat struct {
|
||||
ResultType string `json:"resultType"`
|
||||
Result []*basemodel.Series `json:"result"`
|
||||
TableName []string `json:"tableName"`
|
||||
}
|
||||
resp := ResponseFormat{ResultType: "matrix", Result: seriesList, TableName: tableName}
|
||||
ah.Respond(w, resp)
|
||||
}
|
||||
12
ee/query-service/app/api/response.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {
|
||||
baseapp.RespondError(w, apiErr, data)
|
||||
}
|
||||
39
ee/query-service/app/api/traces.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"go.signoz.io/signoz/ee/query-service/app/db"
|
||||
"go.signoz.io/signoz/ee/query-service/constants"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if !ah.CheckFeature(basemodel.SmartTraceDetail) {
|
||||
zap.S().Info("SmartTraceDetail feature is not enabled in this plan")
|
||||
ah.APIHandler.SearchTraces(w, r)
|
||||
return
|
||||
}
|
||||
traceId, spanId, levelUpInt, levelDownInt, err := baseapp.ParseSearchTracesParams(r)
|
||||
if err != nil {
|
||||
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
|
||||
return
|
||||
}
|
||||
spanLimit, err := strconv.Atoi(constants.SpanLimitStr)
|
||||
if err != nil {
|
||||
zap.S().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable: ", err)
|
||||
return
|
||||
}
|
||||
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm)
|
||||
if ah.HandleError(w, err, http.StatusBadRequest) {
|
||||
return
|
||||
}
|
||||
|
||||
ah.WriteJSON(w, r, result)
|
||||
|
||||
}
|
||||
401
ee/query-service/app/db/metrics.go
Normal file
@@ -0,0 +1,401 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.signoz.io/signoz/pkg/query-service/utils"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// GetMetricResultEE runs the query and returns list of time series
|
||||
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
|
||||
|
||||
defer utils.Elapsed("GetMetricResult")()
|
||||
zap.S().Infof("Executing metric result query: %s", query)
|
||||
|
||||
var hash string
|
||||
// If getSubTreeSpans function is used in the clickhouse query
|
||||
if strings.Contains(query, "getSubTreeSpans(") {
|
||||
var err error
|
||||
query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
|
||||
if err != nil && err.Error() == "No spans found for the given query" {
|
||||
return nil, "", nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
}
|
||||
|
||||
rows, err := r.conn.Query(ctx, query)
|
||||
zap.S().Debug(query)
|
||||
if err != nil {
|
||||
zap.S().Debug("Error in processing query: ", err)
|
||||
return nil, "", fmt.Errorf("error in processing query")
|
||||
}
|
||||
|
||||
var (
|
||||
columnTypes = rows.ColumnTypes()
|
||||
columnNames = rows.Columns()
|
||||
vars = make([]interface{}, len(columnTypes))
|
||||
)
|
||||
for i := range columnTypes {
|
||||
vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
|
||||
}
|
||||
// when group by is applied, each combination of cartesian product
|
||||
// of attributes is a separate series. each item in metricPointsMap
|
||||
// represents a unique series.
|
||||
metricPointsMap := make(map[string][]basemodel.MetricPoint)
|
||||
// attribute key-value pairs for each group selection
|
||||
attributesMap := make(map[string]map[string]string)
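// Illustrative only (label names assumed): for a query grouped by service and
// operation, each unique combination of label values forms one key (the sorted,
// concatenated group-by values), so that, for example,
//   metricPointsMap[key] holds the ordered points of that series, and
//   attributesMap[key] holds {"service_name": "frontend", "operation": "/find"};
// each key is turned into one basemodel.Series further below.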
|
||||
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
if err := rows.Scan(vars...); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
var groupBy []string
|
||||
var metricPoint basemodel.MetricPoint
|
||||
groupAttributes := make(map[string]string)
|
||||
// Assuming that the end result row contains a timestamp, a value, and optional labels
|
||||
// Label key and value are both strings.
|
||||
for idx, v := range vars {
|
||||
colName := columnNames[idx]
|
||||
switch v := v.(type) {
|
||||
case *string:
|
||||
// special case for returning all labels
|
||||
if colName == "fullLabels" {
|
||||
var metric map[string]string
|
||||
err := json.Unmarshal([]byte(*v), &metric)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
for key, val := range metric {
|
||||
groupBy = append(groupBy, val)
|
||||
groupAttributes[key] = val
|
||||
}
|
||||
} else {
|
||||
groupBy = append(groupBy, *v)
|
||||
groupAttributes[colName] = *v
|
||||
}
|
||||
case *time.Time:
|
||||
metricPoint.Timestamp = v.UnixMilli()
|
||||
case *float64:
|
||||
metricPoint.Value = *v
|
||||
case **float64:
|
||||
// ch seems to return this type when column is derived from
|
||||
// SELECT count(*)/ SELECT count(*)
|
||||
floatVal := *v
|
||||
if floatVal != nil {
|
||||
metricPoint.Value = *floatVal
|
||||
}
|
||||
case *float32:
|
||||
float32Val := float32(*v)
|
||||
metricPoint.Value = float64(float32Val)
|
||||
case *uint8, *uint64, *uint16, *uint32:
|
||||
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
|
||||
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
|
||||
} else {
|
||||
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
|
||||
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
|
||||
}
|
||||
case *int8, *int16, *int32, *int64:
|
||||
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
|
||||
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
|
||||
} else {
|
||||
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
|
||||
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
|
||||
}
|
||||
default:
|
||||
zap.S().Errorf("invalid var found in metric builder query result", v, colName)
|
||||
}
|
||||
}
|
||||
sort.Strings(groupBy)
|
||||
key := strings.Join(groupBy, "")
|
||||
attributesMap[key] = groupAttributes
|
||||
metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
|
||||
}
|
||||
|
||||
var seriesList []*basemodel.Series
|
||||
for key := range metricPointsMap {
|
||||
points := metricPointsMap[key]
|
||||
// first point in each series could be invalid since the
|
||||
// aggregations are applied with a point from the previous series
|
||||
if len(points) > 1 {
|
||||
points = points[1:]
|
||||
}
|
||||
attributes := attributesMap[key]
|
||||
series := basemodel.Series{Labels: attributes, Points: points}
|
||||
seriesList = append(seriesList, &series)
|
||||
}
|
||||
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
|
||||
// if err != nil {
|
||||
// zap.S().Error("Error in dropping temporary table: ", err)
|
||||
// return nil, err
|
||||
// }
|
||||
if hash == "" {
|
||||
return seriesList, hash, nil
|
||||
} else {
|
||||
return seriesList, "getSubTreeSpans" + hash, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
|
||||
|
||||
zap.S().Debugf("Executing getSubTreeSpans function")
|
||||
|
||||
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
|
||||
|
||||
// process the query to fetch subTree query
|
||||
var subtreeInput string
|
||||
query, subtreeInput, hash = processQuery(query, hash)
|
||||
|
||||
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
|
||||
if err != nil {
|
||||
zap.S().Error("Error in dropping temporary table: ", err)
|
||||
return query, hash, err
|
||||
}
|
||||
|
||||
// Create temporary table to store the getSubTreeSpans() results
|
||||
zap.S().Debugf("Creating temporary table getSubTreeSpans%s", hash)
|
||||
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
|
||||
if err != nil {
|
||||
zap.S().Error("Error in creating temporary table: ", err)
|
||||
return query, hash, err
|
||||
}
|
||||
|
||||
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
|
||||
getSpansSubQuery := subtreeInput
|
||||
// Execute the subTree query
|
||||
zap.S().Debugf("Executing subTree query: %s", getSpansSubQuery)
|
||||
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
|
||||
|
||||
// zap.S().Info(getSpansSubQuery)
|
||||
|
||||
if err != nil {
|
||||
zap.S().Debug("Error in processing sql query: ", err)
|
||||
return query, hash, fmt.Errorf("Error in processing sql query")
|
||||
}
|
||||
|
||||
var searchScanResponses []basemodel.SearchSpanDBResponseItem
|
||||
|
||||
// TODO : @ankit: I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this as an improvement later.
|
||||
// Fetch all the spans of the same TraceID so that we can build the subtree
|
||||
modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
|
||||
|
||||
if len(getSpansSubQueryDBResponses) == 0 {
|
||||
return query, hash, fmt.Errorf("No spans found for the given query")
|
||||
}
|
||||
zap.S().Debugf("Executing query to fetch all the spans from the same TraceID: %s", modelQuery)
|
||||
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
|
||||
|
||||
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return query, hash, fmt.Errorf("error in processing sql query: %v", err)
}
|
||||
|
||||
// Process model to fetch the spans
|
||||
zap.S().Debugf("Processing model to fetch the spans")
|
||||
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
|
||||
for _, item := range searchScanResponses {
|
||||
var jsonItem basemodel.SearchSpanResponseItem
|
||||
json.Unmarshal([]byte(item.Model), &jsonItem)
|
||||
jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano())
|
||||
if jsonItem.Events == nil {
|
||||
jsonItem.Events = []string{}
|
||||
}
|
||||
searchSpanResponses = append(searchSpanResponses, jsonItem)
|
||||
}
|
||||
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
|
||||
// Use map to store pointer to the spans to avoid duplicates and save memory
|
||||
zap.S().Debugf("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans%s", hash)
|
||||
|
||||
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
|
||||
if err != nil {
|
||||
zap.S().Error("Error in getSubTreeAlgorithm function: ", err)
|
||||
return query, hash, err
|
||||
}
|
||||
zap.S().Debugf("Preparing batch to store subtree spans in temporary table getSubTreeSpans%s", hash)
|
||||
statement, err := r.conn.PrepareBatch(context.Background(), "INSERT INTO getSubTreeSpans"+hash)
|
||||
if err != nil {
|
||||
zap.S().Error("Error in preparing batch statement: ", err)
|
||||
return query, hash, err
|
||||
}
|
||||
for _, span := range treeSearchResponse {
|
||||
var parentID string
|
||||
if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" {
|
||||
parentID = span.References[0].SpanId
|
||||
}
|
||||
err = statement.Append(
|
||||
time.Unix(0, int64(span.TimeUnixNano)),
|
||||
span.TraceID,
|
||||
span.SpanID,
|
||||
parentID,
|
||||
span.RootSpanID,
|
||||
span.ServiceName,
|
||||
span.Name,
|
||||
span.RootName,
|
||||
uint64(span.DurationNano),
|
||||
int8(span.Kind),
|
||||
span.TagMap,
|
||||
span.Events,
|
||||
)
|
||||
if err != nil {
|
||||
zap.S().Debug("Error in processing sql query: ", err)
|
||||
return query, hash, err
|
||||
}
|
||||
}
|
||||
zap.S().Debugf("Inserting the subtree spans in temporary table getSubTreeSpans%s", hash)
|
||||
err = statement.Send()
|
||||
if err != nil {
|
||||
zap.S().Error("Error in sending statement: ", err)
|
||||
return query, hash, err
|
||||
}
|
||||
return query, hash, nil
|
||||
}
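The function hands back the rewritten query plus the hash that names the temporary table it just populated. A minimal caller sketch under those assumptions (runSubTreeQuery is hypothetical and not part of this diff; it only reuses the clickhouse-go v2 connection methods already used above):

func (r *ClickhouseReader) runSubTreeQuery(ctx context.Context, userQuery string) error {
	// Rewrite the query and materialize the subtree spans into getSubTreeSpans<hash>.
	rewritten, hash, err := r.getSubTreeSpansCustomFunction(ctx, userQuery, "")
	if err != nil {
		return err
	}

	// The rewritten query now selects from the temporary table, so it can run directly.
	rows, err := r.conn.Query(ctx, rewritten)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		// Scan whatever columns the caller's SELECT list produces.
	}

	// Drop the temporary table once the result has been consumed.
	return r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
}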
|
||||
|
||||
func processQuery(query string, hash string) (string, string, string) {
	re3 := regexp.MustCompile(`getSubTreeSpans`)

	submatchall3 := re3.FindAllStringIndex(query, -1)
	getSubtreeSpansMatchIndex := submatchall3[0][1]

	query2countParenthesis := query[getSubtreeSpansMatchIndex:]

	sqlCompleteIndex := 0
	countParenthesisImbalance := 0
	for i, char := range query2countParenthesis {

		if string(char) == "(" {
			countParenthesisImbalance += 1
		}
		if string(char) == ")" {
			countParenthesisImbalance -= 1
		}
		if countParenthesisImbalance == 0 {
			sqlCompleteIndex = i
			break
		}
	}
	subtreeInput := query2countParenthesis[1:sqlCompleteIndex]

	// hash the subtreeInput
	hmd5 := md5.Sum([]byte(subtreeInput))
	hash = fmt.Sprintf("%x", hmd5)

	// Reformat the query to use the getSubTreeSpans function
	query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:]
	return query, subtreeInput, hash
}
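A worked example of the rewrite, assuming a query shaped like the commented sample above (exampleProcessQuery and the literal query are illustrative; the incoming hash argument is recomputed inside, so an empty string is fine, and the hash in the comments stands for whatever md5 of the subquery text happens to be):

func exampleProcessQuery() {
	in := "select count() from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where name='FindDriverIDs') group by name"
	out, subtree, hash := processQuery(in, "")

	// subtree: select * from signoz_traces.signoz_index_v2 where name='FindDriverIDs'
	// hash:    md5 of the subtree text, e.g. "9c4f..."
	// out:     select count() from getSubTreeSpans<hash>  group by name
	fmt.Println(out, subtree, hash)
}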
|
||||
|
||||
// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans
|
||||
func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) {
|
||||
|
||||
var spans []*model.SpanForTraceDetails
|
||||
for _, spanItem := range payload {
|
||||
var parentID string
|
||||
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
|
||||
parentID = spanItem.References[0].SpanId
|
||||
}
|
||||
span := &model.SpanForTraceDetails{
|
||||
TimeUnixNano: spanItem.TimeUnixNano,
|
||||
SpanID: spanItem.SpanID,
|
||||
TraceID: spanItem.TraceID,
|
||||
ServiceName: spanItem.ServiceName,
|
||||
Name: spanItem.Name,
|
||||
Kind: spanItem.Kind,
|
||||
DurationNano: spanItem.DurationNano,
|
||||
TagMap: spanItem.TagMap,
|
||||
ParentID: parentID,
|
||||
Events: spanItem.Events,
|
||||
HasError: spanItem.HasError,
|
||||
}
|
||||
spans = append(spans, span)
|
||||
}
|
||||
|
||||
zap.S().Debug("Building Tree")
|
||||
roots, err := buildSpanTrees(&spans)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem)
|
||||
// Every span fetched by the getSubTreeSpans input SQL query is considered a root
// For each root, get the subtree spans
|
||||
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
|
||||
targetSpan := &model.SpanForTraceDetails{}
|
||||
// zap.S().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
|
||||
// Search target span object in the tree
|
||||
for _, root := range roots {
|
||||
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
|
||||
if targetSpan != nil {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
zap.S().Error("Error during BreadthFirstSearch(): ", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if targetSpan == nil {
|
||||
return nil, nil
|
||||
}
|
||||
// Build subtree for the target span
|
||||
// Mark the target span as root by setting parent ID as empty string
|
||||
targetSpan.ParentID = ""
|
||||
preParents := []*model.SpanForTraceDetails{targetSpan}
|
||||
children := []*model.SpanForTraceDetails{}
|
||||
|
||||
// Get the subtree child spans
|
||||
for i := 0; len(preParents) != 0; i++ {
|
||||
parents := []*model.SpanForTraceDetails{}
|
||||
for _, parent := range preParents {
|
||||
children = append(children, parent.Children...)
|
||||
parents = append(parents, parent.Children...)
|
||||
}
|
||||
preParents = parents
|
||||
}
|
||||
|
||||
resultSpans := children
|
||||
// Add the target span to the result spans
|
||||
resultSpans = append(resultSpans, targetSpan)
|
||||
|
||||
for _, item := range resultSpans {
|
||||
references := []basemodel.OtelSpanRef{
|
||||
{
|
||||
TraceId: item.TraceID,
|
||||
SpanId: item.ParentID,
|
||||
RefType: "CHILD_OF",
|
||||
},
|
||||
}
|
||||
|
||||
if item.Events == nil {
|
||||
item.Events = []string{}
|
||||
}
|
||||
searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{
|
||||
TimeUnixNano: item.TimeUnixNano,
|
||||
SpanID: item.SpanID,
|
||||
TraceID: item.TraceID,
|
||||
ServiceName: item.ServiceName,
|
||||
Name: item.Name,
|
||||
Kind: item.Kind,
|
||||
References: references,
|
||||
DurationNano: item.DurationNano,
|
||||
TagMap: item.TagMap,
|
||||
Events: item.Events,
|
||||
HasError: item.HasError,
|
||||
RootSpanID: getSpansSubQueryDBResponse.SpanID,
|
||||
RootName: targetSpan.Name,
|
||||
}
|
||||
}
|
||||
}
|
||||
return searchSpansResult, nil
|
||||
}
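A small, illustrative invocation, assuming only the basemodel and model field names already used above (exampleSubTree and the literal span IDs are made up): one root span "A" with a child "B", where "A" is the span returned by the subtree sub-query.

func exampleSubTree() {
	spans := []basemodel.SearchSpanResponseItem{
		{SpanID: "A", TraceID: "t1", Name: "parent"},
		{SpanID: "B", TraceID: "t1", Name: "child",
			References: []basemodel.OtelSpanRef{{TraceId: "t1", SpanId: "A", RefType: "CHILD_OF"}}},
	}
	targets := []model.GetSpansSubQueryDBResponse{{SpanID: "A", TraceID: "t1"}}

	subtree, err := getSubTreeAlgorithm(spans, targets)
	if err != nil {
		return
	}
	// subtree is keyed by SpanID; both "A" and "B" are present and each
	// entry carries RootSpanID "A" and RootName "parent".
	_ = subtree
}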
29 ee/query-service/app/db/reader.go Normal file
@@ -0,0 +1,29 @@

package db

import (
	"github.com/ClickHouse/clickhouse-go/v2"

	"github.com/jmoiron/sqlx"

	basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
)

type ClickhouseReader struct {
	conn  clickhouse.Conn
	appdb *sqlx.DB
	*basechr.ClickHouseReader
}

func NewDataConnector(localDB *sqlx.DB, promConfigPath string, lm interfaces.FeatureLookup) *ClickhouseReader {
	ch := basechr.NewReader(localDB, promConfigPath, lm)
	return &ClickhouseReader{
		conn:             ch.GetConn(),
		appdb:            localDB,
		ClickHouseReader: ch,
	}
}

func (r *ClickhouseReader) Start(readerReady chan bool) {
	r.ClickHouseReader.Start(readerReady)
}
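Because *basechr.ClickHouseReader is embedded, every base reader method is promoted onto the EE type, while the EE-only helpers (such as the getSubTreeSpans functions above) sit alongside them. A compile-time assertion is a cheap way to keep that contract visible; this is a sketch and assumes the EE interfaces package referenced by server.go below:

// eeinterfaces stands in for "go.signoz.io/signoz/ee/query-service/interfaces",
// whose DataConnector interface is what server.go assigns this reader to.
var _ eeinterfaces.DataConnector = (*ClickhouseReader)(nil)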
222 ee/query-service/app/db/trace.go Normal file
@@ -0,0 +1,222 @@
package db
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// SmartTraceAlgorithm is an algorithm to find the target span and build a tree of spans around it with the given levelUp and levelDown parameters and the given spanLimit
|
||||
func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
|
||||
var spans []*model.SpanForTraceDetails
|
||||
|
||||
// Build a slice of spans from the payload
|
||||
for _, spanItem := range payload {
|
||||
var parentID string
|
||||
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
|
||||
parentID = spanItem.References[0].SpanId
|
||||
}
|
||||
span := &model.SpanForTraceDetails{
|
||||
TimeUnixNano: spanItem.TimeUnixNano,
|
||||
SpanID: spanItem.SpanID,
|
||||
TraceID: spanItem.TraceID,
|
||||
ServiceName: spanItem.ServiceName,
|
||||
Name: spanItem.Name,
|
||||
Kind: spanItem.Kind,
|
||||
DurationNano: spanItem.DurationNano,
|
||||
TagMap: spanItem.TagMap,
|
||||
ParentID: parentID,
|
||||
Events: spanItem.Events,
|
||||
HasError: spanItem.HasError,
|
||||
}
|
||||
spans = append(spans, span)
|
||||
}
|
||||
|
||||
// Build span trees from the spans
|
||||
roots, err := buildSpanTrees(&spans)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
targetSpan := &model.SpanForTraceDetails{}
|
||||
|
||||
// Find the target span in the span trees
|
||||
for _, root := range roots {
|
||||
targetSpan, err = breadthFirstSearch(root, targetSpanId)
|
||||
if targetSpan != nil {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
zap.S().Error("Error during BreadthFirstSearch(): ", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// If the target span is not found, return span not found error
|
||||
if targetSpan == nil {
|
||||
return nil, errors.New("Span not found")
|
||||
}
|
||||
|
||||
// Build the final result
|
||||
parents := []*model.SpanForTraceDetails{}
|
||||
|
||||
// Get the parent spans of the target span up to the given levelUp parameter and spanLimit
|
||||
preParent := targetSpan
|
||||
for i := 0; i < levelUp+1; i++ {
|
||||
if i == levelUp {
|
||||
preParent.ParentID = ""
|
||||
}
|
||||
if spanLimit-len(preParent.Children) <= 0 {
|
||||
parents = append(parents, preParent)
|
||||
parents = append(parents, preParent.Children[:spanLimit]...)
|
||||
spanLimit -= (len(preParent.Children[:spanLimit]) + 1)
|
||||
preParent.ParentID = ""
|
||||
break
|
||||
}
|
||||
parents = append(parents, preParent)
|
||||
parents = append(parents, preParent.Children...)
|
||||
spanLimit -= (len(preParent.Children) + 1)
|
||||
preParent = preParent.ParentSpan
|
||||
if preParent == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Get the child spans of the target span until the given levelDown and spanLimit
|
||||
preParents := []*model.SpanForTraceDetails{targetSpan}
|
||||
children := []*model.SpanForTraceDetails{}
|
||||
|
||||
for i := 0; i < levelDown && len(preParents) != 0 && spanLimit > 0; i++ {
|
||||
parents := []*model.SpanForTraceDetails{}
|
||||
for _, parent := range preParents {
|
||||
if spanLimit-len(parent.Children) <= 0 {
|
||||
children = append(children, parent.Children[:spanLimit]...)
|
||||
spanLimit -= len(parent.Children[:spanLimit])
|
||||
break
|
||||
}
|
||||
children = append(children, parent.Children...)
|
||||
parents = append(parents, parent.Children...)
|
||||
}
|
||||
preParents = parents
|
||||
}
|
||||
|
||||
// Store the final list of spans in the resultSpanSet map to avoid duplicates
|
||||
resultSpansSet := make(map[*model.SpanForTraceDetails]struct{})
|
||||
resultSpansSet[targetSpan] = struct{}{}
|
||||
for _, parent := range parents {
|
||||
resultSpansSet[parent] = struct{}{}
|
||||
}
|
||||
for _, child := range children {
|
||||
resultSpansSet[child] = struct{}{}
|
||||
}
|
||||
|
||||
searchSpansResult := []basemodel.SearchSpansResult{{
|
||||
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
|
||||
Events: make([][]interface{}, len(resultSpansSet)),
|
||||
},
|
||||
}
|
||||
|
||||
// Convert the resultSpansSet map to searchSpansResult
|
||||
i := 0 // index for spans
|
||||
for item := range resultSpansSet {
|
||||
references := []basemodel.OtelSpanRef{
|
||||
{
|
||||
TraceId: item.TraceID,
|
||||
SpanId: item.ParentID,
|
||||
RefType: "CHILD_OF",
|
||||
},
|
||||
}
|
||||
|
||||
referencesStringArray := []string{}
|
||||
for _, item := range references {
|
||||
referencesStringArray = append(referencesStringArray, item.ToString())
|
||||
}
|
||||
keys := make([]string, 0, len(item.TagMap))
|
||||
values := make([]string, 0, len(item.TagMap))
|
||||
|
||||
for k, v := range item.TagMap {
|
||||
keys = append(keys, k)
|
||||
values = append(values, v)
|
||||
}
|
||||
if item.Events == nil {
|
||||
item.Events = []string{}
|
||||
}
|
||||
searchSpansResult[0].Events[i] = []interface{}{
|
||||
item.TimeUnixNano,
|
||||
item.SpanID,
|
||||
item.TraceID,
|
||||
item.ServiceName,
|
||||
item.Name,
|
||||
strconv.Itoa(int(item.Kind)),
|
||||
strconv.FormatInt(item.DurationNano, 10),
|
||||
keys,
|
||||
values,
|
||||
referencesStringArray,
|
||||
item.Events,
|
||||
item.HasError,
|
||||
}
|
||||
i++ // increment index
|
||||
}
|
||||
return searchSpansResult, nil
|
||||
}
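A hedged usage sketch: trim a large trace to a window of two levels above and below one span of interest (exampleSmartTrace, the target span ID, and the literal limit are illustrative; a real caller would parse constants.SpanLimitStr from the constants file below):

func exampleSmartTrace(traceSpans []basemodel.SearchSpanResponseItem) {
	spanLimit, err := strconv.Atoi("5000") // stand-in for the parsed SPAN_LIMIT env value
	if err != nil {
		spanLimit = 5000
	}
	result, err := SmartTraceAlgorithm(traceSpans, "target-span-id", 2, 2, spanLimit)
	if err != nil {
		zap.S().Error("SmartTraceAlgorithm failed: ", err)
		return
	}
	// result[0].Events holds one row per kept span, in the column order
	// declared by searchSpansResult above.
	_ = result
}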
|
||||
|
||||
// buildSpanTrees builds trees of spans from a list of spans.
|
||||
func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTraceDetails, error) {
|
||||
|
||||
// Build a map of spanID to span for fast lookup
|
||||
var roots []*model.SpanForTraceDetails
|
||||
spans := *spansPtr
|
||||
mapOfSpans := make(map[string]*model.SpanForTraceDetails, len(spans))
|
||||
|
||||
for _, span := range spans {
|
||||
if span.ParentID == "" {
|
||||
roots = append(roots, span)
|
||||
}
|
||||
mapOfSpans[span.SpanID] = span
|
||||
}
|
||||
|
||||
// Build the span tree by adding children to the parent spans
|
||||
for _, span := range spans {
|
||||
if span.ParentID == "" {
|
||||
continue
|
||||
}
|
||||
parent := mapOfSpans[span.ParentID]
|
||||
|
||||
// If the parent span is not found, add current span to list of roots
|
||||
if parent == nil {
|
||||
// zap.S().Debug("Parent Span not found parent_id: ", span.ParentID)
|
||||
roots = append(roots, span)
|
||||
span.ParentID = ""
|
||||
continue
|
||||
}
|
||||
|
||||
span.ParentSpan = parent
|
||||
parent.Children = append(parent.Children, span)
|
||||
}
|
||||
|
||||
return roots, nil
|
||||
}
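A minimal sketch of the tree construction, assuming only the SpanForTraceDetails fields used above (exampleBuildSpanTrees and the span IDs are made up): a span whose parent is absent from the payload is promoted to a root, exactly as the fallback branch above does.

func exampleBuildSpanTrees() {
	a := &model.SpanForTraceDetails{SpanID: "A"}
	b := &model.SpanForTraceDetails{SpanID: "B", ParentID: "A"}
	orphan := &model.SpanForTraceDetails{SpanID: "C", ParentID: "missing"}

	spans := []*model.SpanForTraceDetails{a, b, orphan}
	roots, _ := buildSpanTrees(&spans)

	// roots contains a and orphan; b is attached as a child of a and
	// b.ParentSpan points back at a.
	_ = roots
}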
|
||||
|
||||
// breadthFirstSearch performs a breadth-first search on the span tree to find the target span.
func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*model.SpanForTraceDetails, error) {
	queue := []*model.SpanForTraceDetails{spansPtr}
	visited := make(map[string]bool)

	for len(queue) > 0 {
		current := queue[0]
		visited[current.SpanID] = true
		queue = queue[1:]
		if current.SpanID == targetId {
			return current, nil
		}

		for _, child := range current.Children {
			// enqueue only spans that have not been visited yet
			if !visited[child.SpanID] {
				queue = append(queue, child)
			}
		}
	}
	return nil, nil
}
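A small sketch of how the search is typically driven across all roots, mirroring the loops in SmartTraceAlgorithm and getSubTreeAlgorithm above (findSpan is hypothetical); a nil span with a nil error simply means the ID was not found under that root.

func findSpan(roots []*model.SpanForTraceDetails, id string) *model.SpanForTraceDetails {
	for _, root := range roots {
		if span, err := breadthFirstSearch(root, id); err == nil && span != nil {
			return span
		}
	}
	return nil
}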
513 ee/query-service/app/server.go Normal file
@@ -0,0 +1,513 @@
package app
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof" // http profiler
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/jmoiron/sqlx"
|
||||
|
||||
"github.com/rs/cors"
|
||||
"github.com/soheilhy/cmux"
|
||||
"go.signoz.io/signoz/ee/query-service/app/api"
|
||||
"go.signoz.io/signoz/ee/query-service/app/db"
|
||||
"go.signoz.io/signoz/ee/query-service/dao"
|
||||
"go.signoz.io/signoz/ee/query-service/interfaces"
|
||||
licensepkg "go.signoz.io/signoz/ee/query-service/license"
|
||||
"go.signoz.io/signoz/ee/query-service/usage"
|
||||
|
||||
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||
"go.signoz.io/signoz/pkg/query-service/healthcheck"
|
||||
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
|
||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||
"go.signoz.io/signoz/pkg/query-service/model"
|
||||
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
|
||||
rules "go.signoz.io/signoz/pkg/query-service/rules"
|
||||
"go.signoz.io/signoz/pkg/query-service/telemetry"
|
||||
"go.signoz.io/signoz/pkg/query-service/utils"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type ServerOptions struct {
|
||||
PromConfigPath string
|
||||
HTTPHostPort string
|
||||
PrivateHostPort string
|
||||
// alert specific params
|
||||
DisableRules bool
|
||||
RuleRepoURL string
|
||||
}
|
||||
|
||||
// Server runs HTTP api service
|
||||
type Server struct {
|
||||
serverOptions *ServerOptions
|
||||
conn net.Listener
|
||||
ruleManager *rules.Manager
|
||||
separatePorts bool
|
||||
|
||||
// public http router
|
||||
httpConn net.Listener
|
||||
httpServer *http.Server
|
||||
|
||||
// private http
|
||||
privateConn net.Listener
|
||||
privateHTTP *http.Server
|
||||
|
||||
// feature flags
|
||||
featureLookup baseint.FeatureLookup
|
||||
|
||||
unavailableChannel chan healthcheck.Status
|
||||
}
|
||||
|
||||
// HealthCheckStatus returns the health check status channel a client can subscribe to
|
||||
func (s Server) HealthCheckStatus() chan healthcheck.Status {
|
||||
return s.unavailableChannel
|
||||
}
|
||||
|
||||
// NewServer creates and initializes Server
|
||||
func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
|
||||
modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localDB.SetMaxOpenConns(10)
|
||||
|
||||
// initiate license manager
|
||||
lm, err := licensepkg.StartManager("sqlite", localDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// set license manager as feature flag provider in dao
|
||||
modelDao.SetFlagProvider(lm)
|
||||
readerReady := make(chan bool)
|
||||
|
||||
var reader interfaces.DataConnector
|
||||
storage := os.Getenv("STORAGE")
|
||||
if storage == "clickhouse" {
|
||||
zap.S().Info("Using ClickHouse as datastore ...")
|
||||
qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath, lm)
|
||||
go qb.Start(readerReady)
|
||||
reader = qb
|
||||
} else {
|
||||
return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
|
||||
}
|
||||
|
||||
<-readerReady
|
||||
rm, err := makeRulesManager(serverOptions.PromConfigPath,
|
||||
baseconst.GetAlertManagerApiPrefix(),
|
||||
serverOptions.RuleRepoURL,
|
||||
localDB,
|
||||
reader,
|
||||
serverOptions.DisableRules)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// start the usagemanager
|
||||
usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = usageManager.Start()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
telemetry.GetInstance().SetReader(reader)
|
||||
|
||||
apiOpts := api.APIHandlerOptions{
|
||||
DataConnector: reader,
|
||||
AppDao: modelDao,
|
||||
RulesManager: rm,
|
||||
FeatureFlags: lm,
|
||||
LicenseManager: lm,
|
||||
}
|
||||
|
||||
apiHandler, err := api.NewAPIHandler(apiOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
// logger: logger,
|
||||
// tracer: tracer,
|
||||
ruleManager: rm,
|
||||
serverOptions: serverOptions,
|
||||
unavailableChannel: make(chan healthcheck.Status),
|
||||
}
|
||||
|
||||
httpServer, err := s.createPublicServer(apiHandler)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.httpServer = httpServer
|
||||
|
||||
privateServer, err := s.createPrivateServer(apiHandler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.privateHTTP = privateServer
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
|
||||
|
||||
r := mux.NewRouter()
|
||||
|
||||
r.Use(setTimeoutMiddleware)
|
||||
r.Use(s.analyticsMiddleware)
|
||||
r.Use(loggingMiddlewarePrivate)
|
||||
|
||||
apiHandler.RegisterPrivateRoutes(r)
|
||||
|
||||
c := cors.New(cors.Options{
|
||||
//todo(amol): find out a way to add exact domain or
|
||||
// ip here for alert manager
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
|
||||
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
|
||||
})
|
||||
|
||||
handler := c.Handler(r)
|
||||
handler = handlers.CompressHandler(handler)
|
||||
|
||||
return &http.Server{
|
||||
Handler: handler,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
|
||||
|
||||
r := mux.NewRouter()
|
||||
|
||||
r.Use(setTimeoutMiddleware)
|
||||
r.Use(s.analyticsMiddleware)
|
||||
r.Use(loggingMiddleware)
|
||||
|
||||
apiHandler.RegisterRoutes(r)
|
||||
apiHandler.RegisterMetricsRoutes(r)
|
||||
apiHandler.RegisterLogsRoutes(r)
|
||||
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
|
||||
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control"},
|
||||
})
|
||||
|
||||
handler := c.Handler(r)
|
||||
|
||||
handler = handlers.CompressHandler(handler)
|
||||
|
||||
return &http.Server{
|
||||
Handler: handler,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// loggingMiddleware is used for logging public api calls
|
||||
func loggingMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
route := mux.CurrentRoute(r)
|
||||
path, _ := route.GetPathTemplate()
|
||||
startTime := time.Now()
|
||||
next.ServeHTTP(w, r)
|
||||
zap.S().Info(path, "\ttimeTaken: ", time.Since(startTime))
|
||||
})
|
||||
}
|
||||
|
||||
// loggingMiddlewarePrivate is used for logging private api calls
|
||||
// from internal services like alert manager
|
||||
func loggingMiddlewarePrivate(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
route := mux.CurrentRoute(r)
|
||||
path, _ := route.GetPathTemplate()
|
||||
startTime := time.Now()
|
||||
next.ServeHTTP(w, r)
|
||||
zap.S().Info(path, "\tprivatePort: true", "\ttimeTaken: ", time.Since(startTime))
|
||||
})
|
||||
}
|
||||
|
||||
type loggingResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
statusCode int
|
||||
}
|
||||
|
||||
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
|
||||
// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
|
||||
// we default to that status code.
|
||||
return &loggingResponseWriter{w, http.StatusOK}
|
||||
}
|
||||
|
||||
func (lrw *loggingResponseWriter) WriteHeader(code int) {
|
||||
lrw.statusCode = code
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
// Flush implements the http.Flusher interface.
|
||||
func (lrw *loggingResponseWriter) Flush() {
|
||||
lrw.ResponseWriter.(http.Flusher).Flush()
|
||||
}
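The wrapper exists because http.ResponseWriter gives no way to read back the status code a handler wrote; capturing it in WriteHeader is the standard workaround, and analyticsMiddleware below relies on it. The same pattern can be reused standalone (statusLogger is a hypothetical sketch):

func statusLogger(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		lrw := NewLoggingResponseWriter(w)
		next.ServeHTTP(lrw, r)
		// lrw.statusCode now holds whatever the wrapped handler wrote (200 by default).
		zap.S().Info(r.URL.Path, "\tstatusCode: ", lrw.statusCode)
	})
}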
|
||||
|
||||
func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
|
||||
pathToExtractBodyFrom := "/api/v2/metrics/query_range"
|
||||
|
||||
data := map[string]interface{}{}
|
||||
var postData *model.QueryRangeParamsV2
|
||||
|
||||
if path == pathToExtractBodyFrom && (r.Method == "POST") {
|
||||
if r.Body != nil {
|
||||
bodyBytes, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return nil, false
|
||||
}
|
||||
r.Body.Close() // must close
|
||||
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
|
||||
json.Unmarshal(bodyBytes, &postData)
|
||||
|
||||
} else {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
} else {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
signozMetricNotFound := false
|
||||
|
||||
if postData != nil {
|
||||
signozMetricNotFound = telemetry.GetInstance().CheckSigNozMetricsV2(postData.CompositeMetricQuery)
|
||||
|
||||
if postData.CompositeMetricQuery != nil {
|
||||
data["queryType"] = postData.CompositeMetricQuery.QueryType
|
||||
data["panelType"] = postData.CompositeMetricQuery.PanelType
|
||||
}
|
||||
|
||||
data["datasource"] = postData.DataSource
|
||||
}
|
||||
|
||||
if signozMetricNotFound {
|
||||
telemetry.GetInstance().AddActiveMetricsUser()
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
|
||||
}
|
||||
|
||||
return data, true
|
||||
}
|
||||
|
||||
func getActiveLogs(path string, r *http.Request) {
|
||||
// if path == "/api/v1/dashboards/{uuid}" {
|
||||
// telemetry.GetInstance().AddActiveMetricsUser()
|
||||
// }
|
||||
if path == "/api/v1/logs" {
|
||||
hasFilters := len(r.URL.Query().Get("q"))
|
||||
if hasFilters > 0 {
|
||||
telemetry.GetInstance().AddActiveLogsUser()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
route := mux.CurrentRoute(r)
|
||||
path, _ := route.GetPathTemplate()
|
||||
|
||||
dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
|
||||
getActiveLogs(path, r)
|
||||
|
||||
lrw := NewLoggingResponseWriter(w)
|
||||
next.ServeHTTP(lrw, r)
|
||||
|
||||
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
|
||||
if metadataExists {
|
||||
for key, value := range dashboardMetadata {
|
||||
data[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func setTimeoutMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
var cancel context.CancelFunc
|
||||
// check if route is not excluded
|
||||
url := r.URL.Path
|
||||
if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
|
||||
ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout*time.Second)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
r = r.WithContext(ctx)
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// initListeners initialises listeners of the server
|
||||
func (s *Server) initListeners() error {
|
||||
// listen on public port
|
||||
var err error
|
||||
publicHostPort := s.serverOptions.HTTPHostPort
|
||||
if publicHostPort == "" {
|
||||
return fmt.Errorf("baseconst.HTTPHostPort is required")
|
||||
}
|
||||
|
||||
s.httpConn, err = net.Listen("tcp", publicHostPort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
|
||||
|
||||
// listen on private port to support internal services
|
||||
privateHostPort := s.serverOptions.PrivateHostPort
|
||||
|
||||
if privateHostPort == "" {
|
||||
return fmt.Errorf("baseconst.PrivateHostPort is required")
|
||||
}
|
||||
|
||||
s.privateConn, err = net.Listen("tcp", privateHostPort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start listening on http and private http port concurrently
|
||||
func (s *Server) Start() error {
|
||||
|
||||
// initiate rule manager first
|
||||
if !s.serverOptions.DisableRules {
|
||||
s.ruleManager.Start()
|
||||
} else {
|
||||
zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
|
||||
}
|
||||
|
||||
err := s.initListeners()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var httpPort int
|
||||
if port, err := utils.GetPort(s.httpConn.Addr()); err == nil {
|
||||
httpPort = port
|
||||
}
|
||||
|
||||
go func() {
|
||||
zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
|
||||
|
||||
switch err := s.httpServer.Serve(s.httpConn); err {
|
||||
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
|
||||
// normal exit, nothing to do
|
||||
default:
|
||||
zap.S().Error("Could not start HTTP server", zap.Error(err))
|
||||
}
|
||||
s.unavailableChannel <- healthcheck.Unavailable
|
||||
}()
|
||||
|
||||
go func() {
|
||||
zap.S().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
|
||||
|
||||
err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
|
||||
if err != nil {
|
||||
zap.S().Error("Could not start pprof server", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
var privatePort int
|
||||
if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
|
||||
privatePort = port
|
||||
}
|
||||
fmt.Println("starting private http")
|
||||
go func() {
|
||||
zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
|
||||
|
||||
switch err := s.privateHTTP.Serve(s.privateConn); err {
|
||||
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
|
||||
// normal exit, nothing to do
|
||||
zap.S().Info("private http server closed")
|
||||
default:
|
||||
zap.S().Error("Could not start private HTTP server", zap.Error(err))
|
||||
}
|
||||
|
||||
s.unavailableChannel <- healthcheck.Unavailable
|
||||
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
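A minimal boot sketch under assumed option values (runServer and every literal here are illustrative): construct the server, start both listeners, and block until some component reports itself unavailable on the health channel.

func runServer() error {
	srv, err := NewServer(&ServerOptions{
		PromConfigPath:  "config/prometheus.yml",
		HTTPHostPort:    "0.0.0.0:8080",
		PrivateHostPort: "0.0.0.0:8085",
		RuleRepoURL:     "",
	})
	if err != nil {
		return err
	}
	if err := srv.Start(); err != nil {
		return err
	}
	// Block until the HTTP or private server exits abnormally.
	status := <-srv.HealthCheckStatus()
	zap.S().Warn("component became unavailable: ", status)
	return nil
}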
|
||||
|
||||
func makeRulesManager(
|
||||
promConfigPath,
|
||||
alertManagerURL string,
|
||||
ruleRepoURL string,
|
||||
db *sqlx.DB,
|
||||
ch baseint.Reader,
|
||||
disableRules bool) (*rules.Manager, error) {
|
||||
|
||||
// create engine
|
||||
pqle, err := pqle.FromConfigPath(promConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create pql engine : %v", err)
|
||||
}
|
||||
|
||||
// notifier opts
|
||||
notifierOpts := basealm.NotifierOptions{
|
||||
QueueCapacity: 10000,
|
||||
Timeout: 1 * time.Second,
|
||||
AlertManagerURLs: []string{alertManagerURL},
|
||||
}
|
||||
|
||||
// create manager opts
|
||||
managerOpts := &rules.ManagerOptions{
|
||||
NotifierOpts: notifierOpts,
|
||||
Queriers: &rules.Queriers{
|
||||
PqlEngine: pqle,
|
||||
Ch: ch.GetConn(),
|
||||
},
|
||||
RepoURL: ruleRepoURL,
|
||||
DBConn: db,
|
||||
Context: context.Background(),
|
||||
Logger: nil,
|
||||
DisableRules: disableRules,
|
||||
}
|
||||
|
||||
// create Manager
|
||||
manager, err := rules.NewManager(managerOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rule manager error: %v", err)
|
||||
}
|
||||
|
||||
zap.S().Info("rules manager is ready")
|
||||
|
||||
return manager, nil
|
||||
}
30 ee/query-service/constants/constants.go Normal file
@@ -0,0 +1,30 @@

package constants

import (
	"os"
)

const (
	DefaultSiteURL = "https://localhost:3301"
)

var LicenseSignozIo = "https://license.signoz.io/api/v1"

var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")

func GetOrDefaultEnv(key string, fallback string) string {
	v := os.Getenv(key)
	if len(v) == 0 {
		return fallback
	}
	return v
}

// constant functions that override env vars

// GetDefaultSiteURL returns the default site URL, primarily
// used to send the SAML request and allow the backend to
// handle the HTTP redirect
func GetDefaultSiteURL() string {
	return GetOrDefaultEnv("SIGNOZ_SITE_URL", DefaultSiteURL)
}
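SpanLimitStr is kept as a string so the env lookup stays trivial; callers parse it where an integer is needed. A hedged sketch (parseSpanLimit is hypothetical and would need a strconv import that this file does not currently have):

func parseSpanLimit() int {
	// strconv is an assumed extra import for this sketch.
	limit, err := strconv.Atoi(SpanLimitStr)
	if err != nil {
		return 5000 // fall back to the documented default
	}
	return limit
}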
Some files were not shown because too many files have changed in this diff.