Compare commits
727 commits: v0.7.2 ... v0.11.3-rc
6 .dockerignore (Normal file)
@@ -0,0 +1,6 @@
.git
.github
.vscode
README.md
deploy
sample-apps
33 .editorconfig (Normal file)
@@ -0,0 +1,33 @@
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true

# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,py}]
charset = utf-8

# 4 space indentation
[*.py]
indent_style = space
indent_size = 4

# Tab indentation (no size specified)
[Makefile]
indent_style = tab

# Indentation override for all JS under lib directory
[lib/**.js]
indent_style = space
indent_size = 2

# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2
3 .github/CODEOWNERS (vendored)
@@ -2,5 +2,6 @@
# Owners are automatically requested for review for PRs that changes code
# that they own.
* @ankitnayan
/frontend/ @palash-signoz @pranshuchittora
/frontend/ @palashgdev @pranshuchittora
/deploy/ @prashant-shahi
/pkg/query-service/ @srikanthccv
53 .github/workflows/build.yaml (vendored)
@@ -1,50 +1,27 @@
name: build-pipeline

on:
pull_request:
branches:
- develop
- main
- v*
paths:
- 'pkg/**'
- 'frontend/**'
- release/v*

jobs:
get_filters:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
frontend: ${{ steps.filter.outputs.frontend }}
query-service: ${{ steps.filter.outputs.query-service }}
flattener: ${{ steps.filter.outputs.flattener }}
steps:
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@v2
id: filter
with:
filters: |
frontend:
- 'frontend/**'
query-service:
- 'pkg/query-service/**'
flattener:
- 'pkg/processors/flattener/**'

build-frontend:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.frontend == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install dependencies
run: cd frontend && yarn install
- name: Run Prettier
run: cd frontend && npm run prettify
continue-on-error: true
- name: Run ESLint
run: cd frontend && npm run lint
continue-on-error: true
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
shell: bash
run: |
@@ -52,26 +29,20 @@ jobs:

build-query-service:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.query-service == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build query-service image
- name: Build query-service image
shell: bash
run: |
make build-query-service-amd64

build-flattener:
build-ee-query-service:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.flattener == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build flattener docker image
- name: Build EE query-service image
shell: bash
run: |
make build-flattener-amd64
make build-ee-query-service-amd64
17 .github/workflows/codeball.yml (vendored, Normal file)
@@ -0,0 +1,17 @@
name: Codeball
on: [pull_request]

jobs:
codeball_job:
runs-on: ubuntu-latest
name: Codeball
steps:
# Run Codeball on all new Pull Requests 🚀
# For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
- name: Codeball
uses: sturdy-dev/codeball-action@v2
with:
approvePullRequests: "true"
labelPullRequestsWhenApproved: "true"
labelPullRequestsWhenReviewNeeded: "false"
failJobsWhenReviewNeeded: "false"
27 .github/workflows/create-issue-on-pr-merge.yml (vendored, Normal file)
@@ -0,0 +1,27 @@
on:
pull_request_target:
types:
- closed

env:
GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
PR_NUMBER: ${{ github.event.number }}
jobs:
create_issue_on_merge:
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest
steps:
- name: Checkout Codebase
uses: actions/checkout@v2
with:
repository: signoz/gh-bot
- name: Use Node v16
uses: actions/setup-node@v2
with:
node-version: 16
- name: Setup Cache & Install Dependencies
uses: bahmutov/npm-install@v1
with:
install-command: yarn --frozen-lockfile
- name: Comment on PR
run: node create-issue.js
22 .github/workflows/dependency-review.yml (vendored, Normal file)
@@ -0,0 +1,22 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency Review'
on: [pull_request]

permissions:
contents: read

jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v3
- name: 'Dependency Review'
with:
fail-on-severity: high
uses: actions/dependency-review-action@v2
4 .github/workflows/e2e-k3s.yaml (vendored)
@@ -16,7 +16,9 @@ jobs:
uses: actions/checkout@v2

- name: Build query-service image
run: make build-query-service-amd64
env:
DEV_BUILD: 1
run: make build-ee-query-service-amd64

- name: Build frontend image
run: make build-frontend-amd64
24 .github/workflows/playwright.yaml (vendored, Normal file)
@@ -0,0 +1,24 @@
name: Playwright Tests
on: [pull_request]

jobs:
playwright:
defaults:
run:
working-directory: frontend
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
node-version: "16.x"
- name: Install dependencies
run: CI=1 yarn install
- name: Install Playwright
run: npx playwright install --with-deps
- name: Run Playwright tests
run: yarn playwright
env:
# This might depend on your test-runner/language binding
PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}
20 .github/workflows/pr_verify_linked_issue.yml (vendored, Normal file)
@@ -0,0 +1,20 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue

on:
pull_request:
types: [edited, synchronize, opened, reopened]
check_run:

jobs:
verify_linked_issue:
runs-on: ubuntu-latest
name: Ensure Pull Request has a linked issue.
steps:
- name: Verify Linked Issue
uses: hattan/verify-linked-issue-action@v1.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
37 .github/workflows/push.yaml (vendored)
@@ -11,6 +11,41 @@ on:
jobs:

image-build-and-push-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v1.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-query-service

image-build-and-push-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -43,7 +78,7 @@ jobs:
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-query-service
run: make build-push-ee-query-service

image-build-and-push-frontend:
runs-on: ubuntu-latest
15 .gitignore (vendored)
@@ -1,3 +1,4 @@

node_modules
yarn.lock
package.json
@@ -5,6 +6,7 @@ package.json
deploy/docker/environment_tiny/common_test
frontend/node_modules
frontend/.pnp
frontend/i18n-translations-hash.json
*.pnp.js

# testing
@@ -15,6 +17,7 @@ frontend/build
frontend/.vscode
frontend/.yarnclean
frontend/.temp_cache
frontend/test-results

# misc
.DS_Store
@@ -27,10 +30,6 @@ frontend/npm-debug.log*
frontend/yarn-debug.log*
frontend/yarn-error.log*
frontend/src/constants/env.ts
frontend/cypress/**/*.mp4

# env file for cypress
frontend/cypress.env.json

.idea

@@ -43,10 +42,14 @@ frontend/cypress.env.json
frontend/*.env
pkg/query-service/signoz.db

pkg/query-service/tframe/test-deploy/data/
pkg/query-service/tests/test-deploy/data/

ee/query-service/signoz.db

ee/query-service/tests/test-deploy/data/

# local data

*.db
/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
bin/
@@ -4,4 +4,4 @@
# Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml chnages.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz

sed -i 38,70's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
408 CONTRIBUTING.md
@@ -1,152 +1,368 @@
# How to Contribute
# Contributing Guidelines

There are primarily 2 areas in which you can contribute in SigNoz
## Welcome to SigNoz Contributing section 🎉

- Frontend ( written in Typescript, React)
- Backend - ( Query Service - written in Go)
Hi there! We're thrilled that you'd like to contribute to this project, thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.

Depending upon your area of expertise & interest, you can chose one or more to contribute. Below are detailed instructions to contribute in each area
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.

> Please note: If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
- We accept contributions made to the [SigNoz `develop` branch]()
- Find all SigNoz Docker Hub images here
- [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
- [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
- [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)

> If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
## Finding contributions to work on 💬

# Develop Frontend
Looking at the existing issues is a great way to find something to contribute on.
Also, have a look at these [good first issues label](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with.

Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)

### Contribute to Frontend with Docker installation of SigNoz
## Sections:
- [General Instructions](#1-general-instructions-)
- [For Creating Issue(s)](#11-for-creating-issues)
- [For Pull Requests(s)](#12-for-pull-requests)
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
- [Develop Frontend](#3-develop-frontend-)
- [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
- [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
- [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
- [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
- [Other Ways to Contribute](#other-ways-to-contribute)

- `git clone https://github.com/SigNoz/signoz.git && cd signoz`
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L59`
- run `cd deploy` to move to deploy directory
- Install signoz locally without the frontend
- Add below configuration to query-service section at `docker/clickhouse-setup/docker-compose.yaml#L36`
# 1. General Instructions 📝

```docker
## 1.1 For Creating Issue(s)
Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.

**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)

#### Details like these are incredibly useful:

- **Requirement** - what kind of use case are you trying to solve?
- **Proposal** - what do you suggest to solve the problem or improve the existing
situation?
- Any open questions to address❓

#### If you are reporting a bug, details like these are incredibly useful:

- A reproducible test case or series of steps.
- The version of our code being used.
- Any modifications you've made relevant to the bug🐞.
- Anything unusual about your environment or deployment.

Discussing your proposed changes ahead of time will make the contribution
process smooth for everyone 🙌.

**[`^top^`](#)**

<hr>

## 1.2 For Pull Request(s)

Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a Pull Request(s).
Before sending us a pull request, please ensure that,

- Fork the SigNoz repo on GitHub, clone it on your machine.
- Create a branch with your changes.
- You are working against the latest source on the `develop` branch.
- Modify the source; please focus only on the specific change you are contributing.
- Ensure local tests pass.
- Commit to your fork using clear commit messages.
- Send us a pull request, answering any default questions in the pull request interface.
- Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, on your computer, merge main into your branch, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push up those changes.
- Once the change has been approved and merged, we will inform you in a comment.

GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).

**Note:** Unless your change is small, **please** consider submitting different Pull Rrequest(s):

* 1️⃣ First PR should include the overall structure of the new component:
* Readme, configuration, interfaces or base classes, etc...
* This PR is usually trivial to review, so the size limit does not apply to
it.
* 2️⃣ Second PR should include the concrete implementation of the component. If the
size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into
multiple PRs.
* If there are multiple sub-component then ideally each one should be implemented as
a **separate** pull request.
* Last PR should include changes to **any user-facing documentation.** And should include
end-to-end tests if applicable. The component must be enabled
only after sufficient testing, and there is enough confidence in the
stability and quality of the component.

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [SLACK](https://signoz.io/slack).

### Pointers:
- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)

<hr>

### Conventions to follow when submitting Commits and Pull Request(s).

We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/), more specifically the commits and PRs **should have type specifiers** prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.

e.g. If you are submitting a fix for an issue in frontend, the PR name should be prefixed with **`fix(FE):`**

- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.

- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

**[`^top^`](#)**

<hr>

# 2. How to Contribute 🙋🏻♂️

#### There are primarily 2 areas in which you can contribute to SigNoz

- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)

Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.

**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻

⚠️ If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.

**[`^top^`](#)**

<hr>

# 3. Develop Frontend 🌚

**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**

Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).

## 3.1 Contribute to Frontend with Docker installation of SigNoz

- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)

- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
```
ports:
- "8080:8080"
```
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d`
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d`
- `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
- `yarn install`
- `yarn dev`
<img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">

- Next run,
```
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.

> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
If you have backend api exposed via frontend nginx:
```
FRONTEND_API_ENDPOINT=http://localhost:3301
```
If not:
```
FRONTEND_API_ENDPOINT=http://localhost:8080
```

### Contribute to Frontend without installing SigNoz backend
- Next,
```
yarn install
yarn dev
```

If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in #contributing channel in our [slack community](https://signoz.io/slack) and we will DM you with `<test environment URL>`
### Important Notes:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)

- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
- Create a file `.env` with `FRONTEND_API_ENDPOINT=<test environment URL>`
- `yarn install`
- `yarn dev`
**[`^top^`](#)**

**_Frontend should now be accessible at `http://localhost:3301/application`_**
## 3.2 Contribute to Frontend without installing SigNoz backend

# Contribute to Query-Service
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.

Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)
- Clone the SigNoz repository and cd into signoz/frontend directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
````
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
- Next,
```
yarn install
yarn dev
```

### To run ClickHouse setup (recommended for local development)
Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `<test environment URL>`.

- git clone https://github.com/SigNoz/signoz.git
- run `cd signoz` to move to signoz directory
- run `sudo make dev-setup` to configure local setup to run query-service
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L45`
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L28`
- add below configuration to clickhouse section at `docker/clickhouse-setup/docker-compose.yaml#L6`
```docker
expose:
- 9000
ports:
- 9001:9000
**Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)

**[`^top^`](#)**

<hr>

# 4. Contribute to Backend (Query-Service) 🌑

[**https://github.com/SigNoz/signoz/tree/develop/pkg/query-service**](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)

## 4.1 To run ClickHouse setup (recommended for local development)

- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">

- Comment out `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)
<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">

- add below configuration to `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)
```
ports:
- 9001:9000
```
<img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">

- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
SIGNOZ_LOCAL_DB_PATH="./signoz.db"
```
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38)

- Now, install SigNoz locally **without** the `frontend` and `query-service`,
- If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
- If you are on `arm64` processors (Apple M1 Macs) run `sudo make run-arm`

#### Run locally,
```

- run `cd pkg/query-service/` to move to query-service directory
- Open ./constants/constants.go
- Replace ```const RELATIONAL_DATASOURCE_PATH = "/var/lib/signoz/signoz.db"``` \
with ```const RELATIONAL_DATASOURCE_PATH = "./signoz.db".```

- Install signoz locally without the frontend and query-service
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo make run-x86`
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo make run-arm`

#### Run locally
```console
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```

> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
#### Build and Run locally
```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```

**_Query Service should now be available at `http://localhost:8080`_**
#### Docker Images
The docker images of query-service is available at https://hub.docker.com/r/signoz/query-service

> If you want to see how, frontend plays with query service, you can run frontend also in you local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts` as the query-service is now running at port `8080`
```
docker pull signoz/query-service
```

```
docker pull signoz/query-service:latest
```

```
docker pull signoz/query-service:develop
```

### Important Note:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)

**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)

If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.

---

<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.

Click the button below. A workspace with all required environments will be created.

[](https://gitpod.io/#https://github.com/SigNoz/signoz)

> To use it on your forked repo, edit the 'Open in Gitpod' button url to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->
> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->

# Contribute to SigNoz Helm Chart
**[`^top^`](#)**

<hr>

Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).
# 5. Contribute to SigNoz Helm Chart 📊

### To run helm chart for local development
**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**

- run `git clone https://github.com/SigNoz/charts.git` followed by `cd charts`
- it is recommended to use lightweight kubernetes (k8s) cluster for local development:
## 5.1 To run helm chart for local development

- Clone the SigNoz repository and cd into charts directory,
```
git clone https://github.com/SigNoz/charts.git && cd charts
```
- It is recommended to use lightweight kubernetes (k8s) cluster for local development:
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [k3d](https://k3d.io/#installation)
- [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
- run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart
- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster,
- run `make dev-install` to install SigNoz chart with `my-release` release name in `platform` namespace,
- next run,
```
kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
```
to make SigNoz UI available at [localhost:3301](http://localhost:3301)

**To load data with HotROD sample app:**
**5.1.1 To install the HotROD sample app:**

```sh
kubectl create ns sample-application

kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml

kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```

**To stop the load generation:**
**5.1.2 To load data with the HotROD sample app:**

```sh
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```

**5.1.3 To stop the load generation:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```

**5.1.4 To delete the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
| HOTROD_NAMESPACE=sample-application bash
```

**[`^top^`](#)**

---

## General Instructions
## Other Ways to Contribute

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).
There are many other ways to get involved with the community and to participate in this project:

- If you find any bugs, please create an issue
- If you find anything missing in documentation, you can create an issue with label **documentation**
- If you want to build any new feature, please create an issue with label `enhancement`
- If you want to discuss something about the product, start a new [discussion](https://github.com/SigNoz/signoz/discussions)
- Use the product, submitting GitHub issues when a problem is found.
- Help code review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.

### Conventions to follow when submitting commits, PRs

1. We try to follow https://www.conventionalcommits.org/en/v1.0.0/
Again, Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

More specifically the commits and PRs should have type specifiers prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.

e.g. If you are submitting a fix for an issue in frontend - PR name should be prefixed with `fix(FE):`

2. Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows

3. Feel free to ping us on `#contributing` or `#contributing-frontend` on our slack community if you need any help on this :)
Thank You!
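The CONTRIBUTING.md changes above replace the old instruction to hand-edit `const RELATIONAL_DATASOURCE_PATH` in `pkg/query-service/constants/constants.go` with a `SIGNOZ_LOCAL_DB_PATH` entry in a local `.env` file. As a minimal, hedged sketch (not the actual SigNoz source), an environment-overridable path in Go typically looks like the following; the `getOrDefaultEnv` helper name is illustrative only:

```go
package constants

import "os"

// getOrDefaultEnv is an illustrative helper: it returns the value of the
// environment variable `key` if it is set, otherwise the packaged default.
func getOrDefaultEnv(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

// RELATIONAL_DATASOURCE_PATH can be pointed at ./signoz.db for local
// development by exporting SIGNOZ_LOCAL_DB_PATH (as described in the
// CONTRIBUTING.md section above) instead of editing the constant in place.
var RELATIONAL_DATASOURCE_PATH = getOrDefaultEnv("SIGNOZ_LOCAL_DB_PATH", "/var/lib/signoz/signoz.db")
```

With this pattern, `SIGNOZ_LOCAL_DB_PATH="./signoz.db"` in the `.env` file overrides the default without any code change.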
8 LICENSE
@@ -1,6 +1,10 @@
MIT License
Copyright (c) 2020-present SigNoz Inc.

Copyright (c) 2021 SigNoz
Portions of this software are licensed as follows:

* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
* All third party components incorporated into the SigNoz Software are licensed under the original license provided by the owner of the applicable component.
* Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
93
Makefile
93
Makefile
@@ -7,31 +7,36 @@ BUILD_VERSION ?= $(shell git describe --always --tags)
|
||||
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
|
||||
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
|
||||
|
||||
# Internal variables or constants.
|
||||
FRONTEND_DIRECTORY ?= frontend
|
||||
FLATTENER_DIRECTORY ?= pkg/processors/flattener
|
||||
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
|
||||
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
|
||||
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
|
||||
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
|
||||
LOCAL_GOOS ?= $(shell go env GOOS)
|
||||
LOCAL_GOARCH ?= $(shell go env GOARCH)
|
||||
|
||||
REPONAME ?= signoz
|
||||
DOCKER_TAG ?= latest
|
||||
|
||||
FRONTEND_DOCKER_IMAGE ?= frontend
|
||||
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
|
||||
FLATTERNER_DOCKER_IMAGE ?= flattener-processor
|
||||
DEV_BUILD ?= ""
|
||||
|
||||
# Build-time Go variables
|
||||
PACKAGE?=go.signoz.io/query-service
|
||||
buildVersion=${PACKAGE}/version.buildVersion
|
||||
buildHash=${PACKAGE}/version.buildHash
|
||||
buildTime=${PACKAGE}/version.buildTime
|
||||
gitBranch=${PACKAGE}/version.gitBranch
|
||||
PACKAGE?=go.signoz.io/signoz
|
||||
buildVersion=${PACKAGE}/pkg/query-service/version.buildVersion
|
||||
buildHash=${PACKAGE}/pkg/query-service/version.buildHash
|
||||
buildTime=${PACKAGE}/pkg/query-service/version.buildTime
|
||||
gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
|
||||
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
|
||||
|
||||
LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"
|
||||
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
|
||||
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
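As a rough sketch of how these variables are consumed: each `-X` flag sets one of the exported strings in the `version` package at link time. Outside the Makefile and Dockerfiles, an equivalent manual build would look roughly like this (the version value and the `./pkg/query-service` main-package path are assumptions for illustration):

```sh
# Approximate manual equivalent of building with LD_FLAGS
PACKAGE=go.signoz.io/signoz
go build -ldflags "\
  -X ${PACKAGE}/pkg/query-service/version.buildVersion=v0.11.2 \
  -X ${PACKAGE}/pkg/query-service/version.buildHash=$(git rev-parse --short HEAD) \
  -X ${PACKAGE}/pkg/query-service/version.buildTime=$(date -u +%Y-%m-%dT%H:%M:%SZ) \
  -X ${PACKAGE}/pkg/query-service/version.gitBranch=$(git rev-parse --abbrev-ref HEAD)" \
  -o bin/query-service ./pkg/query-service
```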
|
||||
|
||||
all: build-push-frontend build-push-query-service build-push-flattener
|
||||
all: build-push-frontend build-push-query-service
|
||||
# Steps to build and push docker image of frontend
|
||||
.PHONY: build-frontend-amd64 build-push-frontend
|
||||
# Step to build docker image of frontend in amd64 (used in build pipeline)
|
||||
@@ -40,7 +45,7 @@ build-frontend-amd64:
|
||||
@echo "--> Building frontend docker image for amd64"
|
||||
@echo "------------------"
|
||||
@cd $(FRONTEND_DIRECTORY) && \
|
||||
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
docker build --file Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" .
|
||||
|
||||
# Step to build and push docker image of frontend(used in push pipeline)
|
||||
@@ -59,40 +64,42 @@ build-query-service-amd64:
|
||||
@echo "------------------"
|
||||
@echo "--> Building query-service docker image for amd64"
|
||||
@echo "------------------"
|
||||
@cd $(QUERY_SERVICE_DIRECTORY) && \
|
||||
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS) .
|
||||
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
|
||||
|
||||
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
|
||||
build-push-query-service:
|
||||
@echo "------------------"
|
||||
@echo "--> Building and pushing query-service docker image"
|
||||
@echo "------------------"
|
||||
@cd $(QUERY_SERVICE_DIRECTORY) && \
|
||||
docker buildx build --file Dockerfile --progress plane --no-cache \
|
||||
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) \
|
||||
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane --no-cache \
|
||||
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
|
||||
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
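A minimal usage sketch for this multi-arch target, assuming a Docker Buildx builder with `linux/amd64` and `linux/arm64` support is configured and you are logged in to the target registry (values shown are the Makefile defaults):

```sh
# One-time: create and select a buildx builder
docker buildx create --name signoz-builder --use

# Build and push the query-service image for both platforms
make build-push-query-service REPONAME=signoz DOCKER_TAG=latest
```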
|
||||
|
||||
# Steps to build and push docker image of flattener
|
||||
.PHONY: build-flattener-amd64 build-push-flattener
|
||||
# Step to build docker image of flattener in amd64 (used in build pipeline)
|
||||
build-flattener-amd64:
|
||||
# Step to build EE docker image of query service in amd64 (used in build pipeline)
|
||||
build-ee-query-service-amd64:
|
||||
@echo "------------------"
|
||||
@echo "--> Building flattener docker image for amd64"
|
||||
@echo "--> Building query-service docker image for amd64"
|
||||
@echo "------------------"
|
||||
@cd $(FLATTENER_DIRECTORY) && \
|
||||
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" .
|
||||
@if [ $(DEV_BUILD) != "" ]; then \
|
||||
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
|
||||
else \
|
||||
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
|
||||
fi
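Judging from the conditional above, any non-empty `DEV_BUILD` value switches the EE image to the staging license endpoint via `DEV_LD_FLAGS`; a usage sketch:

```sh
# Regular EE query-service build for amd64
make build-ee-query-service-amd64

# Development build pointed at DEV_LICENSE_SIGNOZ_IO (staging license API)
make build-ee-query-service-amd64 DEV_BUILD=1
```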
|
||||
|
||||
# Step to build and push docker image of flattener in amd64 (used in push pipeline)
|
||||
build-push-flattener:
|
||||
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
|
||||
build-push-ee-query-service:
|
||||
@echo "------------------"
|
||||
@echo "--> Building and pushing flattener docker image"
|
||||
@echo "--> Building and pushing query-service docker image"
|
||||
@echo "------------------"
|
||||
@cd $(FLATTENER_DIRECTORY) && \
|
||||
docker buildx build --file Dockerfile --progress plane \
|
||||
--no-cache --push --platform linux/arm64,linux/amd64 \
|
||||
--tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
|
||||
--progress plane --no-cache --push --platform linux/arm64,linux/amd64 \
|
||||
--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
|
||||
dev-setup:
|
||||
mkdir -p /var/lib/signoz
|
||||
@@ -102,24 +109,26 @@ dev-setup:
|
||||
@echo "--> Local Setup completed"
|
||||
@echo "------------------"
|
||||
|
||||
run-x86:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up -d
|
||||
run-local:
|
||||
@LOCAL_GOOS=$(LOCAL_GOOS) LOCAL_GOARCH=$(LOCAL_GOARCH) docker-compose -f \
|
||||
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
|
||||
up --build -d
|
||||
|
||||
run-arm:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.arm.yaml up -d
|
||||
down-local:
|
||||
@docker-compose -f \
|
||||
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
|
||||
down -v
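Put together, a typical local development loop with these targets might look like the following sketch (assuming Docker Compose and Go are installed locally; `dev-setup` may need root to create `/var/lib/signoz`):

```sh
make dev-setup      # prepare /var/lib/signoz and local config
make run-local      # build query-service and frontend from source and start the core stack
docker ps           # verify clickhouse, query-service and frontend are running
make down-local     # stop the stack and remove its volumes
```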
|
||||
|
||||
run-x86:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
|
||||
|
||||
down-x86:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
|
||||
|
||||
down-arm:
|
||||
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.arm.yaml down -v
|
||||
|
||||
clear-standalone-data:
|
||||
@cd $(STANDALONE_DIRECTORY)
|
||||
@docker run --rm -v "data:/pwd" busybox \
|
||||
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
|
||||
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
|
||||
|
||||
clear-swarm-data:
|
||||
@cd $(SWARM_DIRECTORY)
|
||||
@docker run --rm -v "data:/pwd" busybox \
|
||||
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
|
||||
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
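The change above mounts the actual data directory (`$(PWD)/.../data`) into the busybox container instead of an unrelated named volume, so the wipe now really targets the standalone or swarm data. For reference, a manual equivalent run from the repository root:

```sh
# Manual equivalent of `make clear-standalone-data`
docker run --rm -v "$PWD/deploy/docker/clickhouse-setup/data:/pwd" busybox \
  sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
```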
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img alt="Lizenz" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
|
||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||
@@ -15,10 +14,10 @@
|
||||
|
||||
<h3 align="center">
|
||||
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
|
||||
<a href="https://signoz.io/slack"><b>Slack Community</b></a> •
|
||||
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
|
||||
<a href="https://twitter.com/SigNozHQ"><b>Twitter</b></a>
|
||||
</h3>
|
||||
|
||||
##
|
||||
|
||||
41
README.md
@@ -5,7 +5,6 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
|
||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
|
||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||
@@ -15,9 +14,9 @@
|
||||
|
||||
<h3 align="center">
|
||||
<a href="https://signoz.io/docs"><b>Documentation</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>ReadMe in German</b></a> •
|
||||
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
|
||||
<a href="https://signoz.io/slack"><b>Slack Community</b></a> •
|
||||
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
|
||||
</h3>
|
||||
@@ -32,7 +31,6 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
|
||||
|
||||
👉 Run aggregates on trace data to get business relevant metrics
|
||||
|
||||
|
||||

|
||||
<br />
|
||||

|
||||
@@ -88,8 +86,7 @@ You can find the complete list of languages here - https://opentelemetry.io/docs
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
|
||||
|
||||
## Getting Started
|
||||
|
||||
|
||||
|
||||
### Deploy using Docker
|
||||
|
||||
Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using docker
|
||||
@@ -102,7 +99,6 @@ The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshoo
|
||||
### Deploy in Kubernetes using Helm
|
||||
|
||||
Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts
|
||||
|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -112,7 +108,7 @@ Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_cha
|
||||
|
||||
### SigNoz vs Prometheus
|
||||
|
||||
Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great.
|
||||
Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great.
|
||||
|
||||
Our goal is to provide an integrated UI between metrics & traces - similar to what SaaS vendors like Datadog provides - and give advanced filtering and aggregation over traces, something which Jaeger currently lack.
|
||||
|
||||
@@ -120,7 +116,7 @@ Our goal is to provide an integrated UI between metrics & traces - similar to wh
|
||||
|
||||
### SigNoz vs Jaeger
|
||||
|
||||
Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management in our roadmap.
|
||||
Jaeger only does distributed tracing. SigNoz supports metrics, traces and logs - all the 3 pillars of observability.
|
||||
|
||||
Moreover, SigNoz has a few more advanced features compared to Jaeger:
|
||||
|
||||
@@ -133,11 +129,27 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:
|
||||
|
||||
## Contributing
|
||||
|
||||
|
||||
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
|
||||
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
|
||||
|
||||
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)
|
||||
|
||||
### Project maintainers
|
||||
|
||||
#### Backend
|
||||
|
||||
- [Ankit Nayan](https://github.com/ankitnayan)
|
||||
- [Nityananda Gohain](https://github.com/nityanandagohain)
|
||||
- [Srikanth Chekuri](https://github.com/srikanthccv)
|
||||
- [Vishal Sharma](https://github.com/makeavish)
|
||||
|
||||
#### Frontend
|
||||
|
||||
- [Palash Gupta](https://github.com/palashgdev)
|
||||
|
||||
#### DevOps
|
||||
|
||||
- [Prashant Shahi](https://github.com/prashant-shahi)
|
||||
|
||||
<br /><br />
|
||||
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
|
||||
@@ -156,11 +168,8 @@ Join the [slack community](https://signoz.io/slack) to know more about distribut
|
||||
|
||||
If you have any ideas, questions, or any feedback, please share on our [Github Discussions](https://github.com/SigNoz/signoz/discussions)
|
||||
|
||||
As always, thanks to our amazing contributors!
|
||||
As always, thanks to our amazing contributors!
|
||||
|
||||
<a href="https://github.com/signoz/signoz/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
|
||||
</a>
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
|
||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
|
||||
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
||||
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||
@@ -14,14 +13,19 @@
|
||||
|
||||
##
|
||||
|
||||
SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNoz使用分布式跟踪来增加软件技术栈的可见性。
|
||||
SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNoz使用分布式追踪来增加软件技术栈的可见性。
|
||||
|
||||
👉 你能看到一些性能矩阵,服务、外部api调用、每个终端(endpoint)的p99延迟和错误率。
|
||||
👉 你能看到一些性能指标,服务、外部api调用、每个终端(endpoint)的p99延迟和错误率。
|
||||
|
||||
👉 通过准确的跟踪来确定是什么引起了问题,并且可以看到每个独立请求的帧图(framegraph),这样你就能找到根本原因。
|
||||
👉 通过准确的追踪来确定是什么引起了问题,并且可以看到每个独立请求的帧图(framegraph),这样你就能找到根本原因。
|
||||
|
||||
👉 聚合trace数据来获得业务相关指标。
|
||||
|
||||

|
||||

|
||||
<br />
|
||||

|
||||
<br />
|
||||

|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -37,12 +41,12 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
|
||||
|
||||
## 功能:
|
||||
|
||||
- 应用总览矩阵(matrix),如RPS, 50/90/99百分比延迟率,错误率
|
||||
- 应用概览指标(metrics),如RPS, p50/p90/p99延迟率分位值,错误率等。
|
||||
- 应用中最慢的终端(endpoint)
|
||||
- 查看准确的网络请求跟踪来分析下游服务问题、慢数据库查询问题 及调用第三方服务如支付网关的问题
|
||||
- 通过服务名称、操作、延迟、错误、标签来过滤跟踪
|
||||
- 对过滤后的跟踪数据做矩阵聚合。比如,获得过滤条件`customer_type: gold` or `deployment_version: v2` or `external_call: paypal`的错误率和p99延迟
|
||||
- 整合的矩阵和跟踪用户界面。不需要像从Prometheus切换到Jaeger才能调试问题
|
||||
- 查看特定请求的trace数据来分析下游服务问题、慢数据库查询问题 及调用第三方服务如支付网关的问题
|
||||
- 通过服务名称、操作、延迟、错误、标签来过滤traces。
|
||||
- 聚合trace数据(events/spans)来得到业务相关指标。比如,你可以通过过滤条件`customer_type: gold` or `deployment_version: v2` or `external_call: paypal` 来获取指定业务的错误率和p99延迟
|
||||
- 为metrics和trace提供统一的UI。排查问题不需要在Prometheus和Jaeger之间切换。
|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -54,7 +58,7 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
|
||||
|
||||
我们想做一个自服务的开源版本的工具,类似于DataDog和NewRelic,用于那些对客户数据流入第三方有隐私和安全担忧的厂商。
|
||||
|
||||
开源也让你对配置、采样和上线率有完整的控制,你可以在SigNoz基础上构建模块来满足特定的商业需求。
|
||||
开源也让你对配置、采样和正常运行时间有完整的控制,你可以在SigNoz基础上构建模块来满足特定的商业需求。
|
||||
|
||||
### 语言支持
|
||||
|
||||
@@ -72,8 +76,8 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
|
||||
|
||||
## 入门
|
||||
|
||||
|
||||
|
||||
|
||||
### 使用Docker部署
|
||||
|
||||
请按照[这里](https://signoz.io/docs/deployment/docker/)列出的步骤使用Docker来安装
|
||||
@@ -81,35 +85,34 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
|
||||
如果你遇到任何问题,这个[排查指南](https://signoz.io/docs/deployment/troubleshooting)会对你有帮助。
|
||||
|
||||
<p>  </p>
|
||||
|
||||
|
||||
|
||||
|
||||
### 使用Helm在Kubernetes上部署
|
||||
|
||||
请跟着[这里](https://signoz.io/docs/deployment/helm_chart)的步骤使用helm charts安装
|
||||
|
||||
|
||||
<br /><br />
|
||||
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
|
||||
|
||||
## Comparisons to Familiar Tools
|
||||
## 与其他方案的比较
|
||||
|
||||
### SigNoz vs Prometheus
|
||||
|
||||
如果你只是需要矩阵,那Prometheus是不错的,但如果你要无缝的在矩阵和跟踪之间切换,那目前把Prometheus & Jaeger串起来的体验并不好。
|
||||
如果你只是需要监控指标(metrics),那Prometheus是不错的,但如果你要无缝的在metrics和traces之间切换,那目前把Prometheus & Jaeger串起来的体验并不好。
|
||||
|
||||
我们的目标是在矩阵和跟踪之间提供整合的UI - 类似于Datadog这样的Saas厂提供的方案,能够对跟踪进行过滤和聚合,这是目前Jaeger缺失的功能。
|
||||
我们的目标是为metrics和traces提供统一的UI - 类似于Datadog这样的Saas厂提供的方案。并且能够对trace进行过滤和聚合,这是目前Jaeger缺失的功能。
|
||||
|
||||
<p>  </p>
|
||||
|
||||
### SigNoz vs Jaeger
|
||||
|
||||
Jaeger只做分布式跟踪,SigNoz则是做了矩阵和跟踪两块,我们在计划中也有日志管理功能。
|
||||
Jaeger只做分布式追踪(distributed tracing),SigNoz则支持metrics,traces,logs ,即可视化的三大支柱。
|
||||
|
||||
并且SigNoz有一些Jaeger没有的高级功能:
|
||||
|
||||
- Jaegar UI无法在跟踪或过滤的跟踪基础上展示矩阵。
|
||||
- Jaeger不能在过滤的跟踪上进行聚合操作。例如,拥有tag为customer_type='premium'的所有请求的p99延迟,在SigNoz里这很容易实现。
|
||||
- Jaegar UI无法在traces或过滤的traces上展示metrics。
|
||||
- Jaeger不能对过滤的traces做聚合操作。例如,拥有tag为customer_type='premium'的所有请求的p99延迟。而这个功能在SigNoz这儿是很容易实现。
|
||||
|
||||
<br /><br />
|
||||
|
||||
@@ -122,6 +125,23 @@ Jaeger只做分布式跟踪,SigNoz则是做了矩阵和跟踪两块,我们
|
||||
|
||||
还不清楚怎么开始? 只需在[slack社区](https://signoz.io/slack)的`#contributing`频道里ping我们。
|
||||
|
||||
### Project maintainers
|
||||
|
||||
#### Backend
|
||||
|
||||
- [Ankit Nayan](https://github.com/ankitnayan)
|
||||
- [Nityananda Gohain](https://github.com/nityanandagohain)
|
||||
- [Srikanth Chekuri](https://github.com/srikanthccv)
|
||||
- [Vishal Sharma](https://github.com/makeavish)
|
||||
|
||||
#### Frontend
|
||||
|
||||
- [Palash Gupta](https://github.com/palashgdev)
|
||||
|
||||
#### DevOps
|
||||
|
||||
- [Prashant Shahi](https://github.com/prashant-shahi)
|
||||
|
||||
<br /><br />
|
||||
|
||||
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
|
||||
|
||||
18
SECURITY.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# Security Policy
|
||||
|
||||
SigNoz is looking forward to working with security researchers across the world to keep SigNoz and our users safe. If you have found an issue in our systems/applications, please reach out to us.
|
||||
|
||||
## Supported Versions
|
||||
We always recommend using the latest version of SigNoz to ensure you get all security updates.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
If you believe you have found a security vulnerability within SigNoz, please let us know right away. We'll try and fix the problem as soon as possible.
|
||||
|
||||
**Do not report vulnerabilities using public GitHub issues**. Instead, email <security@signoz.io> with a detailed account of the issue. Please submit one issue per email, this helps us triage vulnerabilities.
|
||||
|
||||
Once we've received your email we'll keep you updated as we fix the vulnerability.
|
||||
|
||||
## Thanks
|
||||
|
||||
Thank you for keeping SigNoz and our users safe. 🙇
|
||||
File diff suppressed because it is too large
29
deploy/docker-swarm/clickhouse-setup/clickhouse-storage.xml
Normal file
@@ -0,0 +1,29 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<default>
|
||||
<keep_free_space_bytes>10485760</keep_free_space_bytes>
|
||||
</default>
|
||||
<s3>
|
||||
<type>s3</type>
|
||||
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
|
||||
<access_key_id>ACCESS-KEY-ID</access_key_id>
|
||||
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
|
||||
</s3>
|
||||
</disks>
|
||||
<policies>
|
||||
<tiered>
|
||||
<volumes>
|
||||
<default>
|
||||
<disk>default</disk>
|
||||
</default>
|
||||
<s3>
|
||||
<disk>s3</disk>
|
||||
<perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
|
||||
</s3>
|
||||
</volumes>
|
||||
</tiered>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
</clickhouse>
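This file only declares the disks and the `tiered` policy; nothing is stored on S3 until a table is pointed at that policy. A hedged sketch of how that is usually done in ClickHouse, with a hypothetical table and timestamp column, after replacing the bucket and credential placeholders above:

```sh
# Hypothetical example: attach the "tiered" policy and move parts older than 30 days to the s3 volume
clickhouse-client --query "ALTER TABLE my_database.my_table MODIFY SETTING storage_policy = 'tiered'"
clickhouse-client --query "ALTER TABLE my_database.my_table MODIFY TTL toDateTime(timestamp) + INTERVAL 30 DAY TO VOLUME 's3'"
```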
|
||||
123
deploy/docker-swarm/clickhouse-setup/clickhouse-users.xml
Normal file
@@ -0,0 +1,123 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<!-- See also the files in users.d directory where the settings can be overridden. -->
|
||||
|
||||
<!-- Profiles of settings. -->
|
||||
<profiles>
|
||||
<!-- Default settings. -->
|
||||
<default>
|
||||
<!-- Maximum memory usage for processing single query, in bytes. -->
|
||||
<max_memory_usage>10000000000</max_memory_usage>
|
||||
|
||||
<!-- How to choose between replicas during distributed query processing.
|
||||
random - choose random replica from set of replicas with minimum number of errors
|
||||
nearest_hostname - from set of replicas with minimum number of errors, choose replica
|
||||
with minimum number of different symbols between replica's hostname and local hostname
|
||||
(Hamming distance).
|
||||
in_order - first live replica is chosen in specified order.
|
||||
first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
|
||||
-->
|
||||
<load_balancing>random</load_balancing>
|
||||
</default>
|
||||
|
||||
<!-- Profile that allows only read queries. -->
|
||||
<readonly>
|
||||
<readonly>1</readonly>
|
||||
</readonly>
|
||||
</profiles>
|
||||
|
||||
<!-- Users and ACL. -->
|
||||
<users>
|
||||
<!-- If user name was not specified, 'default' user is used. -->
|
||||
<default>
|
||||
<!-- See also the files in users.d directory where the password can be overridden.
|
||||
|
||||
Password could be specified in plaintext or in SHA256 (in hex format).
|
||||
|
||||
If you want to specify password in plaintext (not recommended), place it in 'password' element.
|
||||
Example: <password>qwerty</password>.
|
||||
Password could be empty.
|
||||
|
||||
If you want to specify SHA256, place it in 'password_sha256_hex' element.
|
||||
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
|
||||
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
|
||||
|
||||
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
|
||||
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
|
||||
|
||||
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
|
||||
place its name in 'server' element inside 'ldap' element.
|
||||
Example: <ldap><server>my_ldap_server</server></ldap>
|
||||
|
||||
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
|
||||
place 'kerberos' element instead of 'password' (and similar) elements.
|
||||
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
|
||||
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
|
||||
whose initiator's realm matches it.
|
||||
Example: <kerberos />
|
||||
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
|
||||
|
||||
How to generate decent password:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
|
||||
In first line will be password and in second - corresponding SHA256.
|
||||
|
||||
How to generate double SHA1:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
|
||||
In first line will be password and in second - corresponding double SHA1.
|
||||
-->
|
||||
<password></password>
|
||||
|
||||
<!-- List of networks with open access.
|
||||
|
||||
To open access from everywhere, specify:
|
||||
<ip>::/0</ip>
|
||||
|
||||
To open access only from localhost, specify:
|
||||
<ip>::1</ip>
|
||||
<ip>127.0.0.1</ip>
|
||||
|
||||
Each element of list has one of the following forms:
|
||||
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
|
||||
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
|
||||
<host> Hostname. Example: server01.clickhouse.com.
|
||||
To check access, DNS query is performed, and all received addresses compared to peer address.
|
||||
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
|
||||
To check access, DNS PTR query is performed for peer address and then regexp is applied.
|
||||
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
|
||||
It is strongly recommended that the regexp ends with $
|
||||
All results of DNS requests are cached till server restart.
|
||||
-->
|
||||
<networks>
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
|
||||
<!-- Settings profile for user. -->
|
||||
<profile>default</profile>
|
||||
|
||||
<!-- Quota for user. -->
|
||||
<quota>default</quota>
|
||||
|
||||
<!-- User can create other users and grant rights to them. -->
|
||||
<!-- <access_management>1</access_management> -->
|
||||
</default>
|
||||
</users>
|
||||
|
||||
<!-- Quotas. -->
|
||||
<quotas>
|
||||
<!-- Name of quota. -->
|
||||
<default>
|
||||
<!-- Limits for time interval. You could specify many intervals with different limits. -->
|
||||
<interval>
|
||||
<!-- Length of interval. -->
|
||||
<duration>3600</duration>
|
||||
|
||||
<!-- No limits. Just calculate resource usage for time interval. -->
|
||||
<queries>0</queries>
|
||||
<errors>0</errors>
|
||||
<result_rows>0</result_rows>
|
||||
<read_rows>0</read_rows>
|
||||
<execution_time>0</execution_time>
|
||||
</interval>
|
||||
</default>
|
||||
</quotas>
|
||||
</clickhouse>
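This default user ships with an empty password and network access from anywhere (`::/0`). Following the hints in the comments above, one way to tighten it is a `users.d` override with a SHA256 password; the file name and mount location below are illustrative:

```sh
# Generate a password and its SHA256 hash (same recipe as in the comment above)
PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"
HASH=$(echo -n "$PASSWORD" | sha256sum | tr -d ' -')

# Drop-in override, to be mounted at /etc/clickhouse-server/users.d/ in the container
cat > set-default-password.xml <<EOF
<clickhouse>
  <users>
    <default>
      <password remove="1"/>
      <password_sha256_hex>${HASH}</password_sha256_hex>
    </default>
  </users>
</clickhouse>
EOF
```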
|
||||
@@ -2,13 +2,23 @@ version: "3.9"
|
||||
|
||||
services:
|
||||
clickhouse:
|
||||
image: yandex/clickhouse-server:21.12.3.32
|
||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
||||
# ports:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
tty: true
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
||||
@@ -17,42 +27,55 @@ services:
|
||||
retries: 3
|
||||
|
||||
alertmanager:
|
||||
image: signoz/alertmanager:0.5.0
|
||||
image: signoz/alertmanager:0.23.0-0.2
|
||||
volumes:
|
||||
- ./alertmanager.yml:/prometheus/alertmanager.yml
|
||||
- ./data/alertmanager:/data
|
||||
command:
|
||||
- '--config.file=/prometheus/alertmanager.yml'
|
||||
- '--storage.path=/data'
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
depends_on:
|
||||
- query-service
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
query-service:
|
||||
image: signoz/query-service:0.7.2
|
||||
image: signoz/query-service:0.11.2
|
||||
command: ["-config=/root/config/prometheus.yml"]
|
||||
ports:
|
||||
- "8080:8080"
|
||||
# ports:
|
||||
# - "6060:6060" # pprof port
|
||||
# - "8080:8080" # query-service port
|
||||
volumes:
|
||||
- ./prometheus.yml:/root/config/prometheus.yml
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
|
||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-swarm
|
||||
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- clickhouse
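With the host ports commented out, the simplest check that query-service is healthy is to call the same endpoint the healthcheck uses from inside the running task container (the name filter below is illustrative and assumes a single matching container):

```sh
# Hit the version endpoint from inside the query-service container
CID=$(docker ps --filter name=query-service --format '{{.ID}}' | head -n1)
docker exec "$CID" wget -qO- localhost:8080/api/v1/version
```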
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.7.2
|
||||
image: signoz/frontend:0.11.2
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
depends_on:
|
||||
- alertmanager
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
@@ -60,36 +83,43 @@ services:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
otel-collector:
|
||||
image: signoz/otelcontribcol:0.43.0
|
||||
image: signoz/signoz-otel-collector:0.55.3
|
||||
command: ["--config=/etc/otel-collector-config.yaml"]
|
||||
user: root # required for reading docker container logs
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
# - "8889:8889" # Prometheus metrics exposed by the agent
|
||||
# - "13133" # health_check
|
||||
# - "14268:14268" # Jaeger receiver
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
||||
# - "9411:9411" # Zipkin port
|
||||
# - "13133:13133" # Health check extension
|
||||
# - "14250:14250" # Jaeger gRPC
|
||||
# - "14268:14268" # Jaeger thrift HTTP
|
||||
# - "55678:55678" # OpenCensus receiver
|
||||
# - "55679:55679" # zpages extension
|
||||
# - "55680:55680" # OTLP gRPC legacy receiver
|
||||
# - "55681:55681" # OTLP HTTP legacy receiver
|
||||
# - "55679:55679" # zPages extension
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 3
|
||||
mode: global
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
resources:
|
||||
limits:
|
||||
memory: 2000m
|
||||
depends_on:
|
||||
- clickhouse
|
||||
|
||||
otel-collector-metrics:
|
||||
image: signoz/otelcontribcol:0.43.0
|
||||
image: signoz/signoz-otel-collector:0.55.3
|
||||
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
|
||||
volumes:
|
||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||
# ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "13133:13133" # Health check extension
|
||||
# - "55679:55679" # zPages extension
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
@@ -1,33 +1,89 @@
|
||||
receivers:
|
||||
filelog/dockercontainers:
|
||||
include: [ "/var/lib/docker/containers/*/*.log" ]
|
||||
start_at: end
|
||||
include_file_path: true
|
||||
include_file_name: false
|
||||
operators:
|
||||
- type: json_parser
|
||||
id: parser-docker
|
||||
output: extract_metadata_from_filepath
|
||||
timestamp:
|
||||
parse_from: attributes.time
|
||||
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
|
||||
- type: regex_parser
|
||||
id: extract_metadata_from_filepath
|
||||
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
|
||||
parse_from: attributes["log.file.path"]
|
||||
output: parse_body
|
||||
- type: move
|
||||
id: parse_body
|
||||
from: attributes.log
|
||||
to: body
|
||||
output: time
|
||||
- type: remove
|
||||
id: time
|
||||
field: attributes.time
|
||||
opencensus:
|
||||
endpoint: 0.0.0.0:55678
|
||||
otlp/spanmetrics:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: "localhost:12345"
|
||||
endpoint: localhost:12345
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:14250
|
||||
thrift_http:
|
||||
endpoint: 0.0.0.0:14268
|
||||
# thrift_compact:
|
||||
# endpoint: 0.0.0.0:6831
|
||||
# thrift_binary:
|
||||
# endpoint: 0.0.0.0:6832
|
||||
hostmetrics:
|
||||
collection_interval: 30s
|
||||
scrapers:
|
||||
cpu:
|
||||
load:
|
||||
memory:
|
||||
disk:
|
||||
filesystem:
|
||||
network:
|
||||
cpu: {}
|
||||
load: {}
|
||||
memory: {}
|
||||
disk: {}
|
||||
filesystem: {}
|
||||
network: {}
|
||||
prometheus:
|
||||
config:
|
||||
global:
|
||||
scrape_interval: 60s
|
||||
scrape_configs:
|
||||
# otel-collector internal metrics
|
||||
- job_name: otel-collector
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost:8888
|
||||
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 1000
|
||||
send_batch_size: 10000
|
||||
send_batch_max_size: 11000
|
||||
timeout: 10s
|
||||
resourcedetection:
|
||||
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
||||
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
|
||||
timeout: 2s
|
||||
signozspanmetrics/prometheus:
|
||||
metrics_exporter: prometheus
|
||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||
dimensions_cache_size: 10000
|
||||
dimensions:
|
||||
- name: service.namespace
|
||||
default: default
|
||||
- name: deployment.environment
|
||||
default: default
|
||||
# memory_limiter:
|
||||
# # 80% of maximum memory up to 2G
|
||||
# limit_mib: 1500
|
||||
@@ -43,29 +99,58 @@ processors:
|
||||
# num_workers: 4
|
||||
# queue_size: 100
|
||||
# retry_on_failure: true
|
||||
extensions:
|
||||
health_check: {}
|
||||
zpages: {}
|
||||
|
||||
exporters:
|
||||
clickhouse:
|
||||
datasource: tcp://clickhouse:9000
|
||||
clickhousetraces:
|
||||
datasource: tcp://clickhouse:9000/?database=signoz_traces
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true
|
||||
prometheus:
|
||||
endpoint: "0.0.0.0:8889"
|
||||
endpoint: 0.0.0.0:8889
|
||||
# logging: {}
|
||||
clickhouselogsexporter:
|
||||
dsn: tcp://clickhouse:9000/
|
||||
timeout: 5s
|
||||
sending_queue:
|
||||
queue_size: 100
|
||||
retry_on_failure:
|
||||
enabled: true
|
||||
initial_interval: 5s
|
||||
max_interval: 30s
|
||||
max_elapsed_time: 300s
|
||||
|
||||
extensions:
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1777
|
||||
|
||||
service:
|
||||
extensions: [health_check, zpages]
|
||||
telemetry:
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions: [health_check, zpages, pprof]
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [jaeger, otlp]
|
||||
processors: [signozspanmetrics/prometheus, batch]
|
||||
exporters: [clickhouse]
|
||||
exporters: [clickhousetraces]
|
||||
metrics:
|
||||
receivers: [otlp, hostmetrics]
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
metrics/generic:
|
||||
receivers: [hostmetrics, prometheus]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
metrics/spanmetrics:
|
||||
receivers: [otlp/spanmetrics]
|
||||
exporters: [prometheus]
|
||||
exporters: [prometheus]
|
||||
logs:
|
||||
receivers: [otlp, filelog/dockercontainers]
|
||||
processors: [batch]
|
||||
exporters: [clickhouselogsexporter]
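To sanity-check the traces pipeline end to end, you can post a span to the OTLP HTTP receiver published on port 4318 in the compose files; `trace.json` here is assumed to already contain a valid OTLP trace export payload (a generator such as `telemetrygen` is usually easier than writing one by hand):

```sh
# Send a pre-built OTLP/HTTP trace payload to the collector
curl -sS -X POST http://localhost:4318/v1/traces \
  -H 'Content-Type: application/json' \
  --data @trace.json
```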
|
||||
|
||||
@@ -1,20 +1,26 @@
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
http:
|
||||
|
||||
# Data sources: metrics
|
||||
prometheus:
|
||||
config:
|
||||
scrape_configs:
|
||||
- job_name: "otel-collector"
|
||||
scrape_interval: 30s
|
||||
# otel-collector-metrics internal metrics
|
||||
- job_name: otel-collector-metrics
|
||||
scrape_interval: 60s
|
||||
static_configs:
|
||||
- targets: ["otel-collector:8889"]
|
||||
- targets:
|
||||
- localhost:8888
|
||||
# SigNoz span metrics
|
||||
- job_name: signozspanmetrics-collector
|
||||
scrape_interval: 60s
|
||||
dns_sd_configs:
|
||||
- names:
|
||||
- tasks.otel-collector
|
||||
type: A
|
||||
port: 8889
|
||||
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 1000
|
||||
send_batch_size: 10000
|
||||
send_batch_max_size: 11000
|
||||
timeout: 10s
|
||||
# memory_limiter:
|
||||
# # 80% of maximum memory up to 2G
|
||||
@@ -31,17 +37,26 @@ processors:
|
||||
# num_workers: 4
|
||||
# queue_size: 100
|
||||
# retry_on_failure: true
|
||||
extensions:
|
||||
health_check: {}
|
||||
zpages: {}
|
||||
|
||||
exporters:
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
|
||||
extensions:
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1777
|
||||
|
||||
service:
|
||||
extensions: [health_check, zpages]
|
||||
telemetry:
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions: [health_check, zpages, pprof]
|
||||
pipelines:
|
||||
metrics:
|
||||
receivers: [otlp, prometheus]
|
||||
receivers: [prometheus]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite]
|
||||
exporters: [clickhousemetricswrite]
|
||||
|
||||
@@ -19,8 +19,7 @@ rule_files:
|
||||
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
# Here it's Prometheus itself.
|
||||
scrape_configs:
|
||||
|
||||
scrape_configs: []
|
||||
|
||||
remote_read:
|
||||
- url: tcp://clickhouse:9000/?database=signoz_metrics
|
||||
|
||||
@@ -1,30 +1,41 @@
|
||||
server {
|
||||
listen 3301;
|
||||
server_name _;
|
||||
|
||||
|
||||
gzip on;
|
||||
gzip_static on;
|
||||
gzip_static on;
|
||||
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
|
||||
gzip_proxied any;
|
||||
gzip_vary on;
|
||||
gzip_comp_level 6;
|
||||
gzip_buffers 16 8k;
|
||||
gzip_http_version 1.1;
|
||||
gzip_http_version 1.1;
|
||||
|
||||
# to handle uri issue 414 from nginx
|
||||
client_max_body_size 24M;
|
||||
large_client_header_buffers 8 128k;
|
||||
|
||||
location / {
|
||||
root /usr/share/nginx/html;
|
||||
index index.html index.htm;
|
||||
try_files $uri $uri/ /index.html;
|
||||
if ( $uri = '/index.html' ) {
|
||||
add_header Cache-Control no-store always;
|
||||
}
|
||||
root /usr/share/nginx/html;
|
||||
index index.html index.htm;
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
|
||||
location /api/alertmanager {
|
||||
proxy_pass http://alertmanager:9093/api/v2;
|
||||
}
|
||||
|
||||
location /api {
|
||||
proxy_pass http://query-service:8080/api;
|
||||
|
||||
proxy_pass http://query-service:8080/api;
|
||||
}
|
||||
|
||||
# redirect server error pages to the static page /50x.html
|
||||
#
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
root /usr/share/nginx/html;
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
}
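Since the frontend container publishes port 3301, this config can be spot-checked from the host: the root location should serve the UI, and `/api` should proxy through to query-service (the endpoint path is the same one used by the compose healthchecks):

```sh
# Served directly by nginx
curl -sI http://localhost:3301/

# Proxied to query-service:8080 via the /api location
curl -s http://localhost:3301/api/v1/version
```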
|
||||
File diff suppressed because it is too large
29
deploy/docker/clickhouse-setup/clickhouse-storage.xml
Normal file
@@ -0,0 +1,29 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<default>
|
||||
<keep_free_space_bytes>10485760</keep_free_space_bytes>
|
||||
</default>
|
||||
<s3>
|
||||
<type>s3</type>
|
||||
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
|
||||
<access_key_id>ACCESS-KEY-ID</access_key_id>
|
||||
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
|
||||
</s3>
|
||||
</disks>
|
||||
<policies>
|
||||
<tiered>
|
||||
<volumes>
|
||||
<default>
|
||||
<disk>default</disk>
|
||||
</default>
|
||||
<s3>
|
||||
<disk>s3</disk>
|
||||
<perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
|
||||
</s3>
|
||||
</volumes>
|
||||
</tiered>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
</clickhouse>
|
||||
123
deploy/docker/clickhouse-setup/clickhouse-users.xml
Normal file
@@ -0,0 +1,123 @@
|
||||
<?xml version="1.0"?>
|
||||
<clickhouse>
|
||||
<!-- See also the files in users.d directory where the settings can be overridden. -->
|
||||
|
||||
<!-- Profiles of settings. -->
|
||||
<profiles>
|
||||
<!-- Default settings. -->
|
||||
<default>
|
||||
<!-- Maximum memory usage for processing single query, in bytes. -->
|
||||
<max_memory_usage>10000000000</max_memory_usage>
|
||||
|
||||
<!-- How to choose between replicas during distributed query processing.
|
||||
random - choose random replica from set of replicas with minimum number of errors
|
||||
nearest_hostname - from set of replicas with minimum number of errors, choose replica
|
||||
with minimum number of different symbols between replica's hostname and local hostname
|
||||
(Hamming distance).
|
||||
in_order - first live replica is chosen in specified order.
|
||||
first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
|
||||
-->
|
||||
<load_balancing>random</load_balancing>
|
||||
</default>
|
||||
|
||||
<!-- Profile that allows only read queries. -->
|
||||
<readonly>
|
||||
<readonly>1</readonly>
|
||||
</readonly>
|
||||
</profiles>
|
||||
|
||||
<!-- Users and ACL. -->
|
||||
<users>
|
||||
<!-- If user name was not specified, 'default' user is used. -->
|
||||
<default>
|
||||
<!-- See also the files in users.d directory where the password can be overridden.
|
||||
|
||||
Password could be specified in plaintext or in SHA256 (in hex format).
|
||||
|
||||
If you want to specify password in plaintext (not recommended), place it in 'password' element.
|
||||
Example: <password>qwerty</password>.
|
||||
Password could be empty.
|
||||
|
||||
If you want to specify SHA256, place it in 'password_sha256_hex' element.
|
||||
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
|
||||
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
|
||||
|
||||
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
|
||||
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
|
||||
|
||||
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
|
||||
place its name in 'server' element inside 'ldap' element.
|
||||
Example: <ldap><server>my_ldap_server</server></ldap>
|
||||
|
||||
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
|
||||
place 'kerberos' element instead of 'password' (and similar) elements.
|
||||
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
|
||||
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
|
||||
whose initiator's realm matches it.
|
||||
Example: <kerberos />
|
||||
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
|
||||
|
||||
How to generate decent password:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
|
||||
In first line will be password and in second - corresponding SHA256.
|
||||
|
||||
How to generate double SHA1:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
|
||||
In first line will be password and in second - corresponding double SHA1.
|
||||
-->
|
||||
<password></password>
|
||||
|
||||
<!-- List of networks with open access.
|
||||
|
||||
To open access from everywhere, specify:
|
||||
<ip>::/0</ip>
|
||||
|
||||
To open access only from localhost, specify:
|
||||
<ip>::1</ip>
|
||||
<ip>127.0.0.1</ip>
|
||||
|
||||
Each element of list has one of the following forms:
|
||||
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
|
||||
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
|
||||
<host> Hostname. Example: server01.clickhouse.com.
|
||||
To check access, DNS query is performed, and all received addresses compared to peer address.
|
||||
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
|
||||
To check access, DNS PTR query is performed for peer address and then regexp is applied.
|
||||
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
|
||||
It is strongly recommended that the regexp ends with $
|
||||
All results of DNS requests are cached till server restart.
|
||||
-->
|
||||
<networks>
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
|
||||
<!-- Settings profile for user. -->
|
||||
<profile>default</profile>
|
||||
|
||||
<!-- Quota for user. -->
|
||||
<quota>default</quota>
|
||||
|
||||
<!-- User can create other users and grant rights to them. -->
|
||||
<!-- <access_management>1</access_management> -->
|
||||
</default>
|
||||
</users>
|
||||
|
||||
<!-- Quotas. -->
|
||||
<quotas>
|
||||
<!-- Name of quota. -->
|
||||
<default>
|
||||
<!-- Limits for time interval. You could specify many intervals with different limits. -->
|
||||
<interval>
|
||||
<!-- Length of interval. -->
|
||||
<duration>3600</duration>
|
||||
|
||||
<!-- No limits. Just calculate resource usage for time interval. -->
|
||||
<queries>0</queries>
|
||||
<errors>0</errors>
|
||||
<result_rows>0</result_rows>
|
||||
<read_rows>0</read_rows>
|
||||
<execution_time>0</execution_time>
|
||||
</interval>
|
||||
</default>
|
||||
</quotas>
|
||||
</clickhouse>
|
||||
@@ -2,11 +2,22 @@ version: "2.4"
|
||||
|
||||
services:
|
||||
clickhouse:
|
||||
image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm
|
||||
image: clickhouse/clickhouse-server:22.8.8-alpine
|
||||
container_name: clickhouse
|
||||
# ports:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
tty: true
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
restart: on-failure
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
||||
@@ -15,70 +26,56 @@ services:
|
||||
retries: 3
|
||||
|
||||
alertmanager:
|
||||
image: signoz/alertmanager:0.5.0
|
||||
container_name: alertmanager
|
||||
image: signoz/alertmanager:0.23.0-0.2
|
||||
volumes:
|
||||
- ./alertmanager.yml:/prometheus/alertmanager.yml
|
||||
- ./data/alertmanager:/data
|
||||
command:
|
||||
- '--config.file=/prometheus/alertmanager.yml'
|
||||
- '--storage.path=/data'
|
||||
|
||||
query-service:
|
||||
image: signoz/query-service:0.7.2
|
||||
container_name: query-service
|
||||
command: ["-config=/root/config/prometheus.yml"]
|
||||
volumes:
|
||||
- ./prometheus.yml:/root/config/prometheus.yml
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-standalone-arm
|
||||
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
clickhouse:
|
||||
query-service:
|
||||
condition: service_healthy
|
||||
restart: on-failure
|
||||
command:
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.7.2
|
||||
container_name: frontend
|
||||
depends_on:
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
||||
otel-collector:
|
||||
image: signoz/otelcontribcol:0.43.0
|
||||
container_name: otel-collector
|
||||
image: signoz/signoz-otel-collector:0.55.3
|
||||
command: ["--config=/etc/otel-collector-config.yaml"]
|
||||
# user: root # required for reading docker container logs
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
# - "8889:8889" # Prometheus metrics exposed by the agent
|
||||
# - "13133" # health_check
|
||||
# - "14268:14268" # Jaeger receiver
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "8889:8889" # signoz spanmetrics exposed by the agent
|
||||
# - "9411:9411" # Zipkin port
|
||||
# - "13133:13133" # health check extension
|
||||
# - "14250:14250" # Jaeger gRPC
|
||||
# - "14268:14268" # Jaeger thrift HTTP
|
||||
# - "55678:55678" # OpenCensus receiver
|
||||
# - "55679:55679" # zpages extension
|
||||
# - "55680:55680" # OTLP gRPC legacy port
|
||||
# - "55681:55681" # OTLP HTTP legacy receiver
|
||||
mem_limit: 2000m
|
||||
# - "55679:55679" # zPages extension
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
otel-collector-metrics:
|
||||
image: signoz/otelcontribcol:0.43.0
|
||||
container_name: otel-collector-metrics
|
||||
image: signoz/signoz-otel-collector:0.55.3
|
||||
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
|
||||
volumes:
|
||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||
# ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
# - "8888:8888" # OtelCollector internal metrics
|
||||
# - "13133:13133" # Health check extension
|
||||
# - "55679:55679" # zPages extension
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
clickhouse:
|
||||
@@ -93,7 +90,7 @@ services:
|
||||
max-file: "3"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
56
deploy/docker/clickhouse-setup/docker-compose-local.yaml
Normal file
@@ -0,0 +1,56 @@
|
||||
version: "2.4"
|
||||
|
||||
services:
|
||||
query-service:
|
||||
hostname: query-service
|
||||
build:
|
||||
context: "../../../pkg/query-service"
|
||||
dockerfile: "./Dockerfile"
|
||||
args:
|
||||
LDFLAGS: ""
|
||||
TARGETPLATFORM: "${LOCAL_GOOS}/${LOCAL_GOARCH}"
|
||||
container_name: query-service
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000
|
||||
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
|
||||
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
volumes:
|
||||
- ./prometheus.yml:/root/config/prometheus.yml
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
command: ["-config=/root/config/prometheus.yml"]
|
||||
ports:
|
||||
- "6060:6060"
|
||||
- "8080:8080"
|
||||
restart: on-failure
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
frontend:
|
||||
build:
|
||||
context: "../../../frontend"
|
||||
dockerfile: "./Dockerfile"
|
||||
args:
|
||||
TARGETOS: "${LOCAL_GOOS}"
|
||||
TARGETPLATFORM: "${LOCAL_GOARCH}"
|
||||
container_name: frontend
|
||||
environment:
|
||||
- FRONTEND_API_ENDPOINT=http://query-service:8080
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
- alertmanager
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
@@ -2,11 +2,21 @@ version: "2.4"

services:
  clickhouse:
    image: yandex/clickhouse-server:21.12.3.32
    image: clickhouse/clickhouse-server:22.8.8-alpine
    # ports:
    # - "9000:9000"
    # - "8123:8123"
    tty: true
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
    restart: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
@@ -15,41 +25,55 @@ services:
      retries: 3

  alertmanager:
    image: signoz/alertmanager:0.5.0
    image: signoz/alertmanager:0.23.0-0.2
    volumes:
      - ./alertmanager.yml:/prometheus/alertmanager.yml
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - '--config.file=/prometheus/alertmanager.yml'
      - '--storage.path=/data'
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:0.7.2
    image: signoz/query-service:0.11.2
    container_name: query-service
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
    # - "6060:6060" # pprof port
    # - "8080:8080" # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd

    restart: on-failure
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      clickhouse:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.7.2
    image: signoz/frontend:0.11.2
    container_name: frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
@@ -57,46 +81,56 @@ services:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/otelcontribcol:0.43.0
    image: signoz/signoz-otel-collector:0.55.3
    command: ["--config=/etc/otel-collector-config.yaml"]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8889:8889" # Prometheus metrics exposed by the agent
      # - "13133" # health_check
      # - "14268:14268" # Jaeger receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zpages extension
      # - "55680:55680" # OTLP gRPC legacy port
      # - "55681:55681" # OTLP HTTP legacy receiver
    mem_limit: 2000m
      # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy

  otel-collector-metrics:
    image: signoz/otelcontribcol:0.43.0
    image: signoz/signoz-otel-collector:0.55.3
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    # ports:
    # - "1777:1777" # pprof extension
    # - "8888:8888" # OtelCollector internal metrics
    # - "13133:13133" # Health check extension
    # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"

@@ -1,33 +1,85 @@
receivers:
  filelog/dockercontainers:
    include: [ "/var/lib/docker/containers/*/*.log" ]
    start_at: end
    include_file_path: true
    include_file_name: false
    operators:
      - type: json_parser
        id: parser-docker
        output: extract_metadata_from_filepath
        timestamp:
          parse_from: attributes.time
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
      - type: regex_parser
        id: extract_metadata_from_filepath
        regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
        parse_from: attributes["log.file.path"]
        output: parse_body
      - type: move
        id: parse_body
        from: attributes.log
        to: body
        output: time
      - type: remove
        id: time
        field: attributes.time
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: "localhost:12345"
        endpoint: localhost:12345
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 30s
    scrapers:
      cpu:
      load:
      memory:
      disk:
      filesystem:
      network:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        # otel-collector internal metrics
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888

processors:
  batch:
    send_batch_size: 1000
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
    dimensions_cache_size: 10000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
@@ -43,29 +95,66 @@ processors:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
    timeout: 2s

extensions:
  health_check: {}
  zpages: {}
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  prometheus:
    endpoint: "0.0.0.0:8889"
    endpoint: 0.0.0.0:8889
  # logging: {}

  clickhouselogsexporter:
    dsn: tcp://clickhouse:9000/
    timeout: 5s
    sending_queue:
      queue_size: 100
    retry_on_failure:
      enabled: true
      initial_interval: 5s
      max_interval: 30s
      max_elapsed_time: 300s

service:
  extensions: [health_check, zpages]
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhouse]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp, hostmetrics]
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/generic:
      receivers: [hostmetrics, prometheus]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
      exporters: [prometheus]
    logs:
      receivers: [otlp, filelog/dockercontainers]
      processors: [batch]
      exporters: [clickhouselogsexporter]
@@ -3,18 +3,26 @@ receivers:
    protocols:
      grpc:
      http:

  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        - job_name: "otel-collector"
          scrape_interval: 30s
        # otel-collector-metrics internal metrics
        - job_name: otel-collector-metrics
          scrape_interval: 60s
          static_configs:
            - targets: ["otel-collector:8889"]
            - targets:
                - localhost:8888
        # SigNoz span metrics
        - job_name: signozspanmetrics-collector
          scrape_interval: 60s
          static_configs:
            - targets:
                - otel-collector:8889

processors:
  batch:
    send_batch_size: 1000
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
@@ -31,17 +39,29 @@ processors:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

extensions:
  health_check: {}
  zpages: {}
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

service:
  extensions: [health_check, zpages]
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    metrics:
      receivers: [otlp, prometheus]
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
      exporters: [clickhousemetricswrite]

@@ -19,8 +19,7 @@ rule_files:

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:

scrape_configs: []

remote_read:
  - url: tcp://clickhouse:9000/?database=signoz_metrics

@@ -1,33 +1,41 @@
server {
    listen 3301;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    gzip_http_version 1.1;

    # to handle uri issue 414 from nginx
    client_max_body_size 24M;
    large_client_header_buffers 8 128k;

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }

    location /api/alertmanager{
        proxy_pass http://alertmanager:9093/api/v2;

    location /api/alertmanager {
        proxy_pass http://alertmanager:9093/api/v2;
    }

    location /api {
        proxy_pass http://query-service:8080/api;

        proxy_pass http://query-service:8080/api;
    }

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
        root /usr/share/nginx/html;
    }
}
@@ -1,273 +0,0 @@
|
||||
version: "2.4"
|
||||
|
||||
volumes:
|
||||
metadata_data: {}
|
||||
middle_var: {}
|
||||
historical_var: {}
|
||||
broker_var: {}
|
||||
coordinator_var: {}
|
||||
router_var: {}
|
||||
|
||||
# If able to connect to kafka but not able to write to topic otlp_spans look into below link
|
||||
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
|
||||
|
||||
services:
|
||||
|
||||
zookeeper:
|
||||
image: bitnami/zookeeper:3.6.2-debian-10-r100
|
||||
ports:
|
||||
- "2181:2181"
|
||||
environment:
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
|
||||
|
||||
kafka:
|
||||
# image: wurstmeister/kafka
|
||||
image: bitnami/kafka:2.7.0-debian-10-r1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
hostname: kafka
|
||||
environment:
|
||||
KAFKA_ADVERTISED_HOST_NAME: kafka
|
||||
KAFKA_ADVERTISED_PORT: 9092
|
||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
|
||||
ALLOW_PLAINTEXT_LISTENER: 'yes'
|
||||
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
|
||||
|
||||
healthcheck:
|
||||
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
|
||||
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 10
|
||||
depends_on:
|
||||
- zookeeper
|
||||
|
||||
postgres:
|
||||
container_name: postgres
|
||||
image: postgres:latest
|
||||
volumes:
|
||||
- metadata_data:/var/lib/postgresql/data
|
||||
environment:
|
||||
- POSTGRES_PASSWORD=FoolishPassword
|
||||
- POSTGRES_USER=druid
|
||||
- POSTGRES_DB=druid
|
||||
|
||||
coordinator:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: coordinator
|
||||
volumes:
|
||||
- ./storage:/opt/data
|
||||
- coordinator_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
ports:
|
||||
- "8081:8081"
|
||||
command:
|
||||
- coordinator
|
||||
env_file:
|
||||
- environment_tiny/coordinator
|
||||
- environment_tiny/common
|
||||
|
||||
broker:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: broker
|
||||
volumes:
|
||||
- broker_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8082:8082"
|
||||
command:
|
||||
- broker
|
||||
env_file:
|
||||
- environment_tiny/broker
|
||||
- environment_tiny/common
|
||||
|
||||
historical:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: historical
|
||||
volumes:
|
||||
- ./storage:/opt/data
|
||||
- historical_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8083:8083"
|
||||
command:
|
||||
- historical
|
||||
env_file:
|
||||
- environment_tiny/historical
|
||||
- environment_tiny/common
|
||||
|
||||
middlemanager:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: middlemanager
|
||||
volumes:
|
||||
- ./storage:/opt/data
|
||||
- middle_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8091:8091"
|
||||
command:
|
||||
- middleManager
|
||||
env_file:
|
||||
- environment_tiny/middlemanager
|
||||
- environment_tiny/common
|
||||
|
||||
router:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: router
|
||||
volumes:
|
||||
- router_var:/opt/druid/var
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8888:8888"
|
||||
command:
|
||||
- router
|
||||
env_file:
|
||||
- environment_tiny/router
|
||||
- environment_tiny/common
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
flatten-processor:
|
||||
image: signoz/flattener-processor:0.4.0
|
||||
container_name: flattener-processor
|
||||
|
||||
depends_on:
|
||||
- kafka
|
||||
- otel-collector
|
||||
ports:
|
||||
- "8000:8000"
|
||||
|
||||
environment:
|
||||
- KAFKA_BROKER=kafka:9092
|
||||
- KAFKA_INPUT_TOPIC=otlp_spans
|
||||
- KAFKA_OUTPUT_TOPIC=flattened_spans
|
||||
|
||||
|
||||
query-service:
|
||||
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
|
||||
container_name: query-service
|
||||
|
||||
depends_on:
|
||||
router:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- DruidClientUrl=http://router:8888
|
||||
- DruidDatasource=flattened_spans
|
||||
- STORAGE=druid
|
||||
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
|
||||
- GODEBUG=netdns=go
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.4.1
|
||||
container_name: frontend
|
||||
|
||||
depends_on:
|
||||
- query-service
|
||||
links:
|
||||
- "query-service"
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
create-supervisor:
|
||||
image: theithollow/hollowapp-blog:curl
|
||||
container_name: create-supervisor
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
|
||||
|
||||
depends_on:
|
||||
- router
|
||||
restart: on-failure:6
|
||||
|
||||
volumes:
|
||||
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
|
||||
|
||||
|
||||
set-retention:
|
||||
image: theithollow/hollowapp-blog:curl
|
||||
container_name: set-retention
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
|
||||
|
||||
depends_on:
|
||||
- router
|
||||
restart: on-failure:6
|
||||
volumes:
|
||||
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
|
||||
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector:0.18.0
|
||||
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
ports:
|
||||
- "1777:1777" # pprof extension
|
||||
- "8887:8888" # Prometheus metrics exposed by the agent
|
||||
- "14268:14268" # Jaeger receiver
|
||||
- "55678" # OpenCensus receiver
|
||||
- "55680:55680" # OTLP HTTP/2.0 legacy port
|
||||
- "55681:55681" # OTLP HTTP/1.0 receiver
|
||||
- "4317:4317" # OTLP GRPC receiver
|
||||
- "55679:55679" # zpages extension
|
||||
- "13133" # health_check
|
||||
depends_on:
|
||||
kafka:
|
||||
condition: service_healthy
|
||||
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:latest
|
||||
container_name: hotrod
|
||||
ports:
|
||||
- "9000:8080"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
container_name: load-hotrod
|
||||
hostname: load-hotrod
|
||||
ports:
|
||||
- "8089:8089"
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
NO_PROXY: standalone
|
||||
TASK_DELAY_FROM: 5
|
||||
TASK_DELAY_TO: 30
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ../common/locust-scripts:/locust
|
||||
|
||||
@@ -1,269 +0,0 @@
|
||||
version: "2.4"
|
||||
|
||||
volumes:
|
||||
metadata_data: {}
|
||||
middle_var: {}
|
||||
historical_var: {}
|
||||
broker_var: {}
|
||||
coordinator_var: {}
|
||||
router_var: {}
|
||||
|
||||
# If able to connect to kafka but not able to write to topic otlp_spans look into below link
|
||||
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
|
||||
|
||||
services:
|
||||
|
||||
zookeeper:
|
||||
image: bitnami/zookeeper:3.6.2-debian-10-r100
|
||||
ports:
|
||||
- "2181:2181"
|
||||
environment:
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
|
||||
|
||||
kafka:
|
||||
# image: wurstmeister/kafka
|
||||
image: bitnami/kafka:2.7.0-debian-10-r1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
hostname: kafka
|
||||
environment:
|
||||
KAFKA_ADVERTISED_HOST_NAME: kafka
|
||||
KAFKA_ADVERTISED_PORT: 9092
|
||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
|
||||
ALLOW_PLAINTEXT_LISTENER: 'yes'
|
||||
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
|
||||
|
||||
healthcheck:
|
||||
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
|
||||
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 10
|
||||
depends_on:
|
||||
- zookeeper
|
||||
|
||||
postgres:
|
||||
container_name: postgres
|
||||
image: postgres:latest
|
||||
volumes:
|
||||
- metadata_data:/var/lib/postgresql/data
|
||||
environment:
|
||||
- POSTGRES_PASSWORD=FoolishPassword
|
||||
- POSTGRES_USER=druid
|
||||
- POSTGRES_DB=druid
|
||||
|
||||
coordinator:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: coordinator
|
||||
volumes:
|
||||
- ./storage:/opt/druid/deepStorage
|
||||
- coordinator_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
ports:
|
||||
- "8081:8081"
|
||||
command:
|
||||
- coordinator
|
||||
env_file:
|
||||
- environment_small/coordinator
|
||||
|
||||
broker:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: broker
|
||||
volumes:
|
||||
- broker_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8082:8082"
|
||||
command:
|
||||
- broker
|
||||
env_file:
|
||||
- environment_small/broker
|
||||
|
||||
historical:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: historical
|
||||
volumes:
|
||||
- ./storage:/opt/druid/deepStorage
|
||||
- historical_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8083:8083"
|
||||
command:
|
||||
- historical
|
||||
env_file:
|
||||
- environment_small/historical
|
||||
|
||||
middlemanager:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: middlemanager
|
||||
volumes:
|
||||
- ./storage:/opt/druid/deepStorage
|
||||
- middle_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8091:8091"
|
||||
command:
|
||||
- middleManager
|
||||
env_file:
|
||||
- environment_small/middlemanager
|
||||
|
||||
router:
|
||||
image: apache/druid:0.20.0
|
||||
container_name: router
|
||||
volumes:
|
||||
- router_var:/opt/druid/data
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- postgres
|
||||
- coordinator
|
||||
ports:
|
||||
- "8888:8888"
|
||||
command:
|
||||
- router
|
||||
env_file:
|
||||
- environment_small/router
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
flatten-processor:
|
||||
image: signoz/flattener-processor:0.4.0
|
||||
container_name: flattener-processor
|
||||
|
||||
depends_on:
|
||||
- kafka
|
||||
- otel-collector
|
||||
ports:
|
||||
- "8000:8000"
|
||||
|
||||
environment:
|
||||
- KAFKA_BROKER=kafka:9092
|
||||
- KAFKA_INPUT_TOPIC=otlp_spans
|
||||
- KAFKA_OUTPUT_TOPIC=flattened_spans
|
||||
|
||||
|
||||
query-service:
|
||||
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
|
||||
container_name: query-service
|
||||
|
||||
depends_on:
|
||||
router:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "8080:8080"
|
||||
|
||||
volumes:
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- DruidClientUrl=http://router:8888
|
||||
- DruidDatasource=flattened_spans
|
||||
- STORAGE=druid
|
||||
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
|
||||
- GODEBUG=netdns=go
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.4.1
|
||||
container_name: frontend
|
||||
|
||||
depends_on:
|
||||
- query-service
|
||||
links:
|
||||
- "query-service"
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ./nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
create-supervisor:
|
||||
image: theithollow/hollowapp-blog:curl
|
||||
container_name: create-supervisor
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
|
||||
|
||||
depends_on:
|
||||
- router
|
||||
restart: on-failure:6
|
||||
|
||||
volumes:
|
||||
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
|
||||
|
||||
|
||||
set-retention:
|
||||
image: theithollow/hollowapp-blog:curl
|
||||
container_name: set-retention
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
|
||||
|
||||
depends_on:
|
||||
- router
|
||||
restart: on-failure:6
|
||||
volumes:
|
||||
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
|
||||
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector:0.18.0
|
||||
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
ports:
|
||||
- "1777:1777" # pprof extension
|
||||
- "8887:8888" # Prometheus metrics exposed by the agent
|
||||
- "14268:14268" # Jaeger receiver
|
||||
- "55678" # OpenCensus receiver
|
||||
- "55680:55680" # OTLP HTTP/2.0 leagcy grpc receiver
|
||||
- "55681:55681" # OTLP HTTP/1.0 receiver
|
||||
- "4317:4317" # OTLP GRPC receiver
|
||||
- "55679:55679" # zpages extension
|
||||
- "13133" # health_check
|
||||
depends_on:
|
||||
kafka:
|
||||
condition: service_healthy
|
||||
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:latest
|
||||
container_name: hotrod
|
||||
ports:
|
||||
- "9000:8080"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
container_name: load-hotrod
|
||||
hostname: load-hotrod
|
||||
ports:
|
||||
- "8089:8089"
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
NO_PROXY: standalone
|
||||
TASK_DELAY_FROM: 5
|
||||
TASK_DELAY_TO: 30
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ./locust-scripts:/locust
|
||||
|
||||
@@ -1 +0,0 @@
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
@@ -1,69 +0,0 @@
|
||||
{
|
||||
"type": "kafka",
|
||||
"dataSchema": {
|
||||
"dataSource": "flattened_spans",
|
||||
"parser": {
|
||||
"type": "string",
|
||||
"parseSpec": {
|
||||
"format": "json",
|
||||
"timestampSpec": {
|
||||
"column": "StartTimeUnixNano",
|
||||
"format": "nano"
|
||||
},
|
||||
"dimensionsSpec": {
|
||||
"dimensions": [
|
||||
"TraceId",
|
||||
"SpanId",
|
||||
"ParentSpanId",
|
||||
"Name",
|
||||
"ServiceName",
|
||||
"References",
|
||||
"Tags",
|
||||
"ExternalHttpMethod",
|
||||
"ExternalHttpUrl",
|
||||
"Component",
|
||||
"DBSystem",
|
||||
"DBName",
|
||||
"DBOperation",
|
||||
"PeerService",
|
||||
{
|
||||
"type": "string",
|
||||
"name": "TagsKeys",
|
||||
"multiValueHandling": "ARRAY"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "TagsValues",
|
||||
"multiValueHandling": "ARRAY"
|
||||
},
|
||||
{ "name": "DurationNano", "type": "Long" },
|
||||
{ "name": "Kind", "type": "int" },
|
||||
{ "name": "StatusCode", "type": "int" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"metricsSpec" : [
|
||||
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
|
||||
],
|
||||
"granularitySpec": {
|
||||
"type": "uniform",
|
||||
"segmentGranularity": "DAY",
|
||||
"queryGranularity": "NONE",
|
||||
"rollup": false
|
||||
}
|
||||
},
|
||||
"tuningConfig": {
|
||||
"type": "kafka",
|
||||
"reportParseExceptions": true
|
||||
},
|
||||
"ioConfig": {
|
||||
"topic": "flattened_spans",
|
||||
"replicas": 1,
|
||||
"taskDuration": "PT20M",
|
||||
"completionTimeout": "PT30M",
|
||||
"consumerProperties": {
|
||||
"bootstrap.servers": "kafka:9092"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=512m
|
||||
DRUID_XMS=512m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=768m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=100MiB
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,52 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=64m
|
||||
DRUID_XMS=64m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,53 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=512m
|
||||
DRUID_XMS=512m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=1280m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=200MiB
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=2
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,53 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=1g
|
||||
DRUID_XMS=1g
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=2g
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=200MiB
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=2
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,52 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=128m
|
||||
DRUID_XMS=128m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=128m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/druid/deepStorage
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,52 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=512m
|
||||
DRUID_XMS=512m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=50MiB
|
||||
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,26 +0,0 @@
|
||||
# For S3 storage
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
|
||||
|
||||
|
||||
# druid_storage_type=s3
|
||||
# druid_storage_bucket=<s3-bucket-name>
|
||||
# druid_storage_baseKey=druid/segments
|
||||
|
||||
# AWS_ACCESS_KEY_ID=<s3-access-id>
|
||||
# AWS_SECRET_ACCESS_KEY=<s3-access-key>
|
||||
# AWS_REGION=<s3-aws-region>
|
||||
|
||||
# druid_indexer_logs_type=s3
|
||||
# druid_indexer_logs_s3Bucket=<s3-bucket-name>
|
||||
# druid_indexer_logs_s3Prefix=druid/indexing-logs
|
||||
|
||||
# -----------------------------------------------------------
|
||||
# For local storage
|
||||
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
druid_storage_type=local
|
||||
druid_storage_storageDirectory=/opt/data/segments
|
||||
druid_indexer_logs_type=file
|
||||
druid_indexer_logs_directory=/opt/data/indexing-logs
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=64m
|
||||
DRUID_XMS=64m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,49 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=512m
|
||||
DRUID_XMS=512m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
druid_processing_buffer_sizeBytes=50MiB
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,50 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=64m
|
||||
DRUID_XMS=64m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=400m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,49 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Java tuning
|
||||
DRUID_XMX=64m
|
||||
DRUID_XMS=64m
|
||||
DRUID_MAXNEWSIZE=256m
|
||||
DRUID_NEWSIZE=256m
|
||||
DRUID_MAXDIRECTMEMORYSIZE=128m
|
||||
|
||||
druid_emitter_logging_logLevel=debug
|
||||
|
||||
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
|
||||
|
||||
|
||||
druid_zk_service_host=zookeeper
|
||||
|
||||
druid_metadata_storage_host=
|
||||
druid_metadata_storage_type=postgresql
|
||||
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
|
||||
druid_metadata_storage_connector_user=druid
|
||||
druid_metadata_storage_connector_password=FoolishPassword
|
||||
|
||||
druid_coordinator_balancer_strategy=cachingCost
|
||||
|
||||
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
|
||||
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
|
||||
|
||||
|
||||
druid_processing_numThreads=1
|
||||
druid_processing_numMergeBuffers=2
|
||||
|
||||
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
|
||||
@@ -1,51 +0,0 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  queued_retry:
    num_workers: 4
    queue_size: 100
    retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  kafka/traces:
    brokers:
      - kafka:9092
    topic: 'otlp_spans'
    protocol_version: 2.0.0

  kafka/metrics:
    brokers:
      - kafka:9092
    topic: 'otlp_metrics'
    protocol_version: 2.0.0
service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [memory_limiter, batch, queued_retry]
      exporters: [kafka/traces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [kafka/metrics]
@@ -36,9 +36,9 @@ is_mac() {
|
||||
[[ $OSTYPE == darwin* ]]
|
||||
}
|
||||
|
||||
is_arm64(){
|
||||
[[ `uname -m` == 'arm64' ]]
|
||||
}
|
||||
# is_arm64(){
|
||||
# [[ `uname -m` == 'arm64' ]]
|
||||
# }
|
||||
|
||||
check_os() {
|
||||
if is_mac; then
|
||||
@@ -158,7 +158,9 @@ install_docker() {
|
||||
echo
|
||||
# yum install docker
|
||||
# service docker start
|
||||
$sudo_cmd amazon-linux-extras install docker
|
||||
$sudo_cmd yum install -y amazon-linux-extras
|
||||
$sudo_cmd amazon-linux-extras enable docker
|
||||
$sudo_cmd yum install -y docker
|
||||
else
|
||||
|
||||
yum_cmd="$sudo_cmd yum --assumeyes --quiet"
|
||||
@@ -195,16 +197,21 @@ install_docker_compose() {
|
||||
|
||||
start_docker() {
|
||||
echo -e "🐳 Starting Docker ...\n"
|
||||
if [ $os = "Mac" ]; then
|
||||
if [[ $os == "Mac" ]]; then
|
||||
open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
|
||||
else
|
||||
if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
|
||||
echo "Starting docker service"
|
||||
$sudo_cmd systemctl start docker.service
|
||||
fi
|
||||
if [ -z $sudo_cmd ]; then
|
||||
docker ps > /dev/null && true
|
||||
if [ $? -ne 0 ]; then
|
||||
# if [[ -z $sudo_cmd ]]; then
|
||||
# docker ps > /dev/null && true
|
||||
# if [[ $? -ne 0 ]]; then
|
||||
# request_sudo
|
||||
# fi
|
||||
# fi
|
||||
if [[ -z $sudo_cmd ]]; then
|
||||
if ! docker ps > /dev/null && true; then
|
||||
request_sudo
|
||||
fi
|
||||
fi
|
||||
@@ -230,22 +237,18 @@ wait_for_containers_start() {
|
||||
}
|
||||
|
||||
bye() { # Prints a friendly good bye message and exits the script.
|
||||
if [ "$?" -ne 0 ]; then
|
||||
if [[ "$?" -ne 0 ]]; then
|
||||
set +o errexit
|
||||
|
||||
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
|
||||
echo ""
|
||||
if is_arm64; then
|
||||
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a"
|
||||
else
|
||||
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
|
||||
fi
|
||||
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
|
||||
|
||||
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
|
||||
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
|
||||
echo "++++++++++++++++++++++++++++++++++++++++"
|
||||
|
||||
if [[ email == "" ]]; then
|
||||
if [[ $email == "" ]]; then
|
||||
echo -e "\n📨 Please share your email to receive support with the installation"
|
||||
read -rp 'Email: ' email
|
||||
|
||||
@@ -266,17 +269,21 @@ bye() { # Prints a friendly good bye message and exits the script.
|
||||
|
||||
request_sudo() {
|
||||
if hash sudo 2>/dev/null; then
|
||||
sudo_cmd="sudo"
|
||||
echo -e "\n\n🙇 We will need sudo access to complete the installation."
|
||||
if ! $sudo_cmd -v && (( $EUID != 0 )); then
|
||||
echo -e "Please enter your sudo password now:"
|
||||
|
||||
if ! $sudo_cmd -v; then
|
||||
if (( $EUID != 0 )); then
|
||||
sudo_cmd="sudo"
|
||||
echo -e "Please enter your sudo password, if prompt."
|
||||
# $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
|
||||
# if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
|
||||
# echo "Need sudo privileges to proceed with the installation."
|
||||
# exit 1;
|
||||
# fi
|
||||
if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
|
||||
echo "Need sudo privileges to proceed with the installation."
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
echo -e "Thanks! 🙏\n"
|
||||
echo -e "Got it! Thanks!! 🙏\n"
|
||||
echo -e "Okay! We will bring up the SigNoz cluster from here 🚀\n"
|
||||
fi
|
||||
fi
|
||||
@@ -291,9 +298,6 @@ sudo_cmd=""
|
||||
# Check sudo permissions
|
||||
if (( $EUID != 0 )); then
|
||||
echo "🟡 Running installer with non-sudo permissions."
|
||||
if ! is_command_present docker; then
|
||||
$sudo_cmd docker ps
|
||||
fi
|
||||
echo " In case of any failure or prompt, please consider running the script with sudo privileges."
|
||||
echo ""
|
||||
else
|
||||
@@ -308,8 +312,13 @@ echo -e "🌏 Detecting your OS ...\n"
|
||||
check_os
|
||||
|
||||
# Obtain unique installation id
|
||||
sysinfo="$(uname -a)"
|
||||
if [ $? -ne 0 ]; then
|
||||
# sysinfo="$(uname -a)"
|
||||
# if [[ $? -ne 0 ]]; then
|
||||
# uuid="$(uuidgen)"
|
||||
# uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
|
||||
# sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
|
||||
# fi
|
||||
if ! sysinfo="$(uname -a)"; then
|
||||
uuid="$(uuidgen)"
|
||||
uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
|
||||
sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
|
||||
@@ -324,7 +333,7 @@ elif hash openssl 2>/dev/null; then
|
||||
digest_cmd="openssl dgst -sha256"
|
||||
fi
|
||||
|
||||
if [ -z $digest_cmd ]; then
|
||||
if [[ -z $digest_cmd ]]; then
|
||||
SIGNOZ_INSTALLATION_ID="$sysinfo"
|
||||
else
|
||||
SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
|
||||
@@ -334,7 +343,6 @@ fi
|
||||
|
||||
# echo -e "👉 ${RED}Two ways to go forward\n"
|
||||
# echo -e "${RED}1) ClickHouse as database (default)\n"
|
||||
# echo -e "${RED}2) Kafka + Druid as datastore \n"
|
||||
# read -p "⚙️ Enter your preference (1/2):" choice_setup
|
||||
|
||||
# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
|
||||
@@ -347,8 +355,6 @@ fi
|
||||
|
||||
# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
|
||||
# setup_type='clickhouse'
|
||||
# else
|
||||
# setup_type='druid'
|
||||
# fi
|
||||
|
||||
setup_type='clickhouse'
|
||||
@@ -407,7 +413,7 @@ send_event() {
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "$error" != "" ]; then
|
||||
if [[ "$error" != "" ]]; then
|
||||
error='"error": "'"$error"'", '
|
||||
fi
|
||||
|
||||
@@ -470,22 +476,14 @@ start_docker
|
||||
|
||||
echo ""
|
||||
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
|
||||
if is_arm64; then
|
||||
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull
|
||||
else
|
||||
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
|
||||
fi
|
||||
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
|
||||
|
||||
echo ""
|
||||
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
|
||||
echo
|
||||
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
|
||||
# script doesn't exit because this command looks like it failed to do its thing.
|
||||
if is_arm64; then
|
||||
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true
|
||||
else
|
||||
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
|
||||
fi
|
||||
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
|
||||
|
||||
wait_for_containers_start 60
|
||||
echo ""
|
||||
@@ -514,11 +512,7 @@ else
|
||||
echo -e "🟢 Your frontend is running on http://localhost:3301"
|
||||
echo ""
|
||||
|
||||
if is_arm64; then
|
||||
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v"
|
||||
else
|
||||
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
||||
fi
|
||||
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
||||
|
||||
echo ""
|
||||
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
|
||||
|
||||
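The installer changes above are normally exercised by running the script from the deploy directory of the repository; a hedged reminder of the usual invocation (repository URL and directory layout are assumed here, not taken from this diff):

    # assumes the standard SigNoz repository layout with install.sh under deploy/
    git clone https://github.com/SigNoz/signoz.git
    cd signoz/deploy && ./install.sh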
37
ee/LICENSE
Normal file
@@ -0,0 +1,37 @@
|
||||
|
||||
The SigNoz Enterprise license (the "Enterprise License")
|
||||
Copyright (c) 2020 - present SigNoz Inc.
|
||||
|
||||
With regard to the SigNoz Software:
|
||||
|
||||
This software and associated documentation files (the "Software") may only be
|
||||
used in production, if you (and any entity that you represent) have agreed to,
|
||||
and are in compliance with, the SigNoz Subscription Terms of Service, available
|
||||
via email (hello@signoz.io) (the "Enterprise Terms"), or other
|
||||
agreement governing the use of the Software, as agreed by you and SigNoz,
|
||||
and otherwise have a valid SigNoz Enterprise license for the
|
||||
correct number of user seats. Subject to the foregoing sentence, you are free to
|
||||
modify this Software and publish patches to the Software. You agree that SigNoz
|
||||
and/or its licensors (as applicable) retain all right, title and interest in and
|
||||
to all such modifications and/or patches, and all such modifications and/or
|
||||
patches may only be used, copied, modified, displayed, distributed, or otherwise
|
||||
exploited with a valid SigNoz Enterprise license for the correct
|
||||
number of user seats. Notwithstanding the foregoing, you may copy and modify
|
||||
the Software for development and testing purposes, without requiring a
|
||||
subscription. You agree that SigNoz and/or its licensors (as applicable) retain
|
||||
all right, title and interest in and to all such modifications. You are not
|
||||
granted any other rights beyond what is expressly stated herein. Subject to the
|
||||
foregoing, it is forbidden to copy, merge, publish, distribute, sublicense,
|
||||
and/or sell the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
For all third party components incorporated into the SigNoz Software, those
|
||||
components are licensed under the original license provided by the owner of the
|
||||
applicable component.
|
||||
4
ee/query-service/.dockerignore
Normal file
@@ -0,0 +1,4 @@
|
||||
.vscode
|
||||
README.md
|
||||
signoz.db
|
||||
bin
|
||||
48
ee/query-service/Dockerfile
Normal file
@@ -0,0 +1,48 @@
|
||||
FROM golang:1.17-buster AS builder
|
||||
|
||||
# LD_FLAGS is passed as an argument from the Makefile. It will be empty if no argument is passed
|
||||
ARG LD_FLAGS
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
ENV CGO_ENABLED=1
|
||||
ENV GOPATH=/go
|
||||
|
||||
RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
|
||||
export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2)
|
||||
|
||||
# Prepare and enter src directory
|
||||
WORKDIR /go/src/github.com/signoz/signoz
|
||||
|
||||
# Add the sources and proceed with build
|
||||
ADD . .
|
||||
RUN cd ee/query-service \
|
||||
&& go build -tags timetzdata -a -o ./bin/query-service \
|
||||
-ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" \
|
||||
&& chmod +x ./bin/query-service
|
||||
|
||||
|
||||
# use a minimal alpine image
|
||||
FROM alpine:3.7
|
||||
|
||||
# Add Maintainer Info
|
||||
LABEL maintainer="signoz"
|
||||
|
||||
# add ca-certificates in case you need them
|
||||
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
|
||||
|
||||
# set working directory
|
||||
WORKDIR /root
|
||||
|
||||
# copy the binary from builder
|
||||
COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-service .
|
||||
|
||||
# copy prometheus YAML config
|
||||
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
|
||||
|
||||
# run the binary
|
||||
ENTRYPOINT ["./query-service"]
|
||||
|
||||
CMD ["-config", "../config/prometheus.yml"]
|
||||
# CMD ["./query-service -config /root/config/prometheus.yml"]
|
||||
|
||||
EXPOSE 8080
|
||||
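A sketch of building this image locally from the repository root; the image tag and the TARGETPLATFORM value are illustrative, not taken from the repo's Makefile:

    # build the EE query-service image; LD_FLAGS may also be passed via --build-arg
    docker build -f ee/query-service/Dockerfile \
      --build-arg TARGETPLATFORM=linux/amd64 \
      -t signoz-ee-query-service:local .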
124
ee/query-service/app/api/api.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"go.signoz.io/signoz/ee/query-service/dao"
|
||||
"go.signoz.io/signoz/ee/query-service/interfaces"
|
||||
"go.signoz.io/signoz/ee/query-service/license"
|
||||
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||
rules "go.signoz.io/signoz/pkg/query-service/rules"
|
||||
"go.signoz.io/signoz/pkg/query-service/version"
|
||||
)
|
||||
|
||||
type APIHandlerOptions struct {
|
||||
DataConnector interfaces.DataConnector
|
||||
AppDao dao.ModelDao
|
||||
RulesManager *rules.Manager
|
||||
FeatureFlags baseint.FeatureLookup
|
||||
LicenseManager *license.Manager
|
||||
}
|
||||
|
||||
type APIHandler struct {
|
||||
opts APIHandlerOptions
|
||||
baseapp.APIHandler
|
||||
}
|
||||
|
||||
// NewAPIHandler returns an APIHandler
|
||||
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
|
||||
|
||||
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
|
||||
Reader: opts.DataConnector,
|
||||
AppDao: opts.AppDao,
|
||||
RuleManager: opts.RulesManager,
|
||||
FeatureFlags: opts.FeatureFlags})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ah := &APIHandler{
|
||||
opts: opts,
|
||||
APIHandler: *baseHandler,
|
||||
}
|
||||
return ah, nil
|
||||
}
|
||||
|
||||
func (ah *APIHandler) FF() baseint.FeatureLookup {
|
||||
return ah.opts.FeatureFlags
|
||||
}
|
||||
|
||||
func (ah *APIHandler) RM() *rules.Manager {
|
||||
return ah.opts.RulesManager
|
||||
}
|
||||
|
||||
func (ah *APIHandler) LM() *license.Manager {
|
||||
return ah.opts.LicenseManager
|
||||
}
|
||||
|
||||
func (ah *APIHandler) AppDao() dao.ModelDao {
|
||||
return ah.opts.AppDao
|
||||
}
|
||||
|
||||
func (ah *APIHandler) CheckFeature(f string) bool {
|
||||
err := ah.FF().CheckFeature(f)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// RegisterRoutes registers routes for this handler on the given router
|
||||
func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
|
||||
// note: add ee override methods first
|
||||
|
||||
// routes available only in ee version
|
||||
router.HandleFunc("/api/v1/licenses",
|
||||
baseapp.AdminAccess(ah.listLicenses)).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
router.HandleFunc("/api/v1/licenses",
|
||||
baseapp.AdminAccess(ah.applyLicense)).
|
||||
Methods(http.MethodPost)
|
||||
|
||||
router.HandleFunc("/api/v1/featureFlags",
|
||||
baseapp.OpenAccess(ah.getFeatureFlags)).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
router.HandleFunc("/api/v1/loginPrecheck",
|
||||
baseapp.OpenAccess(ah.precheckLogin)).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
// paid plans specific routes
|
||||
router.HandleFunc("/api/v1/complete/saml",
|
||||
baseapp.OpenAccess(ah.receiveSAML)).
|
||||
Methods(http.MethodPost)
|
||||
|
||||
router.HandleFunc("/api/v1/orgs/{orgId}/domains",
|
||||
baseapp.AdminAccess(ah.listDomainsByOrg)).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
router.HandleFunc("/api/v1/domains",
|
||||
baseapp.AdminAccess(ah.postDomain)).
|
||||
Methods(http.MethodPost)
|
||||
|
||||
router.HandleFunc("/api/v1/domains/{id}",
|
||||
baseapp.AdminAccess(ah.putDomain)).
|
||||
Methods(http.MethodPut)
|
||||
|
||||
router.HandleFunc("/api/v1/domains/{id}",
|
||||
baseapp.AdminAccess(ah.deleteDomain)).
|
||||
Methods(http.MethodDelete)
|
||||
|
||||
// base overrides
|
||||
router.HandleFunc("/api/v1/version", baseapp.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
|
||||
router.HandleFunc("/api/v1/invite/{token}", baseapp.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
|
||||
router.HandleFunc("/api/v1/register", baseapp.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
|
||||
router.HandleFunc("/api/v1/login", baseapp.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
|
||||
ah.APIHandler.RegisterRoutes(router)
|
||||
|
||||
}
|
||||
|
||||
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
|
||||
version := version.GetVersion()
|
||||
ah.WriteJSON(w, r, map[string]string{"version": version, "ee": "Y"})
|
||||
}
|
||||
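Because getVersion is registered with OpenAccess, the EE build can be identified without credentials; a minimal check, assuming the query service listens on the port exposed by the Dockerfile (8080):

    curl -s http://localhost:8080/api/v1/version
    # expected shape: {"version":"<build version>","ee":"Y"}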
297
ee/query-service/app/api/auth.go
Normal file
@@ -0,0 +1,297 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/gorilla/mux"
|
||||
"go.signoz.io/signoz/ee/query-service/constants"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func parseRequest(r *http.Request, req interface{}) error {
|
||||
defer r.Body.Close()
|
||||
requestBody, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(requestBody, &req)
|
||||
return err
|
||||
}
|
||||
|
||||
// loginUser overrides base handler and considers SSO case.
|
||||
func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
req := basemodel.LoginRequest{}
|
||||
err := parseRequest(r, &req)
|
||||
if err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if req.Email != "" && ah.CheckFeature(model.SSO) {
|
||||
var apierr basemodel.BaseApiError
|
||||
_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
|
||||
if apierr != nil && !apierr.IsNil() {
|
||||
RespondError(w, apierr, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// if all looks good, call auth
|
||||
resp, err := auth.Login(ctx, &req)
|
||||
if ah.HandleError(w, err, http.StatusUnauthorized) {
|
||||
return
|
||||
}
|
||||
|
||||
ah.WriteJSON(w, r, resp)
|
||||
}
|
||||
|
||||
// registerUser registers a user and responds with a precheck
|
||||
// so the front-end can decide the login method
|
||||
func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if !ah.CheckFeature(model.SSO) {
|
||||
ah.APIHandler.Register(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
var req *baseauth.RegisterRequest
|
||||
|
||||
defer r.Body.Close()
|
||||
requestBody, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
zap.S().Errorf("received no input in api\n", err)
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal(requestBody, &req)
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("received invalid user registration request", zap.Error(err))
|
||||
RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
|
||||
return
|
||||
}
|
||||
|
||||
// get invite object
|
||||
invite, err := baseauth.ValidateInvite(ctx, req)
|
||||
if err != nil || invite == nil {
|
||||
zap.S().Errorf("failed to validate invite token", err)
|
||||
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
|
||||
}
|
||||
|
||||
// get auth domain from email domain
|
||||
domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
|
||||
if apierr != nil {
|
||||
zap.S().Errorf("failed to get domain from email", apierr)
|
||||
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
|
||||
}
|
||||
|
||||
precheckResp := &model.PrecheckResponse{
|
||||
SSO: false,
|
||||
IsUser: false,
|
||||
}
|
||||
|
||||
if domain != nil && domain.SsoEnabled {
|
||||
// sso is enabled, create user and respond with precheck data
|
||||
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
var precheckError basemodel.BaseApiError
|
||||
|
||||
precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
|
||||
if precheckError != nil {
|
||||
RespondError(w, precheckError, precheckResp)
|
||||
}
|
||||
|
||||
} else {
|
||||
// no-sso, validate password
|
||||
if err := auth.ValidatePassword(req.Password); err != nil {
|
||||
RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
|
||||
return
|
||||
}
|
||||
|
||||
_, registerError := baseauth.Register(ctx, req)
|
||||
if !registerError.IsNil() {
|
||||
RespondError(w, registerError, nil)
|
||||
return
|
||||
}
|
||||
|
||||
precheckResp.IsUser = true
|
||||
}
|
||||
|
||||
ah.Respond(w, precheckResp)
|
||||
}
|
||||
|
||||
// getInvite returns the invite object details for the given invite token. We do not need to
|
||||
// protect this API because the invite token itself is meant to be private.
|
||||
func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
|
||||
token := mux.Vars(r)["token"]
|
||||
sourceUrl := r.URL.Query().Get("ref")
|
||||
ctx := context.Background()
|
||||
|
||||
inviteObject, err := baseauth.GetInvite(context.Background(), token)
|
||||
if err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
resp := model.GettableInvitation{
|
||||
InvitationResponseObject: inviteObject,
|
||||
}
|
||||
|
||||
precheck, apierr := ah.AppDao().PrecheckLogin(ctx, inviteObject.Email, sourceUrl)
|
||||
resp.Precheck = precheck
|
||||
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, resp)
|
||||
}
|
||||
|
||||
ah.WriteJSON(w, r, resp)
|
||||
}
|
||||
|
||||
// PrecheckLogin enables browser login page to display appropriate
|
||||
// login methods
|
||||
func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
|
||||
email := r.URL.Query().Get("email")
|
||||
sourceUrl := r.URL.Query().Get("ref")
|
||||
|
||||
resp, apierr := ah.AppDao().PrecheckLogin(ctx, email, sourceUrl)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, resp)
|
||||
}
|
||||
|
||||
ah.Respond(w, resp)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
|
||||
// this is the source url that initiated the login request
|
||||
redirectUri := constants.GetDefaultSiteURL()
|
||||
ctx := context.Background()
|
||||
|
||||
var apierr basemodel.BaseApiError
|
||||
|
||||
redirectOnError := func() {
|
||||
ssoError := []byte("Login failed. Please contact your system administrator")
|
||||
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
|
||||
base64.StdEncoding.Encode(dst, ssoError)
|
||||
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, string(dst)), http.StatusMovedPermanently)
|
||||
}
|
||||
|
||||
if !ah.CheckFeature(model.SSO) {
|
||||
zap.S().Errorf("[ReceiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
|
||||
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
|
||||
redirectOnError()
|
||||
return
|
||||
}
|
||||
|
||||
// the relay state is sent when a login request is submitted to
|
||||
// Idp.
|
||||
relayState := r.FormValue("RelayState")
|
||||
zap.S().Debug("[ReceiveML] relay state", zap.String("relayState", relayState))
|
||||
|
||||
parsedState, err := url.Parse(relayState)
|
||||
if err != nil || relayState == "" {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
|
||||
redirectOnError()
|
||||
return
|
||||
}
|
||||
|
||||
// upgrade redirect url from the relay state for better accuracy
|
||||
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
|
||||
|
||||
// derive domain id from relay state now
|
||||
var domainIdStr string
|
||||
for k, v := range parsedState.Query() {
|
||||
if k == "domainId" && len(v) > 0 {
|
||||
domainIdStr = strings.Replace(v[0], ":", "-", -1)
|
||||
}
|
||||
}
|
||||
|
||||
domainId, err := uuid.Parse(domainIdStr)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to process request- failed to parse domain id ifrom relay", zap.Error(err))
|
||||
redirectOnError()
|
||||
return
|
||||
}
|
||||
|
||||
domain, apierr := ah.AppDao().GetDomain(ctx, domainId)
|
||||
if (apierr != nil) || domain == nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to process request- invalid domain", domainIdStr, zap.Error(apierr))
|
||||
redirectOnError()
|
||||
return
|
||||
}
|
||||
|
||||
sp, err := domain.PrepareSamlRequest(parsedState)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to prepare saml request for domain (%s): %v", domainId, err)
|
||||
redirectOnError()
|
||||
return
|
||||
}
|
||||
|
||||
assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
|
||||
if err != nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domainId, err)
|
||||
redirectOnError()
|
||||
return
|
||||
}
|
||||
|
||||
if assertionInfo.WarningInfo.InvalidTime {
|
||||
zap.S().Errorf("[ReceiveSAML] expired saml response for organization (%s): %v", domainId, err)
|
||||
redirectOnError()
|
||||
return
|
||||
}
|
||||
|
||||
email := assertionInfo.NameID
|
||||
|
||||
// user email found, now start preparing jwt response
|
||||
userPayload, baseapierr := ah.AppDao().GetUserByEmail(ctx, email)
|
||||
if baseapierr != nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to find or register a new user for email %s and org %s", email, domainId, zap.Error(baseapierr.Err))
|
||||
redirectOnError()
|
||||
return
|
||||
}
|
||||
|
||||
tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
|
||||
if err != nil {
|
||||
zap.S().Errorf("[ReceiveSAML] failed to generate access token for email %s and org %s", email, domainId, zap.Error(err))
|
||||
redirectOnError()
|
||||
return
|
||||
}
|
||||
|
||||
userID := userPayload.User.Id
|
||||
nextPage := fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
|
||||
redirectUri,
|
||||
tokenStore.AccessJwt,
|
||||
userID,
|
||||
tokenStore.RefreshJwt)
|
||||
|
||||
http.Redirect(w, r, nextPage, http.StatusMovedPermanently)
|
||||
}
|
||||
90
ee/query-service/app/api/domains.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/gorilla/mux"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
)
|
||||
|
||||
func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
|
||||
orgId := mux.Vars(r)["orgId"]
|
||||
domains, apierr := ah.AppDao().ListDomains(context.Background(), orgId)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, domains)
|
||||
return
|
||||
}
|
||||
ah.Respond(w, domains)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
|
||||
req := model.OrgDomain{}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if err := req.ValidNew(); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if apierr := ah.AppDao().CreateDomain(ctx, &req); apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
ah.Respond(w, &req)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
|
||||
domainIdStr := mux.Vars(r)["id"]
|
||||
domainId, err := uuid.Parse(domainIdStr)
|
||||
if err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
req := model.OrgDomain{Id: domainId}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
req.Id = domainId
|
||||
if err := req.Valid(nil); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
}
|
||||
|
||||
if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
|
||||
ah.Respond(w, &req)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
|
||||
domainIdStr := mux.Vars(r)["id"]
|
||||
|
||||
domainId, err := uuid.Parse(domainIdStr)
|
||||
if err != nil {
|
||||
RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
|
||||
return
|
||||
}
|
||||
|
||||
apierr := ah.AppDao().DeleteDomain(context.Background(), domainId)
|
||||
if apierr != nil {
|
||||
RespondError(w, apierr, nil)
|
||||
return
|
||||
}
|
||||
ah.Respond(w, nil)
|
||||
}
|
||||
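The domain CRUD handlers above are wrapped in AdminAccess; listing the auth domains of an organization could look like the following, where the orgId and the bearer token are placeholders and the JWT-based Authorization header is assumed from the rest of the query service:

    curl -s -H "Authorization: Bearer $ADMIN_JWT" \
      http://localhost:8080/api/v1/orgs/<orgId>/domains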
10
ee/query-service/app/api/featureFlags.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
|
||||
featureSet := ah.FF().GetFeatureFlags()
|
||||
ah.Respond(w, featureSet)
|
||||
}
|
||||
40
ee/query-service/app/api/license.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
|
||||
licenses, apiError := ah.LM().GetLicenses(context.Background())
|
||||
if apiError != nil {
|
||||
RespondError(w, apiError, nil)
|
||||
}
|
||||
ah.Respond(w, licenses)
|
||||
}
|
||||
|
||||
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.Background()
|
||||
var l model.License
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
|
||||
RespondError(w, model.BadRequest(err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if l.Key == "" {
|
||||
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
|
||||
return
|
||||
}
|
||||
|
||||
license, apiError := ah.LM().Activate(ctx, l.Key)
|
||||
if apiError != nil {
|
||||
RespondError(w, apiError, nil)
|
||||
return
|
||||
}
|
||||
|
||||
ah.Respond(w, license)
|
||||
}
|
||||
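applyLicense expects a JSON body carrying at least the license key; a hedged example of activating a license through this admin route (JSON field name, token, and key are placeholders inferred from the handler above):

    curl -s -X POST http://localhost:8080/api/v1/licenses \
      -H "Authorization: Bearer $ADMIN_JWT" \
      -H "Content-Type: application/json" \
      -d '{"key": "<license-key>"}'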
12
ee/query-service/app/api/response.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
baseapp "go.signoz.io/signoz/pkg/query-service/app"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {
|
||||
baseapp.RespondError(w, apiErr, data)
|
||||
}
|
||||
28
ee/query-service/app/db/reader.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/v2"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
|
||||
basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
|
||||
)
|
||||
|
||||
type ClickhouseReader struct {
|
||||
conn clickhouse.Conn
|
||||
appdb *sqlx.DB
|
||||
*basechr.ClickHouseReader
|
||||
}
|
||||
|
||||
func NewDataConnector(localDB *sqlx.DB, promConfigPath string) *ClickhouseReader {
|
||||
ch := basechr.NewReader(localDB, promConfigPath)
|
||||
return &ClickhouseReader{
|
||||
conn: ch.GetConn(),
|
||||
appdb: localDB,
|
||||
ClickHouseReader: ch,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ClickhouseReader) Start(readerReady chan bool) {
|
||||
r.ClickHouseReader.Start(readerReady)
|
||||
}
|
||||
431
ee/query-service/app/server.go
Normal file
@@ -0,0 +1,431 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof" // http profiler
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/jmoiron/sqlx"
|
||||
|
||||
"github.com/rs/cors"
|
||||
"github.com/soheilhy/cmux"
|
||||
"go.signoz.io/signoz/ee/query-service/app/api"
|
||||
"go.signoz.io/signoz/ee/query-service/app/db"
|
||||
"go.signoz.io/signoz/ee/query-service/dao"
|
||||
"go.signoz.io/signoz/ee/query-service/interfaces"
|
||||
licensepkg "go.signoz.io/signoz/ee/query-service/license"
|
||||
|
||||
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
|
||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||
"go.signoz.io/signoz/pkg/query-service/healthcheck"
|
||||
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
|
||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
|
||||
rules "go.signoz.io/signoz/pkg/query-service/rules"
|
||||
"go.signoz.io/signoz/pkg/query-service/telemetry"
|
||||
"go.signoz.io/signoz/pkg/query-service/utils"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type ServerOptions struct {
|
||||
PromConfigPath string
|
||||
HTTPHostPort string
|
||||
PrivateHostPort string
|
||||
// alert specific params
|
||||
DisableRules bool
|
||||
RuleRepoURL string
|
||||
}
|
||||
|
||||
// Server runs HTTP api service
|
||||
type Server struct {
|
||||
serverOptions *ServerOptions
|
||||
conn net.Listener
|
||||
ruleManager *rules.Manager
|
||||
separatePorts bool
|
||||
|
||||
// public http router
|
||||
httpConn net.Listener
|
||||
httpServer *http.Server
|
||||
|
||||
// private http
|
||||
privateConn net.Listener
|
||||
privateHTTP *http.Server
|
||||
|
||||
// feature flags
|
||||
featureLookup baseint.FeatureLookup
|
||||
|
||||
unavailableChannel chan healthcheck.Status
|
||||
}
|
||||
|
||||
// HealthCheckStatus returns health check status channel a client can subscribe to
|
||||
func (s Server) HealthCheckStatus() chan healthcheck.Status {
|
||||
return s.unavailableChannel
|
||||
}
|
||||
|
||||
// NewServer creates and initializes Server
|
||||
func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
|
||||
modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localDB.SetMaxOpenConns(10)
|
||||
|
||||
// initiate license manager
|
||||
lm, err := licensepkg.StartManager("sqlite", localDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// set license manager as feature flag provider in dao
|
||||
modelDao.SetFlagProvider(lm)
|
||||
readerReady := make(chan bool)
|
||||
|
||||
var reader interfaces.DataConnector
|
||||
storage := os.Getenv("STORAGE")
|
||||
if storage == "clickhouse" {
|
||||
zap.S().Info("Using ClickHouse as datastore ...")
|
||||
qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath)
|
||||
go qb.Start(readerReady)
|
||||
reader = qb
|
||||
} else {
|
||||
return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
|
||||
}
|
||||
|
||||
<-readerReady
|
||||
rm, err := makeRulesManager(serverOptions.PromConfigPath,
|
||||
baseconst.GetAlertManagerApiPrefix(),
|
||||
serverOptions.RuleRepoURL,
|
||||
localDB,
|
||||
reader,
|
||||
serverOptions.DisableRules)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
telemetry.GetInstance().SetReader(reader)
|
||||
|
||||
apiOpts := api.APIHandlerOptions{
|
||||
DataConnector: reader,
|
||||
AppDao: modelDao,
|
||||
RulesManager: rm,
|
||||
FeatureFlags: lm,
|
||||
LicenseManager: lm,
|
||||
}
|
||||
|
||||
apiHandler, err := api.NewAPIHandler(apiOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
// logger: logger,
|
||||
// tracer: tracer,
|
||||
ruleManager: rm,
|
||||
serverOptions: serverOptions,
|
||||
unavailableChannel: make(chan healthcheck.Status),
|
||||
}
|
||||
|
||||
httpServer, err := s.createPublicServer(apiHandler)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.httpServer = httpServer
|
||||
|
||||
privateServer, err := s.createPrivateServer(apiHandler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.privateHTTP = privateServer
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
|
||||
|
||||
r := mux.NewRouter()
|
||||
|
||||
r.Use(setTimeoutMiddleware)
|
||||
r.Use(s.analyticsMiddleware)
|
||||
r.Use(loggingMiddlewarePrivate)
|
||||
|
||||
apiHandler.RegisterPrivateRoutes(r)
|
||||
|
||||
c := cors.New(cors.Options{
|
||||
//todo(amol): find out a way to add exact domain or
|
||||
// ip here for alert manager
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
|
||||
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
|
||||
})
|
||||
|
||||
handler := c.Handler(r)
|
||||
handler = handlers.CompressHandler(handler)
|
||||
|
||||
return &http.Server{
|
||||
Handler: handler,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
|
||||
|
||||
r := mux.NewRouter()
|
||||
|
||||
r.Use(setTimeoutMiddleware)
|
||||
r.Use(s.analyticsMiddleware)
|
||||
r.Use(loggingMiddleware)
|
||||
|
||||
apiHandler.RegisterRoutes(r)
|
||||
apiHandler.RegisterMetricsRoutes(r)
|
||||
apiHandler.RegisterLogsRoutes(r)
|
||||
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
|
||||
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control"},
|
||||
})
|
||||
|
||||
handler := c.Handler(r)
|
||||
|
||||
handler = handlers.CompressHandler(handler)
|
||||
|
||||
return &http.Server{
|
||||
Handler: handler,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// loggingMiddleware is used for logging public api calls
|
||||
func loggingMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
route := mux.CurrentRoute(r)
|
||||
path, _ := route.GetPathTemplate()
|
||||
startTime := time.Now()
|
||||
next.ServeHTTP(w, r)
|
||||
zap.S().Info(path, "\ttimeTaken: ", time.Now().Sub(startTime))
|
||||
})
|
||||
}
|
||||
|
||||
// loggingMiddlewarePrivate is used for logging private api calls
|
||||
// from internal services like alert manager
|
||||
func loggingMiddlewarePrivate(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
route := mux.CurrentRoute(r)
|
||||
path, _ := route.GetPathTemplate()
|
||||
startTime := time.Now()
|
||||
next.ServeHTTP(w, r)
|
||||
zap.S().Info(path, "\tprivatePort: true", "\ttimeTaken: ", time.Now().Sub(startTime))
|
||||
})
|
||||
}
|
||||
|
||||
type loggingResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
statusCode int
|
||||
}
|
||||
|
||||
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
|
||||
// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
|
||||
// we default to that status code.
|
||||
return &loggingResponseWriter{w, http.StatusOK}
|
||||
}
|
||||
|
||||
func (lrw *loggingResponseWriter) WriteHeader(code int) {
|
||||
lrw.statusCode = code
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
// Flush implements the http.Flusher interface.
|
||||
func (lrw *loggingResponseWriter) Flush() {
|
||||
lrw.ResponseWriter.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
route := mux.CurrentRoute(r)
|
||||
path, _ := route.GetPathTemplate()
|
||||
|
||||
lrw := NewLoggingResponseWriter(w)
|
||||
next.ServeHTTP(lrw, r)
|
||||
|
||||
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
|
||||
|
||||
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func setTimeoutMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
var cancel context.CancelFunc
|
||||
// check if route is not excluded
|
||||
url := r.URL.Path
|
||||
if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
|
||||
ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout*time.Second)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
r = r.WithContext(ctx)
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// initListeners initialises listeners of the server
|
||||
func (s *Server) initListeners() error {
|
||||
// listen on public port
|
||||
var err error
|
||||
publicHostPort := s.serverOptions.HTTPHostPort
|
||||
if publicHostPort == "" {
|
||||
return fmt.Errorf("baseconst.HTTPHostPort is required")
|
||||
}
|
||||
|
||||
s.httpConn, err = net.Listen("tcp", publicHostPort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
|
||||
|
||||
// listen on private port to support internal services
|
||||
privateHostPort := s.serverOptions.PrivateHostPort
|
||||
|
||||
if privateHostPort == "" {
|
||||
return fmt.Errorf("baseconst.PrivateHostPort is required")
|
||||
}
|
||||
|
||||
s.privateConn, err = net.Listen("tcp", privateHostPort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start listening on http and private http port concurrently
|
||||
func (s *Server) Start() error {
|
||||
|
||||
// initiate rule manager first
|
||||
if !s.serverOptions.DisableRules {
|
||||
s.ruleManager.Start()
|
||||
} else {
|
||||
zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
|
||||
}
|
||||
|
||||
err := s.initListeners()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var httpPort int
|
||||
if port, err := utils.GetPort(s.httpConn.Addr()); err == nil {
|
||||
httpPort = port
|
||||
}
|
||||
|
||||
go func() {
|
||||
zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
|
||||
|
||||
switch err := s.httpServer.Serve(s.httpConn); err {
|
||||
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
|
||||
// normal exit, nothing to do
|
||||
default:
|
||||
zap.S().Error("Could not start HTTP server", zap.Error(err))
|
||||
}
|
||||
s.unavailableChannel <- healthcheck.Unavailable
|
||||
}()
|
||||
|
||||
go func() {
|
||||
zap.S().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
|
||||
|
||||
err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
|
||||
if err != nil {
|
||||
zap.S().Error("Could not start pprof server", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
var privatePort int
|
||||
if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
|
||||
privatePort = port
|
||||
}
|
||||
fmt.Println("starting private http")
|
||||
go func() {
|
||||
zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
|
||||
|
||||
switch err := s.privateHTTP.Serve(s.privateConn); err {
|
||||
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
|
||||
// normal exit, nothing to do
|
||||
zap.S().Info("private http server closed")
|
||||
default:
|
||||
zap.S().Error("Could not start private HTTP server", zap.Error(err))
|
||||
}
|
||||
|
||||
s.unavailableChannel <- healthcheck.Unavailable
|
||||
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeRulesManager(
|
||||
promConfigPath,
|
||||
alertManagerURL string,
|
||||
ruleRepoURL string,
|
||||
db *sqlx.DB,
|
||||
ch baseint.Reader,
|
||||
disableRules bool) (*rules.Manager, error) {
|
||||
|
||||
// create engine
|
||||
pqle, err := pqle.FromConfigPath(promConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create pql engine : %v", err)
|
||||
}
|
||||
|
||||
// notifier opts
|
||||
notifierOpts := basealm.NotifierOptions{
|
||||
QueueCapacity: 10000,
|
||||
Timeout: 1 * time.Second,
|
||||
AlertManagerURLs: []string{alertManagerURL},
|
||||
}
|
||||
|
||||
// create manager opts
|
||||
managerOpts := &rules.ManagerOptions{
|
||||
NotifierOpts: notifierOpts,
|
||||
Queriers: &rules.Queriers{
|
||||
PqlEngine: pqle,
|
||||
Ch: ch.GetConn(),
|
||||
},
|
||||
RepoURL: ruleRepoURL,
|
||||
DBConn: db,
|
||||
Context: context.Background(),
|
||||
Logger: nil,
|
||||
DisableRules: disableRules,
|
||||
}
|
||||
|
||||
// create Manager
|
||||
manager, err := rules.NewManager(managerOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rule manager error: %v", err)
|
||||
}
|
||||
|
||||
zap.S().Info("rules manager is ready")
|
||||
|
||||
return manager, nil
|
||||
}
|
||||
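NewServer above refuses to start unless the STORAGE environment variable selects clickhouse, and the Dockerfile earlier shows the binary taking a -config flag; a minimal sketch of a local run under those assumptions (paths are illustrative):

    STORAGE=clickhouse ./ee/query-service/bin/query-service \
      -config ./pkg/query-service/config/prometheus.yml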
28
ee/query-service/constants/constants.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package constants
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultSiteURL = "https://localhost:3301"
|
||||
)
|
||||
|
||||
var LicenseSignozIo = "https://license.signoz.io/api/v1"
|
||||
|
||||
func GetOrDefaultEnv(key string, fallback string) string {
|
||||
v := os.Getenv(key)
|
||||
if len(v) == 0 {
|
||||
return fallback
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// constant functions that override env vars
|
||||
|
||||
// GetDefaultSiteURL returns default site url, primarily
|
||||
// used to send saml requests and allow the backend to
|
||||
// handle http redirect
|
||||
func GetDefaultSiteURL() string {
|
||||
return GetOrDefaultEnv("SIGNOZ_SITE_URL", DefaultSiteURL)
|
||||
}
|
||||
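GetDefaultSiteURL falls back to https://localhost:3301 unless SIGNOZ_SITE_URL is set; when the UI is served from another address, exporting the variable before starting the query service keeps the SAML redirect target correct, e.g. (hostname is a placeholder):

    export SIGNOZ_SITE_URL="https://signoz.example.com"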
18
ee/query-service/dao/factory.go
Normal file
@@ -0,0 +1,18 @@
|
||||
package dao
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go.signoz.io/signoz/ee/query-service/dao/sqlite"
|
||||
)
|
||||
|
||||
func InitDao(engine, path string) (ModelDao, error) {
|
||||
|
||||
switch engine {
|
||||
case "sqlite":
|
||||
return sqlite.InitDB(path)
|
||||
default:
|
||||
return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
|
||||
}
|
||||
|
||||
}
|
||||
33
ee/query-service/dao/interface.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package dao
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
basedao "go.signoz.io/signoz/pkg/query-service/dao"
|
||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
type ModelDao interface {
|
||||
basedao.ModelDao
|
||||
|
||||
// SetFlagProvider sets the feature lookup provider
|
||||
SetFlagProvider(flags baseint.FeatureLookup)
|
||||
|
||||
DB() *sqlx.DB
|
||||
|
||||
// auth methods
|
||||
PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
|
||||
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
|
||||
|
||||
// org domain (auth domains) CRUD ops
|
||||
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
|
||||
GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
|
||||
CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
|
||||
UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
|
||||
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
|
||||
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
|
||||
}
|
||||
112
ee/query-service/dao/sqlite/auth.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"go.signoz.io/signoz/ee/query-service/constants"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
|
||||
domain, apierr := m.GetDomainByEmail(ctx, email)
|
||||
if apierr != nil {
|
||||
return false, apierr
|
||||
}
|
||||
|
||||
if domain != nil && domain.SsoEnabled {
|
||||
// sso is enabled, check if the user has admin role
|
||||
userPayload, baseapierr := m.GetUserByEmail(ctx, email)
|
||||
|
||||
if baseapierr != nil || userPayload == nil {
|
||||
return false, baseapierr
|
||||
}
|
||||
|
||||
if userPayload.Role != baseconst.AdminGroup {
|
||||
return false, model.BadRequest(fmt.Errorf("auth method not supported"))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// PrecheckLogin is called when the login or signup page is loaded
|
||||
// to check whether sso login should be prompted
|
||||
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError) {
|
||||
|
||||
// assume user is valid unless proven otherwise
|
||||
resp := &model.PrecheckResponse{IsUser: true, CanSelfRegister: false}
|
||||
|
||||
// check if email is a valid user
|
||||
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
|
||||
if baseApiErr != nil {
|
||||
return resp, baseApiErr
|
||||
}
|
||||
|
||||
if userPayload == nil {
|
||||
resp.IsUser = false
|
||||
}
|
||||
ssoAvailable := true
|
||||
err := m.checkFeature(model.SSO)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case basemodel.ErrFeatureUnavailable:
|
||||
// do nothing, just skip sso
|
||||
ssoAvailable = false
|
||||
default:
|
||||
zap.S().Errorf("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
|
||||
return resp, model.BadRequest(err)
|
||||
}
|
||||
}
|
||||
|
||||
if ssoAvailable {
|
||||
|
||||
// find domain from email
|
||||
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
|
||||
if apierr != nil {
|
||||
var emailDomain string
|
||||
emailComponents := strings.Split(email, "@")
|
||||
if len(emailComponents) > 0 {
|
||||
emailDomain = emailComponents[1]
|
||||
}
|
||||
zap.S().Errorf("failed to get org domain from email", zap.String("emailDomain", emailDomain), apierr.ToError())
|
||||
return resp, apierr
|
||||
}
|
||||
|
||||
if orgDomain != nil && orgDomain.SsoEnabled {
|
||||
// saml is enabled for this domain, let's prepare the sso url
|
||||
|
||||
if sourceUrl == "" {
|
||||
sourceUrl = constants.GetDefaultSiteURL()
|
||||
}
|
||||
|
||||
// parse source url that generated the login request
|
||||
var err error
|
||||
escapedUrl, _ := url.QueryUnescape(sourceUrl)
|
||||
siteUrl, err := url.Parse(escapedUrl)
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to parse referer", err)
|
||||
return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
|
||||
}
|
||||
|
||||
// build Idp URL that will authenticate the user
|
||||
// the front-end will redirect user to this url
|
||||
resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), err)
|
||||
return resp, model.InternalError(err)
|
||||
}
|
||||
|
||||
// set SSO to true, as the url is generated correctly
|
||||
resp.SSO = true
|
||||
}
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
183
ee/query-service/dao/sqlite/domain.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// StoredDomain represents stored database record for org domain
|
||||
|
||||
type StoredDomain struct {
|
||||
Id uuid.UUID `db:"id"`
|
||||
Name string `db:"name"`
|
||||
OrgId string `db:"org_id"`
|
||||
Data string `db:"data"`
|
||||
CreatedAt int64 `db:"created_at"`
|
||||
UpdatedAt int64 `db:"updated_at"`
|
||||
}
|
||||
|
||||
// GetDomain returns org domain for a given domain id
|
||||
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {
|
||||
|
||||
stored := StoredDomain{}
|
||||
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)
|
||||
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, model.BadRequest(fmt.Errorf("invalid domain id"))
|
||||
}
|
||||
return nil, model.InternalError(err)
|
||||
}
|
||||
|
||||
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
||||
if err := domain.LoadConfig(stored.Data); err != nil {
|
||||
return domain, model.InternalError(err)
|
||||
}
|
||||
return domain, nil
|
||||
}
|
||||
|
||||
// ListDomains gets the list of auth domains by org id
|
||||
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
|
||||
domains := []model.OrgDomain{}
|
||||
|
||||
stored := []StoredDomain{}
|
||||
err := m.DB().SelectContext(ctx, &stored, `SELECT * FROM org_domains WHERE org_id=$1`, orgId)
|
||||
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return []model.OrgDomain{}, nil
|
||||
}
|
||||
return nil, model.InternalError(err)
|
||||
}
|
||||
|
||||
for _, s := range stored {
|
||||
domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
|
||||
if err := domain.LoadConfig(s.Data); err != nil {
|
||||
zap.S().Errorf("ListDomains() failed", zap.Error(err))
|
||||
}
|
||||
domains = append(domains, domain)
|
||||
}
|
||||
|
||||
return domains, nil
|
||||
}
|
||||
|
||||
// CreateDomain creates a new auth domain
|
||||
func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
|
||||
|
||||
if domain.Id == uuid.Nil {
|
||||
domain.Id = uuid.New()
|
||||
}
|
||||
|
||||
if domain.OrgId == "" || domain.Name == "" {
|
||||
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
|
||||
}
|
||||
|
||||
configJson, err := json.Marshal(domain)
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to unmarshal domain config", zap.Error(err))
|
||||
return model.InternalError(fmt.Errorf("domain creation failed"))
|
||||
}
|
||||
|
||||
_, err = m.DB().ExecContext(ctx,
|
||||
"INSERT INTO org_domains (id, name, org_id, data, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6)",
|
||||
domain.Id,
|
||||
domain.Name,
|
||||
domain.OrgId,
|
||||
configJson,
|
||||
time.Now().Unix(),
|
||||
time.Now().Unix())
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to insert domain in db", zap.Error(err))
|
||||
return model.InternalError(fmt.Errorf("domain creation failed"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateDomain updates stored config params for a domain
|
||||
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
|
||||
|
||||
if domain.Id == uuid.Nil {
|
||||
zap.S().Errorf("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
|
||||
return model.InternalError(fmt.Errorf("domain update failed"))
|
||||
}
|
||||
|
||||
configJson, err := json.Marshal(domain)
|
||||
if err != nil {
|
||||
zap.S().Errorf("domain update failed", zap.Error(err))
|
||||
return model.InternalError(fmt.Errorf("domain update failed"))
|
||||
}
|
||||
|
||||
_, err = m.DB().ExecContext(ctx,
|
||||
"UPDATE org_domains SET data = $1, updated_at = $2 WHERE id = $3",
|
||||
configJson,
|
||||
time.Now().Unix(),
|
||||
domain.Id)
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("domain update failed", zap.Error(err))
|
||||
return model.InternalError(fmt.Errorf("domain update failed"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteDomain deletes an org domain
|
||||
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
|
||||
|
||||
if id == uuid.Nil {
|
||||
zap.S().Errorf("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
|
||||
return model.InternalError(fmt.Errorf("domain delete failed"))
|
||||
}
|
||||
|
||||
_, err := m.DB().ExecContext(ctx,
|
||||
"DELETE FROM org_domains WHERE id = $1",
|
||||
id)
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("domain delete failed", zap.Error(err))
|
||||
return model.InternalError(fmt.Errorf("domain delete failed"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {
|
||||
|
||||
if email == "" {
|
||||
return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
|
||||
}
|
||||
|
||||
components := strings.Split(email, "@")
|
||||
if len(components) < 2 {
|
||||
return nil, model.BadRequest(fmt.Errorf("invalid email address"))
|
||||
}
|
||||
|
||||
parsedDomain := components[1]
|
||||
|
||||
stored := StoredDomain{}
|
||||
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, parsedDomain)
|
||||
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, model.InternalError(err)
|
||||
}
|
||||
|
||||
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
|
||||
if err := domain.LoadConfig(stored.Data); err != nil {
|
||||
return domain, model.InternalError(err)
|
||||
}
|
||||
return domain, nil
|
||||
}
|
||||
63
ee/query-service/dao/sqlite/modelDao.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
basedao "go.signoz.io/signoz/pkg/query-service/dao"
|
||||
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
|
||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||
)
|
||||
|
||||
type modelDao struct {
|
||||
*basedsql.ModelDaoSqlite
|
||||
flags baseint.FeatureLookup
|
||||
}
|
||||
|
||||
// SetFlagProvider sets the feature lookup provider
|
||||
func (m *modelDao) SetFlagProvider(flags baseint.FeatureLookup) {
|
||||
m.flags = flags
|
||||
}
|
||||
|
||||
// checkFeature confirms if a feature is available
|
||||
func (m *modelDao) checkFeature(key string) error {
|
||||
if m.flags == nil {
|
||||
return fmt.Errorf("flag provider not set")
|
||||
}
|
||||
|
||||
return m.flags.CheckFeature(key)
|
||||
}
|
||||
|
||||
// InitDB creates and extends base model DB repository
|
||||
func InitDB(dataSourceName string) (*modelDao, error) {
|
||||
dao, err := basedsql.InitDB(dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// set package variable so dependent base methods (e.g. AuthCache) will work
|
||||
basedao.SetDB(dao)
|
||||
m := &modelDao{ModelDaoSqlite: dao}
|
||||
|
||||
table_schema := `
|
||||
PRAGMA foreign_keys = ON;
|
||||
CREATE TABLE IF NOT EXISTS org_domains(
|
||||
id TEXT PRIMARY KEY,
|
||||
org_id TEXT NOT NULL,
|
||||
name VARCHAR(50) NOT NULL UNIQUE,
|
||||
created_at INTEGER NOT NULL,
|
||||
updated_at INTEGER,
|
||||
data TEXT NOT NULL,
|
||||
FOREIGN KEY(org_id) REFERENCES organizations(id)
|
||||
);`
|
||||
|
||||
_, err = m.DB().Exec(table_schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in creating tables: %v", err.Error())
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m *modelDao) DB() *sqlx.DB {
|
||||
return m.ModelDaoSqlite.DB()
|
||||
}
|
||||
20
ee/query-service/integrations/signozio/response.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package signozio
|
||||
|
||||
type status string
|
||||
|
||||
const (
|
||||
statusSuccess status = "success"
|
||||
statusError status = "error"
|
||||
)
|
||||
|
||||
type ActivationResult struct {
|
||||
Status status `json:"status"`
|
||||
Data *ActivationResponse `json:"data,omitempty"`
|
||||
ErrorType string `json:"errorType,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
type ActivationResponse struct {
|
||||
ActivationId string `json:"ActivationId"`
|
||||
PlanDetails string `json:"PlanDetails"`
|
||||
}
|
||||
159
ee/query-service/integrations/signozio/signozio.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package signozio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"go.signoz.io/signoz/ee/query-service/constants"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var C *Client
|
||||
|
||||
const (
|
||||
POST = "POST"
|
||||
APPLICATION_JSON = "application/json"
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
Prefix string
|
||||
}
|
||||
|
||||
func New() *Client {
|
||||
return &Client{
|
||||
Prefix: constants.LicenseSignozIo,
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
C = New()
|
||||
}
|
||||
|
||||
// ActivateLicense sends key to license.signoz.io and gets activation data
|
||||
func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
|
||||
licenseReq := map[string]string{
|
||||
"key": key,
|
||||
"siteId": siteId,
|
||||
}
|
||||
|
||||
reqString, _ := json.Marshal(licenseReq)
|
||||
httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to connect to license.signoz.io", err)
|
||||
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
|
||||
}
|
||||
|
||||
httpBody, err := ioutil.ReadAll(httpResponse.Body)
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to read activation response from license.signoz.io", err)
|
||||
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
|
||||
}
|
||||
|
||||
defer httpResponse.Body.Close()
|
||||
|
||||
// read api request result
|
||||
result := ActivationResult{}
|
||||
err = json.Unmarshal(httpBody, &result)
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to marshal activation response from license.signoz.io", err)
|
||||
return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
|
||||
}
|
||||
|
||||
switch httpResponse.StatusCode {
|
||||
case 200, 201:
|
||||
return result.Data, nil
|
||||
case 400, 401:
|
||||
return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
|
||||
default:
|
||||
return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// ValidateLicense validates the license key
|
||||
func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
|
||||
validReq := map[string]string{
|
||||
"activationId": activationId,
|
||||
}
|
||||
|
||||
reqString, _ := json.Marshal(validReq)
|
||||
response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))
|
||||
|
||||
if err != nil {
|
||||
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
|
||||
}
|
||||
|
||||
defer response.Body.Close()
|
||||
|
||||
switch response.StatusCode {
|
||||
case 200, 201:
|
||||
a := ActivationResult{}
|
||||
err = json.Unmarshal(body, &a)
|
||||
if err != nil {
|
||||
return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
|
||||
}
|
||||
return a.Data, nil
|
||||
case 400, 401:
|
||||
return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
|
||||
"bad request error received from license.signoz.io"))
|
||||
default:
|
||||
return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
|
||||
"internal error received from license.signoz.io"))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, POST, url, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Add("Content-Type", contentType)
|
||||
return req, err
|
||||
|
||||
}
|
||||
|
||||
// SendUsage reports the usage of signoz to license server
|
||||
func SendUsage(ctx context.Context, usage *model.UsagePayload) *model.ApiError {
|
||||
reqString, _ := json.Marshal(usage)
|
||||
req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
|
||||
if err != nil {
|
||||
return model.BadRequest(errors.Wrap(err, "unable to create http request"))
|
||||
}
|
||||
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
|
||||
}
|
||||
|
||||
defer res.Body.Close()
|
||||
|
||||
switch res.StatusCode {
|
||||
case 200, 201:
|
||||
return nil
|
||||
case 400, 401:
|
||||
return model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
|
||||
"bad request error received from license.signoz.io"))
|
||||
default:
|
||||
return model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
|
||||
"internal error received from license.signoz.io"))
|
||||
}
|
||||
}
|
||||
12
ee/query-service/interfaces/connector.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package interfaces
|
||||
|
||||
import (
|
||||
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
|
||||
)
|
||||
|
||||
// DataConnector defines methods for interaction
|
||||
// with o11y data. for example - clickhouse
|
||||
type DataConnector interface {
|
||||
Start(readerReady chan bool)
|
||||
baseint.Reader
|
||||
}
|
||||
127
ee/query-service/license/db.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package license
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
|
||||
"go.signoz.io/signoz/ee/query-service/license/sqlite"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Repo is license repo. stores license keys in a secured DB
|
||||
type Repo struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
// NewLicenseRepo initiates a new license repo
|
||||
func NewLicenseRepo(db *sqlx.DB) Repo {
|
||||
return Repo{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Repo) InitDB(engine string) error {
|
||||
switch engine {
|
||||
case "sqlite3", "sqlite":
|
||||
return sqlite.InitDB(r.db)
|
||||
default:
|
||||
return fmt.Errorf("unsupported db")
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
|
||||
licenses := []model.License{}
|
||||
|
||||
query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"
|
||||
|
||||
err := r.db.Select(&licenses, query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get licenses from db: %v", err)
|
||||
}
|
||||
|
||||
return licenses, nil
|
||||
}
|
||||
|
||||
// GetActiveLicense fetches the latest active license from DB
|
||||
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, error) {
|
||||
var err error
|
||||
licenses := []model.License{}
|
||||
|
||||
query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"
|
||||
|
||||
err = r.db.Select(&licenses, query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get active licenses from db: %v", err)
|
||||
}
|
||||
|
||||
var active *model.License
|
||||
for _, l := range licenses {
l := l // copy the loop variable so &l below does not alias the shared iteration variable
l.ParsePlan()
|
||||
if active == nil &&
|
||||
(l.ValidFrom != 0) &&
|
||||
(l.ValidUntil == -1 || l.ValidUntil > time.Now().Unix()) {
|
||||
active = &l
|
||||
}
|
||||
if active != nil &&
|
||||
l.ValidFrom > active.ValidFrom &&
|
||||
(l.ValidUntil == -1 || l.ValidUntil > time.Now().Unix()) {
|
||||
active = &l
|
||||
}
|
||||
}
|
||||
|
||||
return active, nil
|
||||
}
|
||||
|
||||
// InsertLicense inserts a new license in db
|
||||
func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
|
||||
|
||||
if l.Key == "" {
|
||||
return fmt.Errorf("insert license failed: license key is required")
|
||||
}
|
||||
|
||||
query := `INSERT INTO licenses
|
||||
(key, planDetails, activationId, validationmessage)
|
||||
VALUES ($1, $2, $3, $4)`
|
||||
|
||||
_, err := r.db.ExecContext(ctx,
|
||||
query,
|
||||
l.Key,
|
||||
l.PlanDetails,
|
||||
l.ActivationId,
|
||||
l.ValidationMessage)
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("error in inserting license data: ", zap.Error(err))
|
||||
return fmt.Errorf("failed to insert license in db: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdatePlanDetails writes new plan details to the db
|
||||
func (r *Repo) UpdatePlanDetails(ctx context.Context,
|
||||
key,
|
||||
planDetails string) error {
|
||||
|
||||
if key == "" {
|
||||
return fmt.Errorf("Update Plan Details failed: license key is required")
|
||||
}
|
||||
|
||||
query := `UPDATE licenses
|
||||
SET planDetails = $1,
|
||||
updatedAt = $2
|
||||
WHERE key = $3`
|
||||
|
||||
_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("error in updating license: ", zap.Error(err))
|
||||
return fmt.Errorf("failed to update license in db: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
311
ee/query-service/license/manager.go
Normal file
@@ -0,0 +1,311 @@
|
||||
package license
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
|
||||
"sync"
|
||||
|
||||
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
|
||||
|
||||
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
"go.signoz.io/signoz/pkg/query-service/telemetry"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var LM *Manager
|
||||
|
||||
// validate and update license every 24 hours
|
||||
var validationFrequency = 24 * time.Hour
|
||||
|
||||
type Manager struct {
|
||||
repo *Repo
|
||||
mutex sync.Mutex
|
||||
|
||||
validatorRunning bool
|
||||
|
||||
// done signals the validator to stop; this is important for gracefully
// stopping validation and protecting against inconsistent updates
|
||||
done chan struct{}
|
||||
|
||||
// terminated waits for the validate go routine to end
|
||||
terminated chan struct{}
|
||||
|
||||
// last time the license was validated
|
||||
lastValidated int64
|
||||
|
||||
// keep track of validation failure attempts
|
||||
failedAttempts uint64
|
||||
|
||||
// keep track of active license and features
|
||||
activeLicense *model.License
|
||||
activeFeatures basemodel.FeatureSet
|
||||
}
|
||||
|
||||
func StartManager(dbType string, db *sqlx.DB) (*Manager, error) {
|
||||
|
||||
if LM != nil {
|
||||
return LM, nil
|
||||
}
|
||||
|
||||
repo := NewLicenseRepo(db)
|
||||
err := repo.InitDB(dbType)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initiate license repo: %v", err)
|
||||
}
|
||||
|
||||
m := &Manager{
|
||||
repo: &repo,
|
||||
}
|
||||
|
||||
if err := m.start(); err != nil {
|
||||
return m, err
|
||||
}
|
||||
LM = m
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// start loads active license in memory and initiates validator
|
||||
func (lm *Manager) start() error {
|
||||
err := lm.LoadActiveLicense()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (lm *Manager) Stop() {
|
||||
close(lm.done)
|
||||
<-lm.terminated
|
||||
}
|
||||
|
||||
func (lm *Manager) SetActive(l *model.License) {
|
||||
lm.mutex.Lock()
|
||||
defer lm.mutex.Unlock()
|
||||
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
|
||||
lm.activeLicense = l
|
||||
lm.activeFeatures = l.FeatureSet
|
||||
// set default features
|
||||
setDefaultFeatures(lm)
|
||||
if !lm.validatorRunning {
|
||||
// we want to make sure only one validator runs,
|
||||
// we already have lock() so good to go
|
||||
lm.validatorRunning = true
|
||||
go lm.Validator(context.Background())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func setDefaultFeatures(lm *Manager) {
|
||||
for k, v := range baseconstants.DEFAULT_FEATURE_SET {
|
||||
lm.activeFeatures[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// LoadActiveLicense loads the most recent active license
|
||||
func (lm *Manager) LoadActiveLicense() error {
|
||||
var err error
|
||||
active, err := lm.repo.GetActiveLicense(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if active != nil {
|
||||
lm.SetActive(active)
|
||||
} else {
|
||||
zap.S().Info("No active license found, defaulting to basic plan")
|
||||
// if no active license is found, we default to basic(free) plan with all default features
|
||||
lm.activeFeatures = basemodel.BasicPlan
|
||||
setDefaultFeatures(lm)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
|
||||
|
||||
licenses, err := lm.repo.GetLicenses(ctx)
|
||||
if err != nil {
|
||||
return nil, model.InternalError(err)
|
||||
}
|
||||
|
||||
for _, l := range licenses {
|
||||
l.ParsePlan()
|
||||
|
||||
if lm.activeLicense != nil && l.Key == lm.activeLicense.Key {
|
||||
l.IsCurrent = true
|
||||
}
|
||||
|
||||
if l.ValidUntil == -1 {
|
||||
// for subscriptions there is no end date as such,
// but to show the user some validity we default to a one-year timespan
|
||||
l.ValidUntil = l.ValidFrom + 31556926
|
||||
}
|
||||
|
||||
response = append(response, l)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Validator validates license after an epoch of time
|
||||
func (lm *Manager) Validator(ctx context.Context) {
|
||||
defer close(lm.terminated)
|
||||
tick := time.NewTicker(validationFrequency)
|
||||
defer tick.Stop()
|
||||
|
||||
lm.Validate(ctx)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-lm.done:
|
||||
return
|
||||
default:
|
||||
select {
|
||||
case <-lm.done:
|
||||
return
|
||||
case <-tick.C:
|
||||
lm.Validate(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// Validate validates the current active license
|
||||
func (lm *Manager) Validate(ctx context.Context) (reterr error) {
|
||||
zap.S().Info("License validation started")
|
||||
if lm.activeLicense == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
defer func() {
|
||||
lm.mutex.Lock()
|
||||
|
||||
lm.lastValidated = time.Now().Unix()
|
||||
if reterr != nil {
|
||||
zap.S().Errorf("License validation completed with error", reterr)
|
||||
atomic.AddUint64(&lm.failedAttempts, 1)
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
|
||||
map[string]interface{}{"err": reterr.Error()})
|
||||
} else {
|
||||
zap.S().Info("License validation completed with no errors")
|
||||
}
|
||||
|
||||
lm.mutex.Unlock()
|
||||
}()
|
||||
|
||||
response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
|
||||
if apiError != nil {
|
||||
zap.S().Errorf("failed to validate license", apiError)
|
||||
return apiError.Err
|
||||
}
|
||||
|
||||
if response.PlanDetails == lm.activeLicense.PlanDetails {
|
||||
// license plan hasn't changed, nothing to do
|
||||
return nil
|
||||
}
|
||||
|
||||
if response.PlanDetails != "" {
|
||||
|
||||
// copy and replace the active license record
|
||||
l := model.License{
|
||||
Key: lm.activeLicense.Key,
|
||||
CreatedAt: lm.activeLicense.CreatedAt,
|
||||
PlanDetails: response.PlanDetails,
|
||||
ValidationMessage: lm.activeLicense.ValidationMessage,
|
||||
ActivationId: lm.activeLicense.ActivationId,
|
||||
}
|
||||
|
||||
if err := l.ParsePlan(); err != nil {
|
||||
zap.S().Errorf("failed to parse updated license", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// updated plan is parsable, check if plan has changed
|
||||
if lm.activeLicense.PlanDetails != response.PlanDetails {
|
||||
err := lm.repo.UpdatePlanDetails(ctx, lm.activeLicense.Key, response.PlanDetails)
|
||||
if err != nil {
|
||||
// unexpected db write issue but we can let the user continue
|
||||
// and wait for update to work in next cycle.
|
||||
zap.S().Errorf("failed to validate license", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// activate the updated license plan
|
||||
lm.SetActive(&l)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Activate activates a license key with signoz server
|
||||
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
|
||||
defer func() {
|
||||
if errResponse != nil {
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
|
||||
map[string]interface{}{"err": errResponse.Err.Error()})
|
||||
}
|
||||
}()
|
||||
|
||||
response, apiError := validate.ActivateLicense(key, "")
|
||||
if apiError != nil {
|
||||
zap.S().Errorf("failed to activate license", zap.Error(apiError.Err))
|
||||
return nil, apiError
|
||||
}
|
||||
|
||||
l := &model.License{
|
||||
Key: key,
|
||||
ActivationId: response.ActivationId,
|
||||
PlanDetails: response.PlanDetails,
|
||||
}
|
||||
|
||||
// parse validity and features from the plan details
|
||||
err := l.ParsePlan()
|
||||
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to activate license", zap.Error(err))
|
||||
return nil, model.InternalError(err)
|
||||
}
|
||||
|
||||
// store the license before activating it
|
||||
err = lm.repo.InsertLicense(ctx, l)
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to activate license", zap.Error(err))
|
||||
return nil, model.InternalError(err)
|
||||
}
|
||||
|
||||
// license is valid, activate it
|
||||
lm.SetActive(l)
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// CheckFeature will be internally used by backend routines
|
||||
// for feature gating
|
||||
func (lm *Manager) CheckFeature(featureKey string) error {
|
||||
if value, ok := lm.activeFeatures[featureKey]; ok {
|
||||
if value {
|
||||
return nil
|
||||
}
|
||||
return basemodel.ErrFeatureUnavailable{Key: featureKey}
|
||||
}
|
||||
return basemodel.ErrFeatureUnavailable{Key: featureKey}
|
||||
}
|
||||
|
||||
// GetFeatureFlags returns current active features
|
||||
func (lm *Manager) GetFeatureFlags() basemodel.FeatureSet {
|
||||
return lm.activeFeatures
|
||||
}
|
||||
|
||||
// GetRepo return the license repo
|
||||
func (lm *Manager) GetRepo() *Repo {
|
||||
return lm.repo
|
||||
}
|
||||
37
ee/query-service/license/sqlite/init.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
func InitDB(db *sqlx.DB) error {
|
||||
var err error
|
||||
if db == nil {
|
||||
return fmt.Errorf("invalid db connection")
|
||||
}
|
||||
|
||||
table_schema := `CREATE TABLE IF NOT EXISTS licenses(
|
||||
key TEXT PRIMARY KEY,
|
||||
createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
updatedAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
planDetails TEXT,
|
||||
activationId TEXT,
|
||||
validationMessage TEXT,
|
||||
lastValidated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sites(
|
||||
uuid TEXT PRIMARY KEY,
|
||||
alias VARCHAR(180) DEFAULT 'PROD',
|
||||
url VARCHAR(300),
|
||||
createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
`
|
||||
|
||||
_, err = db.Exec(table_schema)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error in creating licenses table: %s", err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
90
ee/query-service/main.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"go.signoz.io/signoz/ee/query-service/app"
|
||||
"go.signoz.io/signoz/pkg/query-service/auth"
|
||||
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
|
||||
"go.signoz.io/signoz/pkg/query-service/version"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
func initZapLog() *zap.Logger {
|
||||
config := zap.NewDevelopmentConfig()
|
||||
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
|
||||
config.EncoderConfig.TimeKey = "timestamp"
|
||||
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
logger, _ := config.Build()
|
||||
return logger
|
||||
}
|
||||
|
||||
func main() {
|
||||
var promConfigPath string
|
||||
|
||||
// disables rule execution but allows changes to the rule definition
|
||||
var disableRules bool
|
||||
|
||||
// the url used to build links in the alert messages in slack and other systems
|
||||
var ruleRepoURL string
|
||||
|
||||
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
|
||||
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
|
||||
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
|
||||
flag.Parse()
|
||||
|
||||
loggerMgr := initZapLog()
|
||||
zap.ReplaceGlobals(loggerMgr)
|
||||
defer loggerMgr.Sync() // flushes buffer, if any
|
||||
|
||||
logger := loggerMgr.Sugar()
|
||||
version.PrintVersion()
|
||||
|
||||
serverOptions := &app.ServerOptions{
|
||||
HTTPHostPort: baseconst.HTTPHostPort,
|
||||
PromConfigPath: promConfigPath,
|
||||
PrivateHostPort: baseconst.PrivateHostPort,
|
||||
DisableRules: disableRules,
|
||||
RuleRepoURL: ruleRepoURL,
|
||||
}
|
||||
|
||||
// Read the jwt secret key
|
||||
auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
|
||||
|
||||
if len(auth.JwtSecret) == 0 {
|
||||
zap.S().Warn("No JWT secret key is specified.")
|
||||
} else {
|
||||
zap.S().Info("No JWT secret key set successfully.")
|
||||
}
|
||||
|
||||
server, err := app.NewServer(serverOptions)
|
||||
if err != nil {
|
||||
logger.Fatal("Failed to create server", zap.Error(err))
|
||||
}
|
||||
|
||||
if err := server.Start(); err != nil {
|
||||
logger.Fatal("Could not start servers", zap.Error(err))
|
||||
}
|
||||
|
||||
if err := auth.InitAuthCache(context.Background()); err != nil {
|
||||
logger.Fatal("Failed to initialize auth cache", zap.Error(err))
|
||||
}
|
||||
|
||||
signalsChannel := make(chan os.Signal, 1)
|
||||
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
for {
|
||||
select {
|
||||
case status := <-server.HealthCheckStatus():
|
||||
logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
|
||||
case <-signalsChannel:
|
||||
logger.Fatal("Received OS Interrupt Signal ... ")
|
||||
}
|
||||
}
|
||||
}
|
||||
21
ee/query-service/model/auth.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
// PrecheckResponse contains login precheck response
|
||||
type PrecheckResponse struct {
|
||||
SSO bool `json:"sso"`
|
||||
SsoUrl string `json:"ssoUrl"`
|
||||
CanSelfRegister bool `json:"canSelfRegister"`
|
||||
IsUser bool `json:"isUser"`
|
||||
SsoError string `json:"ssoError"`
|
||||
}
|
||||
|
||||
// GettableInvitation overrides base object and adds precheck into
|
||||
// response
|
||||
type GettableInvitation struct {
|
||||
*basemodel.InvitationResponseObject
|
||||
Precheck *PrecheckResponse `json:"precheck"`
|
||||
}
|
||||
142
ee/query-service/model/domain.go
Normal file
@@ -0,0 +1,142 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/pkg/errors"
|
||||
saml2 "github.com/russellhaering/gosaml2"
|
||||
"go.signoz.io/signoz/ee/query-service/saml"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
type SSOType string
|
||||
|
||||
const (
|
||||
SAML SSOType = "SAML"
|
||||
GoogleAuth SSOType = "GOOGLE_AUTH"
|
||||
)
|
||||
|
||||
type SamlConfig struct {
|
||||
SamlEntity string `json:"samlEntity"`
|
||||
SamlIdp string `json:"samlIdp"`
|
||||
SamlCert string `json:"samlCert"`
|
||||
}
|
||||
|
||||
// OrgDomain identifies org-owned web domains for auth and other purposes
|
||||
type OrgDomain struct {
|
||||
Id uuid.UUID `json:"id"`
|
||||
Name string `json:"name"`
|
||||
OrgId string `json:"orgId"`
|
||||
SsoEnabled bool `json:"ssoEnabled"`
|
||||
SsoType SSOType `json:"ssoType"`
|
||||
SamlConfig *SamlConfig `json:"samlConfig"`
|
||||
Org *basemodel.Organization
|
||||
}
|
||||
|
||||
// Valid is used as a pipeline function to check if an org domain
|
||||
// loaded from db is valid
|
||||
func (od *OrgDomain) Valid(err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if od.Id == uuid.Nil || od.OrgId == "" {
|
||||
return fmt.Errorf("both id and orgId are required")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidNew checks if the org domain is valid for insertion in db
|
||||
func (od *OrgDomain) ValidNew() error {
|
||||
|
||||
if od.OrgId == "" {
|
||||
return fmt.Errorf("orgId is required")
|
||||
}
|
||||
|
||||
if od.Name == "" {
|
||||
return fmt.Errorf("name is required")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadConfig loads config params from json text
|
||||
func (od *OrgDomain) LoadConfig(jsondata string) error {
|
||||
d := *od
|
||||
err := json.Unmarshal([]byte(jsondata), &d)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal json to OrgDomain{}")
|
||||
}
|
||||
*od = d
|
||||
return nil
|
||||
}
|
||||
|
||||
func (od *OrgDomain) GetSAMLEntityID() string {
|
||||
if od.SamlConfig != nil {
|
||||
return od.SamlConfig.SamlEntity
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (od *OrgDomain) GetSAMLIdpURL() string {
|
||||
if od.SamlConfig != nil {
|
||||
return od.SamlConfig.SamlIdp
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (od *OrgDomain) GetSAMLCert() string {
|
||||
if od.SamlConfig != nil {
|
||||
return od.SamlConfig.SamlCert
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// PrepareSamlRequest creates a gosaml2 service provider request
|
||||
func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServiceProvider, error) {
|
||||
|
||||
// this is the url Idp will call after login completion
|
||||
acs := fmt.Sprintf("%s://%s/%s",
|
||||
siteUrl.Scheme,
|
||||
siteUrl.Host,
|
||||
"api/v1/complete/saml")
|
||||
|
||||
// this is the address of the calling url, useful to redirect user
|
||||
sourceUrl := fmt.Sprintf("%s://%s%s",
|
||||
siteUrl.Scheme,
|
||||
siteUrl.Host,
|
||||
siteUrl.Path)
|
||||
|
||||
// ideally this should be some unique ID for each installation
|
||||
// but since we don't have UI to support it, we default it to
|
||||
// host. this issuer is an identifier of service provider (signoz)
|
||||
// on id provider (e.g. azure, okta). Azure requires this id to be configured
|
||||
// in their system, while others seem to not care about it.
|
||||
// currently we default it to host from window.location (received from browser)
|
||||
issuer := siteUrl.Host
|
||||
|
||||
return saml.PrepareRequest(issuer, acs, sourceUrl, od.GetSAMLEntityID(), od.GetSAMLIdpURL(), od.GetSAMLCert())
|
||||
}
|
||||
|
||||
func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
|
||||
|
||||
sp, err := od.PrepareSamlRequest(siteUrl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)
|
||||
|
||||
relayState := fmt.Sprintf("%s://%s%s?domainId=%s",
|
||||
siteUrl.Scheme,
|
||||
siteUrl.Host,
|
||||
siteUrl.Path,
|
||||
fmtDomainId)
|
||||
|
||||
return sp.BuildAuthURL(relayState)
|
||||
}
|
||||
91
ee/query-service/model/errors.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
type ApiError struct {
|
||||
Typ basemodel.ErrorType
|
||||
Err error
|
||||
}
|
||||
|
||||
func (a *ApiError) Type() basemodel.ErrorType {
|
||||
return a.Typ
|
||||
}
|
||||
|
||||
func (a *ApiError) ToError() error {
|
||||
if a != nil {
|
||||
return a.Err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ApiError) Error() string {
|
||||
return a.Err.Error()
|
||||
}
|
||||
|
||||
func (a *ApiError) IsNil() bool {
|
||||
return a == nil || a.Err == nil
|
||||
}
|
||||
|
||||
// NewApiError returns a ApiError object of given type
|
||||
func NewApiError(typ basemodel.ErrorType, err error) *ApiError {
|
||||
return &ApiError{
|
||||
Typ: typ,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
// BadRequest returns a ApiError object of bad request
|
||||
func BadRequest(err error) *ApiError {
|
||||
return &ApiError{
|
||||
Typ: basemodel.ErrorBadData,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
// InternalError returns a ApiError object of internal type
|
||||
func InternalError(err error) *ApiError {
|
||||
return &ApiError{
|
||||
Typ: basemodel.ErrorInternal,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
ErrorNone basemodel.ErrorType = ""
|
||||
ErrorTimeout basemodel.ErrorType = "timeout"
|
||||
ErrorCanceled basemodel.ErrorType = "canceled"
|
||||
ErrorExec basemodel.ErrorType = "execution"
|
||||
ErrorBadData basemodel.ErrorType = "bad_data"
|
||||
ErrorInternal basemodel.ErrorType = "internal"
|
||||
ErrorUnavailable basemodel.ErrorType = "unavailable"
|
||||
ErrorNotFound basemodel.ErrorType = "not_found"
|
||||
ErrorNotImplemented basemodel.ErrorType = "not_implemented"
|
||||
ErrorUnauthorized basemodel.ErrorType = "unauthorized"
|
||||
ErrorForbidden basemodel.ErrorType = "forbidden"
|
||||
ErrorConflict basemodel.ErrorType = "conflict"
|
||||
ErrorStreamingNotSupported basemodel.ErrorType = "streaming is not supported"
|
||||
)
|
||||
|
||||
func init() {
|
||||
ErrorNone = basemodel.ErrorNone
|
||||
ErrorTimeout = basemodel.ErrorTimeout
|
||||
ErrorCanceled = basemodel.ErrorCanceled
|
||||
ErrorExec = basemodel.ErrorExec
|
||||
ErrorBadData = basemodel.ErrorBadData
|
||||
ErrorInternal = basemodel.ErrorInternal
|
||||
ErrorUnavailable = basemodel.ErrorUnavailable
|
||||
ErrorNotFound = basemodel.ErrorNotFound
|
||||
ErrorNotImplemented = basemodel.ErrorNotImplemented
|
||||
ErrorUnauthorized = basemodel.ErrorUnauthorized
|
||||
ErrorForbidden = basemodel.ErrorForbidden
|
||||
ErrorConflict = basemodel.ErrorConflict
|
||||
ErrorStreamingNotSupported = basemodel.ErrorStreamingNotSupported
|
||||
}
|
||||
|
||||
type ErrUnsupportedAuth struct{}
|
||||
|
||||
func (errUnsupportedAuth ErrUnsupportedAuth) Error() string {
|
||||
return "this authentication method not supported"
|
||||
}
|
||||
91
ee/query-service/model/license.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
type License struct {
|
||||
Key string `json:"key" db:"key"`
|
||||
ActivationId string `json:"activationId" db:"activationId"`
|
||||
CreatedAt time.Time `db:"created_at"`
|
||||
|
||||
// PlanDetails contains the encrypted plan info
|
||||
PlanDetails string `json:"planDetails" db:"planDetails"`
|
||||
|
||||
// stores parsed license details
|
||||
LicensePlan
|
||||
|
||||
FeatureSet basemodel.FeatureSet
|
||||
|
||||
// populated in case license has any errors
|
||||
ValidationMessage string `db:"validationMessage"`
|
||||
|
||||
// used only for sending details to front-end
|
||||
IsCurrent bool `json:"isCurrent"`
|
||||
}
|
||||
|
||||
func (l *License) MarshalJSON() ([]byte, error) {
|
||||
|
||||
return json.Marshal(&struct {
|
||||
Key string `json:"key" db:"key"`
|
||||
ActivationId string `json:"activationId" db:"activationId"`
|
||||
ValidationMessage string `db:"validationMessage"`
|
||||
IsCurrent bool `json:"isCurrent"`
|
||||
PlanKey string `json:"planKey"`
|
||||
ValidFrom time.Time `json:"ValidFrom"`
|
||||
ValidUntil time.Time `json:"ValidUntil"`
|
||||
Status string `json:"status"`
|
||||
}{
|
||||
Key: l.Key,
|
||||
ActivationId: l.ActivationId,
|
||||
IsCurrent: l.IsCurrent,
|
||||
PlanKey: l.PlanKey,
|
||||
ValidFrom: time.Unix(l.ValidFrom, 0),
|
||||
ValidUntil: time.Unix(l.ValidUntil, 0),
|
||||
Status: l.Status,
|
||||
ValidationMessage: l.ValidationMessage,
|
||||
})
|
||||
}
|
||||
|
||||
type LicensePlan struct {
|
||||
PlanKey string `json:"planKey"`
|
||||
ValidFrom int64 `json:"validFrom"`
|
||||
ValidUntil int64 `json:"validUntil"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
func (l *License) ParsePlan() error {
|
||||
l.LicensePlan = LicensePlan{}
|
||||
|
||||
planData, err := base64.StdEncoding.DecodeString(l.PlanDetails)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
plan := LicensePlan{}
|
||||
err = json.Unmarshal([]byte(planData), &plan)
|
||||
if err != nil {
|
||||
l.ValidationMessage = "failed to parse plan from license"
|
||||
return errors.Wrap(err, "failed to parse plan from license")
|
||||
}
|
||||
|
||||
l.LicensePlan = plan
|
||||
l.ParseFeatures()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *License) ParseFeatures() {
|
||||
switch l.PlanKey {
|
||||
case Pro:
|
||||
l.FeatureSet = ProPlan
|
||||
case Enterprise:
|
||||
l.FeatureSet = EnterprisePlan
|
||||
default:
|
||||
l.FeatureSet = BasicPlan
|
||||
}
|
||||
}
|
||||
27
ee/query-service/model/plans.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
basemodel "go.signoz.io/signoz/pkg/query-service/model"
|
||||
)
|
||||
|
||||
const SSO = "SSO"
|
||||
const Basic = "BASIC_PLAN"
|
||||
const Pro = "PRO_PLAN"
|
||||
const Enterprise = "ENTERPRISE_PLAN"
|
||||
const DisableUpsell = "DISABLE_UPSELL"
|
||||
|
||||
var BasicPlan = basemodel.FeatureSet{
|
||||
Basic: true,
|
||||
SSO: false,
|
||||
DisableUpsell: false,
|
||||
}
|
||||
|
||||
var ProPlan = basemodel.FeatureSet{
|
||||
Pro: true,
|
||||
SSO: true,
|
||||
}
|
||||
|
||||
var EnterprisePlan = basemodel.FeatureSet{
|
||||
Enterprise: true,
|
||||
SSO: true,
|
||||
}
|
||||
35
ee/query-service/model/usage.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type UsageSnapshot struct {
|
||||
CurrentLogSizeBytes uint64 `json:"currentLogSizeBytes"`
|
||||
CurrentLogSizeBytesColdStorage uint64 `json:"currentLogSizeBytesColdStorage"`
|
||||
CurrentSpansCount uint64 `json:"currentSpansCount"`
|
||||
CurrentSpansCountColdStorage uint64 `json:"currentSpansCountColdStorage"`
|
||||
CurrentSamplesCount uint64 `json:"currentSamplesCount"`
|
||||
CurrentSamplesCountColdStorage uint64 `json:"currentSamplesCountColdStorage"`
|
||||
}
|
||||
|
||||
type UsageBase struct {
|
||||
Id uuid.UUID `json:"id" db:"id"`
|
||||
InstallationId uuid.UUID `json:"installationId" db:"installation_id"`
|
||||
ActivationId uuid.UUID `json:"activationId" db:"activation_id"`
|
||||
CreatedAt time.Time `json:"createdAt" db:"created_at"`
|
||||
FailedSyncRequest int `json:"failedSyncRequest" db:"failed_sync_request_count"`
|
||||
}
|
||||
|
||||
type UsagePayload struct {
|
||||
UsageBase
|
||||
Metrics UsageSnapshot `json:"metrics"`
|
||||
SnapshotDate time.Time `json:"snapshotDate"`
|
||||
}
|
||||
|
||||
type Usage struct {
|
||||
UsageBase
|
||||
Snapshot string `db:"snapshot"`
|
||||
}
|
||||
107
ee/query-service/saml/request.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package saml
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
saml2 "github.com/russellhaering/gosaml2"
|
||||
dsig "github.com/russellhaering/goxmldsig"
|
||||
"go.signoz.io/signoz/pkg/query-service/constants"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func LoadCertificateStore(certString string) (dsig.X509CertificateStore, error) {
|
||||
certStore := &dsig.MemoryX509CertificateStore{
|
||||
Roots: []*x509.Certificate{},
|
||||
}
|
||||
|
||||
certData, err := base64.StdEncoding.DecodeString(certString)
|
||||
if err != nil {
|
||||
return certStore, fmt.Errorf(fmt.Sprintf("failed to read certificate: %v", err))
|
||||
}
|
||||
|
||||
idpCert, err := x509.ParseCertificate(certData)
|
||||
if err != nil {
|
||||
return certStore, fmt.Errorf(fmt.Sprintf("failed to prepare saml request, invalid cert: %s", err.Error()))
|
||||
}
|
||||
|
||||
certStore.Roots = append(certStore.Roots, idpCert)
|
||||
|
||||
return certStore, nil
|
||||
}
|
||||
|
||||
func LoadCertFromPem(certString string) (dsig.X509CertificateStore, error) {
|
||||
certStore := &dsig.MemoryX509CertificateStore{
|
||||
Roots: []*x509.Certificate{},
|
||||
}
|
||||
|
||||
block, _ := pem.Decode([]byte(certString))
|
||||
if block == nil {
|
||||
return certStore, fmt.Errorf("no valid pem cert found")
|
||||
}
|
||||
|
||||
idpCert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
return certStore, fmt.Errorf(fmt.Sprintf("failed to parse pem cert: %s", err.Error()))
|
||||
}
|
||||
|
||||
certStore.Roots = append(certStore.Roots, idpCert)
|
||||
|
||||
return certStore, nil
|
||||
}
|
||||
|
||||
// PrepareRequest prepares authorization URL (Idp Provider URL)
|
||||
func PrepareRequest(issuer, acsUrl, audience, entity, idp, certString string) (*saml2.SAMLServiceProvider, error) {
|
||||
var certStore dsig.X509CertificateStore
|
||||
if certString == "" {
|
||||
return nil, fmt.Errorf("invalid certificate data")
|
||||
}
|
||||
|
||||
var err error
|
||||
if strings.Contains(certString, "-----BEGIN CERTIFICATE-----") {
|
||||
certStore, err = LoadCertFromPem(certString)
|
||||
} else {
|
||||
certStore, err = LoadCertificateStore(certString)
|
||||
}
|
||||
// certificate store can not be created, throw error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
randomKeyStore := dsig.RandomKeyStoreForTest()
|
||||
|
||||
// SIGNOZ_SAML_RETURN_URL env var would support overriding window.location
|
||||
// as return destination after saml request is complete from IdP side.
|
||||
// this var is also useful for development, as it is easy to override with backend endpoint
|
||||
// e.g. http://localhost:8080/api/v1/complete/saml
|
||||
acsUrl = constants.GetOrDefaultEnv("SIGNOZ_SAML_RETURN_URL", acsUrl)
|
||||
|
||||
sp := &saml2.SAMLServiceProvider{
|
||||
IdentityProviderSSOURL: idp,
|
||||
IdentityProviderIssuer: entity,
|
||||
ServiceProviderIssuer: issuer,
|
||||
AssertionConsumerServiceURL: acsUrl,
|
||||
SignAuthnRequests: true,
|
||||
AllowMissingAttributes: true,
|
||||
|
||||
// about cert stores -sender(signoz app) and receiver (idp)
|
||||
// The random key (random key store) is sender cert. The public cert store(IDPCertificateStore) that you see on org domain is receiver cert (idp provided).
|
||||
// At the moment, neither the library we use nor the IdP cares about the sender cert. It just adds an additional layer of security, which we can explore in future versions
|
||||
// The receiver (Idp) cert will be different for each org domain. Imagine a cloud setup where each company sets up their domain that integrates with their Idp.
|
||||
// @signoz.io
|
||||
// @next.io
|
||||
// Each of above will have their own Idp setup and hence separate public cert to decrypt the response.
|
||||
// The way SAML request travels is -
|
||||
// SigNoz Backend -> IdP Login Screen -> SigNoz Backend -> SigNoz Frontend
|
||||
// ---------------- | -------------------| -------------------------------------
|
||||
// The dotted lines indicate request boundaries. So if you notice, the response from Idp starts a new request, hence we need relay state to pass the context around.
|
||||
|
||||
IDPCertificateStore: certStore,
|
||||
SPKeyStore: randomKeyStore,
|
||||
}
|
||||
zap.S().Debugf("SAML request:", sp)
|
||||
return sp, nil
|
||||
}
|
||||
317
ee/query-service/usage/manager.go
Normal file
@@ -0,0 +1,317 @@
|
||||
package usage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/v2"
|
||||
"github.com/google/uuid"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"go.uber.org/zap"
|
||||
|
||||
licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
|
||||
"go.signoz.io/signoz/ee/query-service/license"
|
||||
"go.signoz.io/signoz/ee/query-service/model"
|
||||
"go.signoz.io/signoz/ee/query-service/usage/repository"
|
||||
"go.signoz.io/signoz/pkg/query-service/utils/encryption"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxRetries = 3
|
||||
RetryInterval = 5 * time.Second
|
||||
stateUnlocked uint32 = 0
|
||||
stateLocked uint32 = 1
|
||||
)
|
||||
|
||||
var (
|
||||
// collect usage every hour
|
||||
collectionFrequency = 1 * time.Hour
|
||||
|
||||
// send usage every 24 hour
|
||||
uploadFrequency = 24 * time.Hour
|
||||
|
||||
locker = stateUnlocked
|
||||
)
|
||||
|
||||
type Manager struct {
|
||||
repository *repository.Repository
|
||||
|
||||
clickhouseConn clickhouse.Conn
|
||||
|
||||
licenseRepo *license.Repo
|
||||
|
||||
// done signals the usage routine to stop; this is important for gracefully
// stopping usage reporting and protecting against inconsistent updates
|
||||
done chan struct{}
|
||||
|
||||
// terminated waits for the UsageExporter go routine to end
|
||||
terminated chan struct{}
|
||||
}
|
||||
|
||||
func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
|
||||
repo := repository.New(db)
|
||||
|
||||
err := repo.Init(dbType)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initiate usage repo: %v", err)
|
||||
}
|
||||
|
||||
m := &Manager{
|
||||
repository: repo,
|
||||
clickhouseConn: clickhouseConn,
|
||||
licenseRepo: licenseRepo,
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Start uploads any pending snapshots, collects current usage if needed, and starts the exporter
|
||||
func (lm *Manager) Start() error {
|
||||
// compares locker with stateUnlocked; if they are equal the lock is applied, otherwise an error is returned
|
||||
if !atomic.CompareAndSwapUint32(&locker, stateUnlocked, stateLocked) {
|
||||
return fmt.Errorf("usage exporter is locked")
|
||||
}
|
||||
|
||||
// check if license is present or not
|
||||
license, err := lm.licenseRepo.GetActiveLicense(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get active license")
|
||||
}
|
||||
if license == nil {
|
||||
// we will not start the usage reporting if license is not present.
|
||||
zap.S().Info("no license present, skipping usage reporting")
|
||||
return nil
|
||||
}
|
||||
|
||||
// upload previous snapshots if any
|
||||
err = lm.UploadUsage(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// collect a snapshot in case one wasn't collected in (t - collectionFrequency)
|
||||
err = lm.CollectCurrentUsage(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go lm.UsageExporter(context.Background())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CollectCurrentUsage checks whether usage data needs to be collected
|
||||
func (lm *Manager) CollectCurrentUsage(ctx context.Context) error {
|
||||
// check the DB if anything exist where timestamp > t - collectionFrequency
|
||||
ts := time.Now().Add(-collectionFrequency)
|
||||
alreadyCreated, err := lm.repository.CheckSnapshotGtCreatedAt(ctx, ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !alreadyCreated {
|
||||
zap.S().Info("Collecting current usage")
|
||||
exportError := lm.CollectAndStoreUsage(ctx)
|
||||
if exportError != nil {
|
||||
return exportError
|
||||
}
|
||||
} else {
|
||||
zap.S().Info("Nothing to collect")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lm *Manager) UsageExporter(ctx context.Context) {
|
||||
defer close(lm.terminated)
|
||||
|
||||
collectionTicker := time.NewTicker(collectionFrequency)
|
||||
defer collectionTicker.Stop()
|
||||
|
||||
uploadTicker := time.NewTicker(uploadFrequency)
|
||||
defer uploadTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-lm.done:
|
||||
return
|
||||
case <-collectionTicker.C:
|
||||
lm.CollectAndStoreUsage(ctx)
|
||||
case <-uploadTicker.C:
|
||||
lm.UploadUsage(ctx)
|
||||
// remove the old snapshots
|
||||
lm.repository.DropOldSnapshots(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type TableSize struct {
|
||||
Table string `ch:"table"`
|
||||
DiskName string `ch:"disk_name"`
|
||||
Rows uint64 `ch:"rows"`
|
||||
UncompressedBytes uint64 `ch:"uncompressed_bytes"`
|
||||
}
|
||||
|
||||
func (lm *Manager) CollectAndStoreUsage(ctx context.Context) error {
|
||||
snap, err := lm.GetUsageFromClickHouse(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
license, err := lm.licenseRepo.GetActiveLicense(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
activationId, _ := uuid.Parse(license.ActivationId)
|
||||
// TODO (nitya) : Add installation ID in the payload
|
||||
payload := model.UsagePayload{
|
||||
UsageBase: model.UsageBase{
|
||||
ActivationId: activationId,
|
||||
FailedSyncRequest: 0,
|
||||
},
|
||||
Metrics: *snap,
|
||||
SnapshotDate: time.Now(),
|
||||
}
|
||||
|
||||
err = lm.repository.InsertSnapshot(ctx, &payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lm *Manager) GetUsageFromClickHouse(ctx context.Context) (*model.UsageSnapshot, error) {
|
||||
tableSizes := []TableSize{}
|
||||
snap := model.UsageSnapshot{}
|
||||
|
||||
// get usage from clickhouse
|
||||
query := `
|
||||
SELECT
|
||||
table,
|
||||
disk_name,
|
||||
sum(rows) as rows,
|
||||
sum(data_uncompressed_bytes) AS uncompressed_bytes
|
||||
FROM system.parts
|
||||
WHERE active AND (database in ('signoz_logs', 'signoz_metrics', 'signoz_traces')) AND (table in ('logs','samples_v2', 'signoz_index_v2'))
|
||||
GROUP BY
|
||||
table,
|
||||
disk_name
|
||||
ORDER BY table
|
||||
`
|
||||
err := lm.clickhouseConn.Select(ctx, &tableSizes, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, val := range tableSizes {
|
||||
switch val.Table {
|
||||
case "logs":
|
||||
if val.DiskName == "default" {
|
||||
snap.CurrentLogSizeBytes = val.UncompressedBytes
|
||||
} else {
|
||||
snap.CurrentLogSizeBytesColdStorage = val.UncompressedBytes
|
||||
}
|
||||
case "samples_v2":
|
||||
if val.DiskName == "default" {
|
||||
snap.CurrentSamplesCount = val.Rows
|
||||
} else {
|
||||
snap.CurrentSamplesCountColdStorage = val.Rows
|
||||
}
|
||||
case "signoz_index_v2":
|
||||
if val.DiskName == "default" {
|
||||
snap.CurrentSpansCount = val.Rows
|
||||
} else {
|
||||
snap.CurrentSpansCountColdStorage = val.Rows
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &snap, nil
|
||||
}
|
||||
|
||||
func (lm *Manager) UploadUsage(ctx context.Context) error {
|
||||
snapshots, err := lm.repository.GetSnapshotsNotSynced(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(snapshots) <= 0 {
|
||||
zap.S().Info("no snapshots to upload, skipping.")
|
||||
return nil
|
||||
}
|
||||
|
||||
zap.S().Info("uploading snapshots")
|
||||
for _, snap := range snapshots {
|
||||
metricsBytes, err := encryption.Decrypt([]byte(snap.ActivationId.String()[:32]), []byte(snap.Snapshot))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
metrics := model.UsageSnapshot{}
|
||||
err = json.Unmarshal(metricsBytes, &metrics)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = lm.UploadUsageWithExponentalBackOff(ctx, model.UsagePayload{
|
||||
UsageBase: model.UsageBase{
|
||||
Id: snap.Id,
|
||||
InstallationId: snap.InstallationId,
|
||||
ActivationId: snap.ActivationId,
|
||||
FailedSyncRequest: snap.FailedSyncRequest,
|
||||
},
|
||||
SnapshotDate: snap.CreatedAt,
|
||||
Metrics: metrics,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) error {
|
||||
for i := 1; i <= MaxRetries; i++ {
|
||||
apiErr := licenseserver.SendUsage(ctx, &payload)
|
||||
if apiErr != nil && i == MaxRetries {
|
||||
err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to updated the failure count for snapshot in DB : ", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
zap.S().Errorf("retries stopped : %v", zap.Error(err))
|
||||
// not returning error here since it is captured in the failed count
|
||||
return nil
|
||||
} else if apiErr != nil {
|
||||
// sleeping for exponential backoff
|
||||
sleepDuration := RetryInterval * time.Duration(i)
|
||||
zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err))
|
||||
time.Sleep(sleepDuration)
|
||||
|
||||
// update the failed request count
|
||||
err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
|
||||
if err != nil {
|
||||
zap.S().Errorf("failed to updated the failure count for snapshot in DB : %v", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// update the database that it is synced
|
||||
err := lm.repository.MoveToSynced(ctx, payload.Id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lm *Manager) Stop() {
|
||||
close(lm.done)
|
||||
atomic.StoreUint32(&locker, stateUnlocked)
|
||||
<-lm.terminated
|
||||
}
|
||||
139
ee/query-service/usage/repository/repository.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package repository

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/google/uuid"
	"github.com/jmoiron/sqlx"
	"go.uber.org/zap"

	"go.signoz.io/signoz/ee/query-service/model"
	"go.signoz.io/signoz/ee/query-service/usage/sqlite"
	"go.signoz.io/signoz/pkg/query-service/utils/encryption"
)

const (
	MaxFailedSyncCount = 9 // a snapshot is ignored once it has failed to sync 9 or more times
	SnapShotLife       = 3 * 24 * time.Hour
)

// Repository stores usage snapshots, encrypted, in the query-service DB
type Repository struct {
	db *sqlx.DB
}

// New creates a new usage Repository
func New(db *sqlx.DB) *Repository {
	return &Repository{
		db: db,
	}
}

func (r *Repository) Init(engine string) error {
	switch engine {
	case "sqlite3", "sqlite":
		return sqlite.InitDB(r.db)
	default:
		return fmt.Errorf("unsupported db")
	}
}

func (r *Repository) InsertSnapshot(ctx context.Context, usage *model.UsagePayload) error {

	snapshotBytes, err := json.Marshal(usage.Metrics)
	if err != nil {
		return err
	}

	usage.Id = uuid.New()

	encryptedSnapshot, err := encryption.Encrypt([]byte(usage.ActivationId.String()[:32]), snapshotBytes)
	if err != nil {
		return err
	}

	query := `INSERT INTO usage(id, activation_id, snapshot)
				VALUES ($1, $2, $3)`
	_, err = r.db.ExecContext(ctx,
		query,
		usage.Id,
		usage.ActivationId,
		string(encryptedSnapshot),
	)
	if err != nil {
		zap.S().Errorf("error inserting usage data: %v", zap.Error(err))
		return fmt.Errorf("failed to insert usage in db: %v", err)
	}
	return nil
}

func (r *Repository) MoveToSynced(ctx context.Context, id uuid.UUID) error {

	query := `UPDATE usage
		SET synced = 'true',
			synced_at = $1
		WHERE id = $2`

	_, err := r.db.ExecContext(ctx, query, time.Now(), id)

	if err != nil {
		zap.S().Errorf("error in updating usage: %v", zap.Error(err))
		return fmt.Errorf("failed to update usage in db: %v", err)
	}

	return nil
}

func (r *Repository) IncrementFailedRequestCount(ctx context.Context, id uuid.UUID) error {

	query := `UPDATE usage SET failed_sync_request_count = failed_sync_request_count + 1 WHERE id = $1`
	_, err := r.db.ExecContext(ctx, query, id)
	if err != nil {
		zap.S().Errorf("error in updating usage: %v", zap.Error(err))
		return fmt.Errorf("failed to update usage in db: %v", err)
	}

	return nil
}

func (r *Repository) GetSnapshotsNotSynced(ctx context.Context) ([]*model.Usage, error) {
	snapshots := []*model.Usage{}

	query := `SELECT id, created_at, activation_id, snapshot, failed_sync_request_count from usage where synced != 'true' and failed_sync_request_count < $1 order by created_at asc`

	err := r.db.SelectContext(ctx, &snapshots, query, MaxFailedSyncCount)
	if err != nil {
		return nil, err
	}

	return snapshots, nil
}

func (r *Repository) DropOldSnapshots(ctx context.Context) error {
	query := `delete from usage where created_at <= $1`

	_, err := r.db.ExecContext(ctx, query, time.Now().Add(-(SnapShotLife)))
	if err != nil {
		zap.S().Errorf("failed to remove old snapshots from db: %v", zap.Error(err))
		return err
	}

	return nil
}

// CheckSnapshotGtCreatedAt checks whether any snapshot was created after the provided timestamp
func (r *Repository) CheckSnapshotGtCreatedAt(ctx context.Context, ts time.Time) (bool, error) {

	var snapshots uint64
	query := `SELECT count(*) from usage where created_at > $1`

	err := r.db.QueryRowContext(ctx, query, ts).Scan(&snapshots)
	if err != nil {
		return false, err
	}

	return snapshots > 0, nil
}

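The key passed to encryption.Encrypt and encryption.Decrypt in this file is the first 32 characters of the activation UUID's string form; a UUID string is always 36 characters, so the slice is well defined and yields a 32-byte key (e.g. suitable for AES-256). The helper itself lives in pkg/query-service/utils/encryption and is not shown in this diff; the snippet below only illustrates the key derivation.

	package main

	import (
		"fmt"

		"github.com/google/uuid"
	)

	func main() {
		// Illustrative only: derive the key the same way the repository does.
		activationId := uuid.New()
		key := []byte(activationId.String()[:32]) // a UUID string is 36 chars; take the first 32
		fmt.Println(len(key))                     // prints 32
	}
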
ee/query-service/usage/sqlite/init.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package sqlite

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

func InitDB(db *sqlx.DB) error {
	var err error
	if db == nil {
		return fmt.Errorf("invalid db connection")
	}

	table_schema := `CREATE TABLE IF NOT EXISTS usage(
		id UUID PRIMARY KEY,
		created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
		updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
		activation_id UUID,
		snapshot TEXT,
		synced BOOLEAN DEFAULT 'false',
		synced_at TIMESTAMP,
		failed_sync_request_count INTEGER DEFAULT 0
	);
	`

	_, err = db.Exec(table_schema)
	if err != nil {
		return fmt.Errorf("error in creating usage table: %v", err.Error())
	}
	return nil
}

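A minimal sketch of how these two packages might be wired together at startup, assuming the mattn/go-sqlite3 driver and a local database path; neither the driver choice nor the path comes from this diff.

	package main

	import (
		"log"

		"github.com/jmoiron/sqlx"
		_ "github.com/mattn/go-sqlite3" // assumed driver, not specified in this diff

		"go.signoz.io/signoz/ee/query-service/usage/repository"
	)

	func main() {
		// Path is illustrative; the query-service decides the real location.
		db, err := sqlx.Open("sqlite3", "./signoz.db")
		if err != nil {
			log.Fatalf("failed to open db: %v", err)
		}

		// Creates the usage table if it does not already exist.
		repo := repository.New(db)
		if err := repo.Init("sqlite"); err != nil {
			log.Fatalf("failed to init usage repository: %v", err)
		}
	}
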
@@ -1,4 +1,5 @@
node_modules
.vscode
build
.env
.env
.git

@@ -1,2 +1,4 @@
node_modules
build
build
*.typegen.ts
i18-generate-hash.js

@@ -84,12 +84,28 @@ module.exports = {
				tsx: 'never',
			},
		],
		'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
		'jsx-a11y/label-has-associated-control': [
			'error',
			{
				required: {
					some: ['nesting', 'id'],
				},
			},
		],
		'jsx-a11y/label-has-for': [
			'error',
			{
				required: {
					some: ['nesting', 'id'],
				},
			},
		],
		'@typescript-eslint/no-unused-vars': 'error',

		// eslint rules that need to be removed
		'no-shadow': 'off',
		'@typescript-eslint/no-shadow': 'off',
		'global-require': 'off',
		'@typescript-eslint/no-var-requires': 'off',
		'import/no-cycle': 'off',

		'prettier/prettier': [

frontend/.husky/commit-msg (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"

cd frontend && npm run commitlint

frontend/.husky/pre-commit (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"

cd frontend && yarn lint-staged

@@ -1 +1 @@
12.13.0
16.15.0

@@ -1,5 +1,5 @@
# stage1 as builder
FROM node:12.22.0 as builder
# Builder stage
FROM node:16.15.0-slim as builder

# Add Maintainer Info
LABEL maintainer="signoz"
@@ -9,24 +9,23 @@ ARG TARGETARCH

WORKDIR /frontend

# copy the package.json to install dependencies
# Copy the package.json to install dependencies
COPY package.json ./

# Install the dependencies and make the folder
RUN yarn install
RUN CI=1 yarn install

COPY . .

# Build the project and copy the files
RUN yarn build

FROM nginx:1.18-alpine

#!/bin/sh
FROM nginx:1.18-alpine

COPY conf/default.conf /etc/nginx/conf.d/default.conf

## Remove default nginx index page
# Remove default nginx index page
RUN rm -rf /usr/share/nginx/html/*

# Copy from the stage 1
@@ -34,4 +33,4 @@ COPY --from=builder /frontend/build /usr/share/nginx/html

EXPOSE 3301

ENTRYPOINT ["nginx", "-g", "daemon off;"]
ENTRYPOINT ["nginx", "-g", "daemon off;"]

frontend/babel.config.js (new file, 6 lines)
@@ -0,0 +1,6 @@
module.exports = {
	presets: [
		['@babel/preset-env', { targets: { node: 'current' } }],
		'@babel/preset-typescript',
	],
};

frontend/commitlint.config.ts (new file, 1 line)
@@ -0,0 +1 @@
export default { extends: ['@commitlint/config-conventional'] };

@@ -1,15 +1,19 @@
server {
	listen 3301;
	server_name _;

	gzip on;
	gzip_static on;
	gzip_static on;
	gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
	gzip_proxied any;
	gzip_vary on;
	gzip_comp_level 6;
	gzip_buffers 16 8k;
	gzip_http_version 1.1;
	gzip_http_version 1.1;

	# handle 414 Request-URI Too Large errors from nginx
	client_max_body_size 24M;
	large_client_header_buffers 8 128k;

	location / {
		root /usr/share/nginx/html;

Some files were not shown because too many files have changed in this diff.