Compare commits
1136 Commits
.editorconfig (new file, 33 lines)
@@ -0,0 +1,33 @@
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true

# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,py}]
charset = utf-8

# 4 space indentation
[*.py]
indent_style = space
indent_size = 4

# Tab indentation (no size specified)
[Makefile]
indent_style = tab

# Indentation override for all JS under lib directory
[lib/**.js]
indent_style = space
indent_size = 2

# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2
.github/CODEOWNERS (new file, vendored, 7 lines)
@@ -0,0 +1,7 @@
# CODEOWNERS info: https://help.github.com/en/articles/about-code-owners
# Owners are automatically requested for review for PRs that change code
# that they own.
* @ankitnayan
/frontend/ @palashgdev @pranshuchittora
/deploy/ @prashant-shahi
/pkg/query-service/ @srikanthccv
.github/ISSUE_TEMPLATE/bug_report.md (new file, vendored, 34 lines)
@@ -0,0 +1,34 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

## Bug description

*Please describe.*
*If this affects the front-end, screenshots would be of great help.*

## Expected behavior


## How to reproduce

1.
2.
3.

## Version information
* **Signoz version**:
* **Browser version**:
* **Your OS and version**:
* **Your CPU Architecture**(ARM/Intel):

## Additional context


#### *Thank you* for your bug report – we love squashing them!
.github/ISSUE_TEMPLATE/feature_request.md (new file, vendored, 27 lines)
@@ -0,0 +1,27 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

## Is your feature request related to a problem?

*Please describe.*

## Describe the solution you'd like


## Describe alternatives you've considered


## Additional context
Add any other context or screenshots about the feature request here.


#### *Thank you* for your feature request – we love each and every one!
.github/ISSUE_TEMPLATE/performance-issue-report.md (new file, vendored, 33 lines)
@@ -0,0 +1,33 @@
---
name: Performance issue report
about: Long response times, high resource usage? Ensuring that SigNoz is scalable
  is our top priority
title: ''
labels: ''
assignees: ''

---

## In what situation are you experiencing subpar performance?

*Please describe.*

## How to reproduce

1.
2.
3.

## Your Environment

- [ ] Linux
- [ ] Mac
- [ ] Windows

Please provide details of OS version etc.

## Additional context


#### *Thank you* for your performance issue report – we want SigNoz to be blazing fast!
.github/config.yml (new file, vendored, 31 lines)
@@ -0,0 +1,31 @@
# Configuration for welcome - https://github.com/behaviorbot/welcome

# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
# Comment to be posted to on first time issues
newIssueWelcomeComment: >
  Thanks for opening this issue. A team member should give feedback soon.
  In the meantime, feel free to check out the [contributing guidelines](https://github.com/signoz/signoz/blob/main/CONTRIBUTING.md).


# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
# Comment to be posted to on PRs from first time contributors in your repository
newPRWelcomeComment: >
  Welcome to the SigNoz community! Thank you for your first pull request and making this project better. 🤗


# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge
# Comment to be posted to on pull requests merged by a first time user
firstPRMergeComment: >
  Congrats on merging your first pull request!

  We here at SigNoz are proud of you! 🥳


# Configuration for request-info - https://github.com/behaviorbot/request-info
# Comment to be posted in issues or pull requests, when no description is provided.
requestInfoReplyComment: >
  We would appreciate it if you could provide us with more info about this issue/pr!

requestInfoLabelToAdd: request-more-info
.github/release-drafter.yml (new file, vendored, 29 lines)
@@ -0,0 +1,29 @@
name-template: 'v$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
template: |
  # What's Changed
  $CHANGES
autolabeler:
  - label: 'chore'
    title:
      - '/chore/i'
  - label: 'bug'
    title:
      - '/fix/i'
  - label: 'enhancement'
    title:
      - '/feat/i'

categories:
  - title: '🚀 Features'
    label: 'enhancement'
  - title: '🐛 Bug Fixes'
    labels:
      - 'bug'
  - title: '🧰 Maintenance'
    label: 'chore'
  - title: 'Breaking'
    label: 'breaking'

exclude-labels:
  - 'skip-changelog'
.github/workflows/README.md (new file, vendored, 42 lines)
@@ -0,0 +1,42 @@
# Github actions

## Testing the UI manually on each PR

First we need to make sure the UI is ready:
* Check the `Start tunnel` step in the `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull-<number>-signoz.loca.lt`
* This job will run until the PR is merged or closed, to keep the local tunneling alive
  - GitHub will cancel this job if the PR isn't merged within 6h
  - if the job was cancelled, go to the action and press `Re-run all jobs`

Now you can open your browser at https://pull-<number>-signoz.loca.lt and check the UI.

## Environment Variables

To run the GitHub workflows, a few environment variables need to be added to GitHub secrets:

<table>
<tr>
<th> Variables </th>
<th> Description </th>
<th> Example </th>
</tr>
<tr>
<td> REPONAME </td>
<td> Provide the DockerHub user/organisation name of the image. </td>
<td> signoz </td>
</tr>
<tr>
<td> DOCKERHUB_USERNAME </td>
<td> Docker hub username </td>
<td> signoz </td>
</tr>
<tr>
<td> DOCKERHUB_TOKEN </td>
<td> Docker hub password/token with push permission </td>
<td> **** </td>
</tr>
<tr>
<td> SONAR_TOKEN </td>
<td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
<td> **** </td>
</tr>
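Note: these secrets can also be set from the command line. A minimal sketch, assuming the GitHub CLI (`gh`) is installed and authenticated against this repository; all values shown are placeholders, not real credentials:

```sh
# Add the workflow secrets listed in the table above via the GitHub CLI.
gh secret set REPONAME --body "signoz"
gh secret set DOCKERHUB_USERNAME --body "signoz"
gh secret set DOCKERHUB_TOKEN --body "<dockerhub-push-token>"
gh secret set SONAR_TOKEN --body "<sonarcloud-token>"
```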
.github/workflows/build.yaml (new file, vendored, 38 lines)
@@ -0,0 +1,38 @@
name: build-pipeline

on:
  pull_request:
    branches:
      - develop
      - main
      - release/v*

jobs:
  build-frontend:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install dependencies
        run: cd frontend && yarn install
      - name: Run ESLint
        run: cd frontend && npm run lint
      - name: Run Jest
        run: cd frontend && npm run jest
      - name: TSC
        run: yarn tsc
        working-directory: ./frontend
      - name: Build frontend docker image
        shell: bash
        run: |
          make build-frontend-amd64

  build-query-service:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Build query-service image
        shell: bash
        run: |
          make build-query-service-amd64
.github/workflows/codeball.yml (new file, vendored, 17 lines)
@@ -0,0 +1,17 @@
name: Codeball
on: [pull_request]

jobs:
  codeball_job:
    runs-on: ubuntu-latest
    name: Codeball
    steps:
      # Run Codeball on all new Pull Requests 🚀
      # For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
      - name: Codeball
        uses: sturdy-dev/codeball-action@v2
        with:
          approvePullRequests: "true"
          labelPullRequestsWhenApproved: "true"
          labelPullRequestsWhenReviewNeeded: "false"
          failJobsWhenReviewNeeded: "false"
.github/workflows/codeql.yaml (new file, vendored, 71 lines)
@@ -0,0 +1,71 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ main, v* ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ main ]
  schedule:
    - cron: '32 5 * * 5'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go', 'javascript', 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v1

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #    make bootstrap
      #    make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1
.github/workflows/commitlint.yml (new file, vendored, 18 lines)
@@ -0,0 +1,18 @@
name: commitlint
on: [pull_request]
defaults:
  run:
    working-directory: frontend
jobs:
  lint-commits:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2.3.1
        with:
          # we actually need "github.event.pull_request.commits + 1" commit
          fetch-depth: 0
      - uses: actions/setup-node@v2.1.0
      # or just "yarn" if you depend on "@commitlint/cli" already
      - run: yarn add @commitlint/cli
      - run: yarn add @commitlint/config-conventional
      - run: yarn run commitlint --config ./node_modules/@commitlint/config-conventional/index.js --from HEAD~${{ github.event.pull_request.commits }} --to HEAD
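The final step lints every commit in the PR range against the conventional-commits config. The same check can be reproduced locally before pushing; a sketch, assuming a branch with three new commits (the hard-coded `HEAD~3` stands in for `github.event.pull_request.commits`):

```sh
# Reproduce the workflow's commitlint step locally, from the repo root.
cd frontend
yarn add @commitlint/cli @commitlint/config-conventional
yarn run commitlint \
  --config ./node_modules/@commitlint/config-conventional/index.js \
  --from HEAD~3 --to HEAD
```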
.github/workflows/create-issue-on-pr-merge.yml (new file, vendored, 27 lines)
@@ -0,0 +1,27 @@
on:
  pull_request_target:
    types:
      - closed

env:
  GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
  PR_NUMBER: ${{ github.event.number }}
jobs:
  create_issue_on_merge:
    if: github.event.pull_request.merged == true
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Codebase
        uses: actions/checkout@v2
        with:
          repository: signoz/gh-bot
      - name: Use Node v16
        uses: actions/setup-node@v2
        with:
          node-version: 16
      - name: Setup Cache & Install Dependencies
        uses: bahmutov/npm-install@v1
        with:
          install-command: yarn --frozen-lockfile
      - name: Comment on PR
        run: node create-issue.js
.github/workflows/dependency-review.yml (new file, vendored, 22 lines)
@@ -0,0 +1,22 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency Review'
on: [pull_request]

permissions:
  contents: read

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout Repository'
        uses: actions/checkout@v3
      - name: 'Dependency Review'
        with:
          fail-on-severity: high
        uses: actions/dependency-review-action@v2
.github/workflows/e2e-k3s.yaml (new file, vendored, 84 lines)
@@ -0,0 +1,84 @@
name: e2e-k3s

on:
  pull_request:
    types: [labeled]

jobs:

  e2e-k3s:
    runs-on: ubuntu-latest
    if: ${{ github.event.label.name == 'ok-to-test' }}
    env:
      DOCKER_TAG: pull-${{ github.event.number }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Build query-service image
        run: make build-query-service-amd64

      - name: Build frontend image
        run: make build-frontend-amd64

      - name: Create a k3s cluster
        uses: AbsaOSS/k3d-action@v2
        with:
          cluster-name: "signoz"

      - name: Inject the images to the cluster
        run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz

      - name: Set up HotROD sample-app
        run: |
          # create sample-application namespace
          kubectl create ns sample-application

          # apply hotrod k8s manifest file
          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml

          # wait for all deployments in sample-application namespace to be READY
          kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s

      - name: Deploy the app
        run: |
          # add signoz helm repository
          helm repo add signoz https://charts.signoz.io

          # create platform namespace
          kubectl create ns platform

          # installing signoz using helm
          helm install my-release signoz/signoz -n platform \
            --wait \
            --timeout 10m0s \
            --set frontend.service.type=LoadBalancer \
            --set queryService.image.tag=$DOCKER_TAG \
            --set frontend.image.tag=$DOCKER_TAG

          # get pods, services and the container images
          kubectl get pods -n platform
          kubectl get svc -n platform

      - name: Kick off a sample-app workload
        run: |
          # start the locust swarm
          kubectl -n sample-application run strzal --image=djbingham/curl \
            --restart='OnFailure' -i --rm --command -- curl -X POST -F \
            'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm

      - name: Get short commit SHA and display tunnel URL
        id: get-subdomain
        run: |
          subdomain="pr-$(git rev-parse --short HEAD)"
          echo "URL for tunnelling: https://$subdomain.loca.lt"
          echo "::set-output name=subdomain::$subdomain"

      - name: Start tunnel
        env:
          SUBDOMAIN: ${{ steps.get-subdomain.outputs.subdomain }}
        run: |
          npm install -g localtunnel
          host=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f4)
          port=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1)
          lt -p $port -l $host -s $SUBDOMAIN
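The locust swarm started above keeps generating load for as long as the cluster lives. A hedged sketch of the matching teardown, assuming Locust's classic web API where hitting `/stop` halts the swarm (the same in-cluster curl pattern as the kickoff step; the pod name `strzal` is reused from above):

```sh
# Stop the locust swarm from inside the cluster (assumed /stop endpoint).
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --rm --command -- curl \
  http://locust-master:8089/stop
```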
.github/workflows/playwright.yaml (new file, vendored, 24 lines)
@@ -0,0 +1,24 @@
name: Playwright Tests
on: [pull_request]

jobs:
  playwright:
    defaults:
      run:
        working-directory: frontend
    timeout-minutes: 60
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v2
        with:
          node-version: "16.x"
      - name: Install dependencies
        run: CI=1 yarn install
      - name: Install Playwright
        run: npx playwright install --with-deps
      - name: Run Playwright tests
        run: yarn playwright
        env:
          # This might depend on your test-runner/language binding
          PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}
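The same suite can be pointed at any running SigNoz UI. A sketch of a local run, assuming the frontend is served on port 3301 (the port the `.gitpod.yml` below exposes for the frontend); the localhost URL stands in for the `PLAYWRIGHT_TEST_BASE_URL` secret:

```sh
# Run the Playwright suite against a locally served frontend.
cd frontend
CI=1 yarn install
npx playwright install --with-deps
PLAYWRIGHT_TEST_BASE_URL="http://localhost:3301" yarn playwright
```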
.github/workflows/pr_verify_linked_issue.yml (new file, vendored, 20 lines)
@@ -0,0 +1,20 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue

on:
  pull_request:
    types: [edited, synchronize, opened, reopened]
  check_run:

jobs:
  verify_linked_issue:
    runs-on: ubuntu-latest
    name: Ensure Pull Request has a linked issue.
    steps:
      - name: Verify Linked Issue
        uses: hattan/verify-linked-issue-action@v1.1.0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/push.yaml (new file, vendored, 90 lines)
@@ -0,0 +1,90 @@
name: push

on:
  push:
    branches:
      - main
      - develop
    tags:
      - v*

jobs:

  image-build-and-push-query-service:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - uses: benjlevesque/short-sha@v1.2
        id: short-sha
      - name: Get branch name
        id: branch-name
        uses: tj-actions/branch-names@v5.1
      - name: Set docker tag environment
        run: |
          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
            tag="${{ steps.branch-name.outputs.tag }}"
            tag="${tag:1}"
            echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
          else
            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
          fi
      - name: Build and push docker image
        run: make build-push-query-service

  image-build-and-push-frontend:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install dependencies
        working-directory: frontend
        run: yarn install
      - name: Run Prettier
        working-directory: frontend
        run: npm run prettify
        continue-on-error: true
      - name: Run ESLint
        working-directory: frontend
        run: npm run lint
        continue-on-error: true
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - uses: benjlevesque/short-sha@v1.2
        id: short-sha
      - name: Get branch name
        id: branch-name
        uses: tj-actions/branch-names@v5.1
      - name: Set docker tag environment
        run: |
          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
            tag="${{ steps.branch-name.outputs.tag }}"
            tag="${tag:1}"
            echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
          else
            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
          fi
      - name: Build and push docker image
        run: make build-push-frontend
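Both jobs derive `DOCKER_TAG` the same way: a pushed git tag `vX.Y.Z` becomes image tag `X.Y.Z` (the leading `v` is stripped), `main` maps to `latest`, and any other branch is tagged with its own name. A standalone sketch of that logic; the example refs are hypothetical stand-ins for the `tj-actions/branch-names` outputs:

```sh
# Mirror the workflow's docker-tag derivation for a given ref.
derive_docker_tag() {
  ref="$1"
  case "$ref" in
    v*)   echo "${ref#v}" ;;   # strip leading "v": v0.8.0 -> 0.8.0
    main) echo "latest" ;;
    *)    echo "$ref" ;;       # feature branches tag as the branch name
  esac
}
derive_docker_tag "v0.8.0"     # -> 0.8.0
derive_docker_tag "main"       # -> latest
derive_docker_tag "develop"    # -> develop
```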
.github/workflows/release-drafter.yml (new file, vendored, 29 lines)
@@ -0,0 +1,29 @@
name: Release Drafter

on:
  push:
    # branches to consider in the event; optional, defaults to all
    branches:
      - main
  # pull_request event is required only for autolabeler
  pull_request:
    # Only following types are handled by the action, but one can default to all as well
    types: [opened, reopened, synchronize]

jobs:
  update_release_draft:
    runs-on: ubuntu-latest
    steps:
      # (Optional) GitHub Enterprise requires GHE_HOST variable set
      #- name: Set GHE_HOST
      #  run: |
      #    echo "GHE_HOST=${GITHUB_SERVER_URL##https:\/\/}" >> $GITHUB_ENV

      # Drafts your next Release notes as Pull Requests are merged into "master"
      - uses: release-drafter/release-drafter@v5
        # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
        # with:
        #   config-name: my-config.yml
        #   disable-autolabeler: true
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/remove-label.yaml (new file, vendored, 16 lines)
@@ -0,0 +1,16 @@
name: remove-label

on:
  pull_request_target:
    types: [synchronize]

jobs:
  remove:
    runs-on: ubuntu-latest
    steps:
      - name: Remove label
        uses: buildsville/add-remove-label@v1
        with:
          label: ok-to-test
          type: remove
          token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/repo-stats.yml (new file, vendored, 25 lines)
@@ -0,0 +1,25 @@
on:
  schedule:
    # Run this once per day, towards the end of the day for keeping the most
    # recent data point most meaningful (hours are interpreted in UTC).
    - cron: "0 8 * * *"
  workflow_dispatch: # Allow for running this manually.

jobs:
  j1:
    name: repostats
    runs-on: ubuntu-latest
    steps:
      - name: run-ghrs
        uses: jgehrcke/github-repo-stats@v1.1.0
        with:
          # Define the stats repository (the repo to fetch
          # stats for and to generate the report for).
          # Remove the parameter when the stats repository
          # and the data repository are the same.
          repository: signoz/signoz
          # Set a GitHub API token that can read the stats
          # repository, and that can push to the data
          # repository (which this workflow file lives in),
          # to store data and the report files.
          ghtoken: ${{ github.token }}
.github/workflows/sonar.yml (new file, vendored, 27 lines)
@@ -0,0 +1,27 @@
name: sonar
on:
  pull_request:
    branches:
      - main
      - v*
    paths:
      - 'frontend/**'
defaults:
  run:
    working-directory: frontend
jobs:
  sonar-analysis:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Sonar analysis
        uses: sonarsource/sonarcloud-github-action@master
        with:
          projectBaseDir: frontend
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
.gitignore (vendored, 19 changes)
@@ -1,3 +1,7 @@
+node_modules
+yarn.lock
+package.json
+
 deploy/docker/environment_tiny/common_test
 frontend/node_modules
 frontend/.pnp
@@ -10,6 +14,9 @@ frontend/coverage
 frontend/build
 frontend/.vscode
 frontend/.yarnclean
+frontend/.temp_cache
+frontend/test-results
+
 # misc
 .DS_Store
 .env.local
@@ -25,8 +32,18 @@ frontend/src/constants/env.ts
 .idea
+
 **/.vscode
-*.tgz
 **/build
 **/storage
 **/locust-scripts/__pycache__/
+**/__debug_bin
+
+frontend/*.env
+pkg/query-service/signoz.db
+
+pkg/query-service/tests/test-deploy/data/
+
+
+# local data
+
+/deploy/docker/clickhouse-setup/data/
+/deploy/docker-swarm/clickhouse-setup/data/
.gitpod.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file)
# and commit this file to your remote git repository to share the goodness with others.


tasks:
  - name: Run Script to Comment out required lines
    init: |
      cd ./.scripts
      sh commentLinesForSetup.sh

  - name: Run Docker Images
    init: |
      cd ./deploy
      sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
    # command:

  - name: Run Frontend
    init: |
      cd ./frontend
      yarn install
    command:
      yarn dev

ports:
  - port: 3301
    onOpen: open-browser
  - port: 8080
    onOpen: ignore
  - port: 9000
    onOpen: ignore
  - port: 8123
    onOpen: ignore
  - port: 8089
    onOpen: ignore
  - port: 9093
    onOpen: ignore
.scripts/commentLinesForSetup.sh (new file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/sh

# It comments out the Query-Service & Frontend sections of deploy/docker/clickhouse-setup/docker-compose.yaml
# Update the line numbers when deploy/docker/clickhouse-setup/docker-compose.yaml changes.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz

sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
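The `38,62` prefix is a sed address range: the substitution `s/.*/# &/` (prepend `# ` to the whole line, `&` being the matched text) is applied only to lines 38 through 62. A small self-contained illustration on a hypothetical four-line file:

```sh
# Comment out lines 2-3 of a sample file, the same pattern the script
# applies to lines 38-62 of docker-compose.yaml.
printf 'one\ntwo\nthree\nfour\n' > /tmp/sample.txt
sed -i 2,3's/.*/# &/' /tmp/sample.txt   # GNU sed; BSD sed needs -i ''
cat /tmp/sample.txt                     # one / # two / # three / four
```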
375
CONTRIBUTING.md
375
CONTRIBUTING.md
@@ -1,9 +1,372 @@
# Contributing Guidelines

## Welcome to SigNoz Contributing section 🎉

Hi there! We're thrilled that you'd like to contribute to this project, thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.

Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.

- We accept contributions made to the [SigNoz `develop` branch]()
- Find all SigNoz Docker Hub images here
  - [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
  - [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
  - [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)

## Finding contributions to work on 💬

Looking at the existing issues is a great way to find something to contribute to.
Also, have a look at the [`good first issue` label](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with.

## Sections:

- [General Instructions](#1-general-instructions-)
  - [For Creating Issue(s)](#11-for-creating-issues)
  - [For Pull Request(s)](#12-for-pull-requests)
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
- [Develop Frontend](#3-develop-frontend-)
  - [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
  - [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
  - [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
  - [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
- [Other Ways to Contribute](#other-ways-to-contribute)

# 1. General Instructions 📝

## 1.1 For Creating Issue(s)

Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue) or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.

**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)

#### Details like these are incredibly useful:

- **Requirement** - what kind of use case are you trying to solve?
- **Proposal** - what do you suggest to solve the problem or improve the existing situation?
- Any open questions to address ❓

#### If you are reporting a bug, details like these are incredibly useful:

- A reproducible test case or series of steps.
- The version of our code being used.
- Any modifications you've made relevant to the bug 🐞.
- Anything unusual about your environment or deployment.

Discussing your proposed changes ahead of time will make the contribution process smooth for everyone 🙌.

**[`^top^`](#)**

<hr>

## 1.2 For Pull Request(s)

Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a Pull Request.
Before sending us a pull request, please ensure that:

- You have forked the SigNoz repo on GitHub and cloned it to your machine.
- You have created a branch for your changes.
- You are working against the latest source on the `develop` branch.
- You modify only the source relevant to the specific change you are contributing.
- Local tests pass.
- You commit to your fork using clear commit messages.
- You send us a pull request, answering any default questions in the pull request interface.
- You pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, on your computer, merge main into your branch, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push up those changes.
- Once the change has been approved and merged, we will inform you in a comment.

GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).

**Note:** Unless your change is small, **please** consider submitting separate Pull Request(s):

* 1️⃣ The first PR should include the overall structure of the new component:
  * Readme, configuration, interfaces or base classes, etc.
  * This PR is usually trivial to review, so the size limit does not apply to it.
* 2️⃣ The second PR should include the concrete implementation of the component. If the size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into multiple PRs.
* If there are multiple sub-components, then ideally each one should be implemented as a **separate** pull request.
* The last PR should include changes to **any user-facing documentation,** and should include end-to-end tests if applicable. The component must be enabled only after sufficient testing, and once there is enough confidence in the stability and quality of the component.

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [Slack](https://signoz.io/slack).

### Pointers:

- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)

<hr>

### Conventions to follow when submitting Commits and Pull Request(s)

We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/); more specifically, commits and PRs **should have type specifiers** prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.

e.g. If you are submitting a fix for an issue in the frontend, the PR name should be prefixed with **`fix(FE):`**
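For instance, commit messages following this convention might look like the examples below (illustrative only; these messages are not from the repository):

```
git commit -m "fix(FE): prevent dashboard crash on empty metrics response"
git commit -m "feat(QS): add flag for custom retention period"
git commit -m "docs: clarify local development setup"
```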
- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.

- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

**[`^top^`](#)**

<hr>

# 2. How to Contribute 🙋🏻‍♂️

#### There are primarily 2 areas in which you can contribute to SigNoz

- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)

Depending upon your area of expertise & interest, you can choose one or more to contribute to. Below are detailed instructions to contribute in each area.

**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This helps us understand who is working on an issue and prevents duplicate work. 🙏🏻

⚠️ If you just raise a PR without the corresponding issue being assigned to you, it may not be accepted.

**[`^top^`](#)**

<hr>

# 3. Develop Frontend 🌚

**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**

Also, have a look at the [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to set up the SigNoz frontend locally (with and without Docker).

## 3.1 Contribute to Frontend with Docker installation of SigNoz

- Clone the SigNoz repository and cd into the signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out the `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)

- Run `cd deploy` to move to the deploy directory,
- Install SigNoz locally **without** the frontend,
- Add / uncomment the below configuration in the query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
```
    ports:
      - "8080:8080"
```
<img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">

- Next run,
```
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```
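- To confirm the containers came up, you can list them (a generic Docker check, not SigNoz-specific):
```
docker ps --format "table {{.Names}}\t{{.Status}}"
```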
- `cd ../frontend` and change the baseURL in [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2). For that, create a `.env` file in the `frontend` directory with the environment variable `FRONTEND_API_ENDPOINT` matching your configuration.

  If you have the backend API exposed via the frontend nginx:
```
FRONTEND_API_ENDPOINT=http://localhost:3301
```
  If not:
```
FRONTEND_API_ENDPOINT=http://localhost:8080
```

- Next,
```
yarn install
yarn dev
```

### Important Notes:

Maintainers / contributors who change the line numbers of the `frontend` & `query-service` sections should also update the line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh).

**[`^top^`](#)**

## 3.2 Contribute to Frontend without installing SigNoz backend

If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.

- Clone the SigNoz repository and cd into the signoz/frontend directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
```
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
- Next,
```
yarn install
yarn dev
```

Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you the `<test environment URL>`.

**Frontend should now be accessible at** [`http://localhost:3301/application`](http://localhost:3301/application)

**[`^top^`](#)**

<hr>

# 4. Contribute to Backend (Query-Service) 🌑

[**https://github.com/SigNoz/signoz/tree/develop/pkg/query-service**](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)

## 4.1 To run ClickHouse setup (recommended for local development)

- Clone the SigNoz repository and cd into the signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Run `sudo make dev-setup` to configure the local setup to run query-service,
- Comment out the `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
  <img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">

- Comment out the `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)
  <img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">

- Add the below configuration to the `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)
```
    ports:
      - 9001:9000
```
  <img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">

- Run `cd pkg/query-service/` to move to the `query-service` directory,
- Then, create a `.env` file with the following environment variable
```
SIGNOZ_LOCAL_DB_PATH="./signoz.db"
```
  to set your local environment with the right `RELATIONAL_DATASOURCE_PATH`, as mentioned in [`./constants/constants.go#L38`](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38).
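  For example, from inside `pkg/query-service/` (a quick sketch; any editor works just as well):
```
echo 'SIGNOZ_LOCAL_DB_PATH="./signoz.db"' > .env
```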

- Now, install SigNoz locally **without** the `frontend` and `query-service`:
  - If you are using `x86_64` processors (all Intel/AMD processors), run `sudo make run-x86`
  - If you are on `arm64` processors (Apple M1 Macs), run `sudo make run-arm`
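If you are unsure which architecture you are on, `uname -m` reports it:
```
uname -m    # typically prints x86_64, or arm64 (aarch64 on some Linux distros)
```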

#### Run locally,
```
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```

#### Build and Run locally
```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```

#### Docker Images
The docker images of query-service are available at https://hub.docker.com/r/signoz/query-service

```
docker pull signoz/query-service
```

```
docker pull signoz/query-service:latest
```

```
docker pull signoz/query-service:develop
```

### Important Note:
Maintainers / contributors who change the line numbers of the `frontend` & `query-service` sections should also update the line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh).

**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
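To quickly check that it is responding, you can hit the health endpoint (assuming the standard SigNoz health route; adjust if your build differs):
```
curl http://localhost:8080/api/v1/health
```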

If you want to see how the frontend plays with the query service, you can also run the frontend in your local environment with the baseURL changed to `http://localhost:8080` in [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts), as the `query-service` is now running at port `8080`.

<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.

Click the button below. A workspace with all required environments will be created.

[](https://gitpod.io/#https://github.com/SigNoz/signoz)

> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->

**[`^top^`](#)**

<hr>

# 5. Contribute to SigNoz Helm Chart 📊

**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**

## 5.1 To run helm chart for local development

- Clone the SigNoz charts repository and cd into the charts directory,
```
git clone https://github.com/SigNoz/charts.git && cd charts
```
- It is recommended to use a lightweight Kubernetes (k8s) cluster for local development:
  - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
  - [k3d](https://k3d.io/#installation)
  - [minikube](https://minikube.sigs.k8s.io/docs/start/)
- Create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster (see the sketch after this list),
- Run `make dev-install` to install the SigNoz chart with the `my-release` release name in the `platform` namespace,
- Next run,
```
kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
```
to make the SigNoz UI available at [localhost:3301](http://localhost:3301)
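For example, with `kind`, creating and targeting a local cluster looks like this (a sketch; `signoz-dev` is a hypothetical cluster name):
```
kind create cluster --name signoz-dev
kubectl cluster-info --context kind-signoz-dev
```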

**5.1.1 To install the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
  | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```

**5.1.2 To load data with the HotROD sample app:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
  'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```

**5.1.3 To stop the load generation:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl \
  http://locust-master:8089/stop
```

**5.1.4 To delete the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
  | HOTROD_NAMESPACE=sample-application bash
```

**[`^top^`](#)**

---

## Other Ways to Contribute

There are many other ways to get involved with the community and to participate in this project:

- Use the product, submitting GitHub issues when a problem is found.
- Help code review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and the [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.

## License

By contributing to SigNoz, you agree that your contributions will be licensed under its MIT license.

Again, feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

Thank You!
94
Makefile
Normal file
@@ -0,0 +1,94 @@
#
# Reference Guide - https://www.gnu.org/software/make/manual/make.html
#

# Build variables
BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)

# Internal variables or constants.
FRONTEND_DIRECTORY ?= frontend
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup

REPONAME ?= signoz
DOCKER_TAG ?= latest

FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service

# Build-time Go variables
PACKAGE?=go.signoz.io/query-service
buildVersion=${PACKAGE}/version.buildVersion
buildHash=${PACKAGE}/version.buildHash
buildTime=${PACKAGE}/version.buildTime
gitBranch=${PACKAGE}/version.gitBranch

# -X injects the build metadata into the version package at link time
LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"

all: build-push-frontend build-push-query-service

# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
build-frontend-amd64:
	@echo "------------------"
	@echo "--> Building frontend docker image for amd64"
	@echo "------------------"
	@cd $(FRONTEND_DIRECTORY) && \
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
	--build-arg TARGETPLATFORM="linux/amd64" .

# Step to build and push docker image of frontend (used in push pipeline)
build-push-frontend:
	@echo "------------------"
	@echo "--> Building and pushing frontend docker image"
	@echo "------------------"
	@cd $(FRONTEND_DIRECTORY) && \
	docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/amd64 \
	--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .

# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64:
	@echo "------------------"
	@echo "--> Building query-service docker image for amd64"
	@echo "------------------"
	@cd $(QUERY_SERVICE_DIRECTORY) && \
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
	--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS) .

# Step to build and push docker image of query-service in amd64 and arm64 (used in push pipeline)
build-push-query-service:
	@echo "------------------"
	@echo "--> Building and pushing query-service docker image"
	@echo "------------------"
	@cd $(QUERY_SERVICE_DIRECTORY) && \
	docker buildx build --file Dockerfile --progress plain --no-cache \
	--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) \
	--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .

dev-setup:
	mkdir -p /var/lib/signoz
	sqlite3 /var/lib/signoz/signoz.db "VACUUM";
	mkdir -p pkg/query-service/config/dashboards
	@echo "------------------"
	@echo "--> Local Setup completed"
	@echo "------------------"

run-x86:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up -d

down-x86:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v

clear-standalone-data:
	@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"

clear-swarm-data:
	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
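Typical local usage of these targets (a sketch; `run-arm` is referenced in CONTRIBUTING.md but is not part of this excerpt):
```
sudo make dev-setup            # one-time local setup (sqlite db, dashboards dir)
sudo make run-x86              # bring up the docker-compose stack
make down-x86                  # tear it down
make clear-standalone-data     # wipe local alertmanager/clickhouse/signoz data
```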
160
README.de-de.md
Normal file
@@ -0,0 +1,160 @@
<p align="center">
  <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />

  <p align="center">Monitor your applications and troubleshoot problems in your deployed applications. SigNoz is an open-source alternative to DataDog, New Relic, etc.</p>
</p>

<p align="center">
  <img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
  <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>

<h3 align="center">
  <a href="https://signoz.io/docs"><b>Documentation</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
  <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>

##

SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.

👉 You can see metrics like the p99 latency and error rates of your services, external API calls, and individual endpoints.

👉 You can find the root cause of a problem by going to the exact trace that is causing it and viewing detailed flamegraphs of individual request traces.

👉 Run aggregates on trace data to get business-relevant metrics.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Join our Slack community

Come say hi to us on [Slack](https://signoz.io/slack) 👋

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

## Features:

- Application overview metrics such as RPS, 50th/90th/99th percentile latencies, and error rates.
- Overview of the slowest endpoints in your application.
- See the exact trace of a request to find issues in downstream services, slow database queries, and calls to third-party services such as payment gateways, etc.
- Filter traces by service name, latency, error, tags/annotations.
- Run aggregates on trace data (events/spans) to get business-relevant metrics. For example, you can get the error rate and 99th percentile latency of `customer_type: gold`, `deployment_version: v2`, or `external_call: paypal`.
- Unified UI for metrics and traces. No need to switch back and forth between Prometheus and Jaeger to debug issues.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

## Why SigNoz?

As developers, we found it annoying to rely on closed-source SaaS vendors for every small feature we wanted. Closed-source vendors often surprise their customers at the end of the month with large bills that offer no transparency about the cost breakdown.

We wanted to offer a self-hosted, open-source variant of solutions like DataDog and NewRelic for companies that have privacy and security concerns about sharing customer data with third-party services.

Being open source also gives you complete control over your configuration, sampling, and uptimes. You can also build new modules on top of SigNoz that provide advanced, business-specific capabilities.

### Supported languages:

We support [OpenTelemetry](https://opentelemetry.io) as the library you can use to instrument your applications. Any framework and language supported by OpenTelemetry is also supported by SigNoz. Some of the major supported languages are:

- Java
- Python
- NodeJS
- Go

You can find the complete list of supported languages here - https://opentelemetry.io/docs/

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

## Getting started with SigNoz

### Deploy using Docker

Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to deploy your application using Docker.

The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshooting) may be helpful if you run into any issues.

<p>&nbsp;</p>

### Deploy in Kubernetes using Helm

Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to deploy your application using Helm charts.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

## Comparisons to other solutions

### SigNoz vs. Prometheus

Prometheus is good if you are only interested in metrics. If you want a seamless integration of metrics and traces, the combination of Prometheus and Jaeger is not the right fit.

Our goal is to offer an integrated UI for metrics and traces, similar to what SaaS vendors like Datadog provide, with the ability to do advanced filtering and aggregation on traces, something Jaeger currently lacks.

<p>&nbsp;</p>

### SigNoz vs. Jaeger

Jaeger only does distributed tracing. SigNoz produces both metrics and traces, and we also have log management on our roadmap.

Moreover, SigNoz has a few more advanced features compared to Jaeger:

- The Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't run aggregates on filtered traces, e.g. the p99 latency of requests with the tag customer_type='premium', which is easy to do with SigNoz.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

## Contributing

We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) first before you start contributing to SigNoz.

Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community](https://signoz.io/slack).

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

## Documentation

You can find our documentation at https://signoz.io/docs/. If anything is unclear or missing, feel free to open a GitHub issue with the label `documentation` or reach out to us on the community Slack channel.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Community

Join the [Slack community](https://signoz.io/slack) to learn more about distributed tracing, observability, or SigNoz, and to connect with other users and contributors.

If you have any ideas, questions, or feedback, please share them on our [Github Discussions](https://github.com/SigNoz/signoz/discussions).

As always, thanks to our amazing contributors!

<a href="https://github.com/signoz/signoz/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>
152
README.md
@@ -6,34 +6,64 @@
<p align="center">
  <img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
  <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>

<h3 align="center">
  <a href="https://signoz.io/docs"><b>Documentation</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
  <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>

##

SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.

👉 You can see metrics like p99 latency and error rates for your services, external API calls, and individual endpoints.

👉 You can find the root cause of the problem by going to the exact traces which are causing the problem and see detailed flamegraphs of individual request traces.

👉 Run aggregates on trace data to get business-relevant metrics.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Join our Slack community

Come say Hi to us on [Slack](https://signoz.io/slack) 👋

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

## Features:

- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
- Slowest endpoints in your application
- See exact request traces to figure out issues in downstream services, slow DB queries, calls to 3rd party services like payment gateways, etc.
- Filter traces by service name, operation, latency, error, tags/annotations.
- Run aggregates on trace data (events/spans) to get business-relevant metrics. E.g. you can get the error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

## Why SigNoz?

Being developers, we found it annoying to rely on closed-source SaaS vendors for every small feature we wanted. Closed-source vendors often surprise you with huge month-end bills without any transparency.

@@ -41,7 +71,7 @@ We wanted to make a self-hosted & open source version of tools like DataDog, New

Being open source also gives you complete control of your configuration, sampling, and uptimes. You can also build modules over SigNoz to extend business-specific capabilities.

### Languages supported:

We support [OpenTelemetry](https://opentelemetry.io) as the library which you can use to instrument your applications. So any framework and language supported by OpenTelemetry is also supported by SigNoz. Some of the main supported languages are:

@@ -52,54 +82,96 @@ We support [OpenTelemetry](https://opentelemetry.io) as the library which you ca

You can find the complete list of languages here - https://opentelemetry.io/docs/

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

## Getting Started

### Deploy using Docker

Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using Docker.

The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshooting) may be helpful if you face any issues.

<p>&nbsp;</p>

### Deploy in Kubernetes using Helm

Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using Helm charts.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

## Comparisons to Familiar Tools

### SigNoz vs Prometheus

Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then the current experience of stitching together Prometheus & Jaeger is not great.

Our goal is to provide an integrated UI between metrics & traces - similar to what SaaS vendors like Datadog provide - and give advanced filtering and aggregation over traces, something which Jaeger currently lacks.

<p>&nbsp;</p>

### SigNoz vs Jaeger

Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management in our roadmap.

Moreover, SigNoz has a few more advanced features compared to Jaeger:

- The Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't get aggregates on filtered traces. For example, the p99 latency of requests which have the tag customer_type='premium'. This can be done easily on SigNoz.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

## Contributing

We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.

Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)

### Project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)

#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Pranshu Chittora](https://github.com/pranshuchittora)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

## Documentation

You can find docs at https://signoz.io/docs/. If you need any clarification or find something missing, feel free to raise a GitHub issue with the label `documentation` or reach out to us at the community slack channel.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Community

Join the [slack community](https://signoz.io/slack) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.

If you have any ideas, questions, or any feedback, please share on our [Github Discussions](https://github.com/SigNoz/signoz/discussions)

As always, thanks to our amazing contributors!

<a href="https://github.com/signoz/signoz/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>
159
README.pt-br.md
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
<p align="center">
|
||||||
|
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
|
||||||
|
|
||||||
|
<p align="center">Monitore seus aplicativos e solucione problemas em seus aplicativos implantados, uma alternativa de código aberto para soluções como DataDog, New Relic, entre outras.</p>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
|
||||||
|
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
|
||||||
|
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
|
||||||
|
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
|
||||||
|
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
|
||||||
|
<h3 align="center">
|
||||||
|
<a href="https://signoz.io/docs"><b>Documentação</b></a> •
|
||||||
|
<a href="https://signoz.io/slack"><b>Comunidade no Slack</b></a> •
|
||||||
|
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
|
||||||
|
</h3>
|
||||||
|
|
||||||
|
##
|
||||||
|
|
||||||
|
SigNoz auxilia os desenvolvedores a monitorarem aplicativos e solucionar problemas em seus aplicativos implantados. SigNoz usa rastreamento distribuído para obter visibilidade em sua pilha de software.
|
||||||
|
|
||||||
|
👉 Você pode verificar métricas como latência p99, taxas de erro em seus serviços, requisições às APIs externas e endpoints individuais.
|
||||||
|
|
||||||
|
👉 Você pode encontrar a causa raiz do problema acessando os rastreamentos exatos que estão causando o problema e verificar os quadros detalhados de cada requisição individual.
|
||||||
|
|
||||||
|
👉 Execute agregações em dados de rastreamento para obter métricas de negócios relevantes.
|
||||||
|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
<br /><br />
|
||||||
|
|
||||||
|
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
|
||||||
|
|
||||||
|
## Junte-se à nossa comunidade no Slack
|
||||||
|
|
||||||
|
Venha dizer oi para nós no [Slack](https://signoz.io/slack) 👋
|
||||||
|
|
||||||
|
<br /><br />
|
||||||
|
|
||||||
|
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
|
||||||
|
|
||||||
|
## Funções:
|
||||||
|
|
||||||
|
- Métricas de visão geral do aplicativo, como RPS, latências de percentual 50/90/99 e taxa de erro
|
||||||
|
- Endpoints mais lentos em seu aplicativo
|
||||||
|
- Visualize o rastreamento preciso de requisições de rede para descobrir problemas em serviços downstream, consultas lentas de banco de dados, chamadas para serviços de terceiros, como gateways de pagamento, etc.
|
||||||
|
- Filtre os rastreamentos por nome de serviço, operação, latência, erro, tags / anotações.
|
||||||
|
- Execute agregações em dados de rastreamento (eventos / extensões) para obter métricas de negócios relevantes, como por exemplo, você pode obter a taxa de erro e a latência do 99º percentil de `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
|
||||||
|
- Interface de Usuário unificada para métricas e rastreios. Não há necessidade de mudar de Prometheus para Jaeger para depurar problemas.
|
||||||
|
|
||||||
|
<br /><br />
|
||||||
|
|
||||||
|
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
|
||||||
|
|
||||||
|
## Por que escolher SigNoz?
|
||||||
|
|
||||||
|
Sendo desenvolvedores, achamos irritante contar com fornecedores de SaaS de código fechado para cada pequeno recurso que queríamos. Fornecedores de código fechado costumam surpreendê-lo com enormes contas no final do mês de uso sem qualquer transparência .
|
||||||
|
|
||||||
|
Queríamos fazer uma versão auto-hospedada e de código aberto de ferramentas como DataDog, NewRelic para empresas que têm preocupações com privacidade e segurança em ter dados de clientes indo para serviços de terceiros.
|
||||||
|
|
||||||
|
Ser open source também oferece controle completo de sua configuração, amostragem e tempos de atividade. Você também pode construir módulos sobre o SigNoz para estender recursos específicos do negócio.
|
||||||
|
|
||||||
|
### Linguagens Suportadas:
|
||||||
|
|
||||||
|
Nós apoiamos a biblioteca [OpenTelemetry](https://opentelemetry.io) como a biblioteca que você pode usar para instrumentar seus aplicativos. Em outras palavras, SigNoz oferece suporte a qualquer framework e linguagem que suporte a biblioteca OpenTelemetry. As principais linguagens suportadas incluem:
|
||||||
|
|
||||||
|
- Java
|
||||||
|
- Python
|
||||||
|
- NodeJS
|
||||||
|
- Go
|
||||||
|
|
||||||
|
Você pode encontrar a lista completa de linguagens aqui - https://opentelemetry.io/docs/
|
||||||
|
|
||||||
|
<br /><br />
|
||||||
|
|
||||||
|
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
|
||||||
|
|
||||||
|
## Iniciando
|
||||||
|
|
||||||
|
|
||||||
|
### Implantar usando Docker
|
||||||
|
|
||||||
|
Siga as etapas listadas [aqui](https://signoz.io/docs/deployment/docker/) para instalar usando o Docker.
|
||||||
|
|
||||||
|
Esse [guia para solução de problemas](https://signoz.io/docs/deployment/troubleshooting) pode ser útil se você enfrentar quaisquer problemas.
|
||||||
|
|
||||||
|
<p>  </p>
|
||||||
|
|
||||||
|
|
||||||
|
### Implentar no Kubernetes usando Helm
|
||||||
|
|
||||||
|
Siga as etapas listadas [aqui](https://signoz.io/docs/deployment/helm_chart) para instalar usando helm charts.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

## Comparisons with Similar Tools

### SigNoz vs Prometheus

Prometheus is good if you only want to do metrics. But if you want a seamless experience between metrics and traces, the current experience of stitching Prometheus and Jaeger together is not great.

Our goal is to provide an integrated UI between metrics and traces - similar to what SaaS vendors like Datadog provide - and to offer advanced filtering and aggregation over traces, something that Jaeger currently lacks.

<p>&nbsp;</p>

### SigNoz vs Jaeger

Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management in our plans.

Moreover, SigNoz has a few more advanced features than Jaeger:

- The Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't get aggregates on filtered traces. For example, the p99 latency of requests that have the tag customer_type='premium'. This can be done easily with SigNoz (see the sketch below).
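
For intuition, a hedged sketch of what such an aggregate looks like against the `signoz_index` trace table defined later in this diff, assuming you can reach ClickHouse with `clickhouse-client`. The `signoz_traces` database name comes from the collector exporter config; the `customer_type:premium` encoding inside the `tags` array is an assumption for illustration:

```sh
# p99 latency (in ms) of spans carrying a given tag, computed directly
# in ClickHouse over the signoz_index table from this diff.
clickhouse-client -q "
  SELECT quantile(0.99)(durationNano) / 1e6 AS p99_ms
  FROM signoz_traces.signoz_index
  WHERE has(tags, 'customer_type:premium')
    AND timestamp >= now() - INTERVAL 1 HOUR"
```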

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

## Contributing

We ❤️ contributions, big or small. Read [CONTRIBUTING.md](CONTRIBUTING.md) to get started making contributions to SigNoz.

Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community](https://signoz.io/slack).

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

## Documentation

You can find the documentation at https://signoz.io/docs/. If you have a question or feel something is missing, feel free to create an issue with the `documentation` tag on GitHub, or reach out to us on the community Slack channel.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Community

Join the [Slack community](https://signoz.io/slack) to learn more about distributed tracing, observability, or SigNoz, and to connect with other users and contributors.

If you have any ideas, questions, or feedback, please share them in our [GitHub Discussions](https://github.com/SigNoz/signoz/discussions).

As always, thanks to our amazing contributors!

<a href="https://github.com/signoz/signoz/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>
150
README.zh-cn.md
Normal file
@@ -0,0 +1,150 @@
<p align="center">
  <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />

  <p align="center">Monitor your applications and troubleshoot problems in your deployed applications - an open-source alternative to DataDog and NewRelic</p>
</p>

<p align="center">
  <img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
    <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>

##

SigNoz helps developers monitor applications and troubleshoot problems in deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.

👉 You can see metrics such as the p99 latency and error rates of services, external API calls, and individual endpoints.

👉 You can find what is causing a problem by going to the exact traces responsible, and see the flame graph of each individual request to locate the root cause.



<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Join our Slack community

Come say hi to us on [Slack](https://signoz.io/slack) 👋

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

## Features:

- Application overview metrics such as RPS, 50th/90th/99th percentile latencies, and error rates
- The slowest endpoints in your application
- See exact request traces to figure out issues in downstream services, slow database queries, and calls to third-party services such as payment gateways
- Filter traces by service name, operation, latency, error, and tags
- Run aggregates on filtered trace data to get metrics. For example, get the error rate and p99 latency of traces filtered by `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug problems

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

## Why SigNoz?

As developers, we found it annoying to rely on closed-source SaaS vendors for every small feature. Closed-source vendors often hand you a huge monthly bill without enough transparency into what you are paying for.

We wanted to make a self-hosted, open-source version of tools like DataDog and NewRelic for companies that have privacy and security concerns about customer data going to third-party services.

Being open source also gives you complete control of your configuration, sampling, and uptimes. You can build modules on top of SigNoz to meet specific business needs.

### Supported Languages

We support the [OpenTelemetry](https://opentelemetry.io) library, which you can use to instrument your applications. In other words, SigNoz supports any framework and language that supports the OpenTelemetry library. The major supported languages include:

- Java
- Python
- NodeJS
- Go

You can find the full list of supported languages in the docs - https://opentelemetry.io/docs/

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

## Getting Started

### Deploy using Docker

Follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using Docker.

This [troubleshooting guide](https://signoz.io/docs/deployment/troubleshooting) may be helpful if you run into any problems.

<p>&nbsp;</p>

### Deploy on Kubernetes using Helm

Follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using Helm charts.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

## Comparisons to Familiar Tools

### SigNoz vs Prometheus

Prometheus is good if you only need metrics, but if you want a seamless experience switching between metrics and traces, the current experience of stitching Prometheus & Jaeger together is not great.

Our goal is to provide an integrated UI between metrics and traces - similar to what SaaS vendors like Datadog offer - with the ability to filter and aggregate over traces, a capability Jaeger currently lacks.

<p>&nbsp;</p>

### SigNoz vs Jaeger

Jaeger only does distributed tracing; SigNoz does both metrics and traces, and we also have log management in our plans.

Moreover, SigNoz has some advanced features that Jaeger doesn't:

- The Jaeger UI cannot show metrics on traces or on filtered traces.
- Jaeger cannot run aggregates on filtered traces. For example, the p99 latency of all requests with the tag customer_type='premium' is easy to get in SigNoz.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

## Contributing

We ❤️ contributions, big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started contributing to SigNoz.

Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community](https://signoz.io/slack).

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

## Documentation

You can find the documentation at https://signoz.io/docs/. If anything is unclear or missing, feel free to open an issue on GitHub with the `documentation` tag, or reach out to us on the community Slack channel.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Community

Join the [Slack community](https://signoz.io/slack) to learn more about distributed tracing, observability, and SigNoz, and to connect with other users and contributors.

If you have any ideas, questions, or feedback, please share them in our [GitHub Discussions](https://github.com/SigNoz/signoz/discussions).

Finally, thanks to our amazing contributors!

<a href="https://github.com/signoz/signoz/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>
18
SECURITY.md
Normal file
@@ -0,0 +1,18 @@
# Security Policy

SigNoz looks forward to working with security researchers across the world to keep SigNoz and our users safe. If you have found an issue in our systems/applications, please reach out to us.

## Supported Versions

We always recommend using the latest version of SigNoz to ensure you get all security updates.

## Reporting a Vulnerability

If you believe you have found a security vulnerability within SigNoz, please let us know right away. We'll try to fix the problem as soon as possible.

**Do not report vulnerabilities using public GitHub issues**. Instead, email <security@signoz.io> with a detailed account of the issue. Please submit one issue per email; this helps us triage vulnerabilities.

Once we've received your email, we'll keep you updated as we fix the vulnerability.

## Thanks

Thank you for keeping SigNoz and our users safe. 🙇
88
deploy/README.md
Normal file
@@ -0,0 +1,88 @@
# Deploy

Check that you have cloned [signoz/signoz](https://github.com/signoz/signoz) and are currently in the `signoz/deploy` folder.

## Docker

If you don't have Docker set up, please follow [this guide](https://docs.docker.com/engine/install/) to set up Docker before proceeding with the next steps.

### Using Install Script

Now run the following command to install:

```sh
./install.sh
```

### Using Docker Compose

If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/) to set up docker-compose before proceeding with the next steps.

For x86 chips (amd64):

```sh
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```

For Macs with Apple silicon (arm64):

```sh
docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
```

Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see the data generated from HotROD in the SigNoz UI.

## Kubernetes

### Using Helm

#### Bring up SigNoz cluster

```sh
helm repo add signoz https://charts.signoz.io

kubectl create ns platform

helm -n platform install my-release signoz/signoz
```

To access the UI, you can `port-forward` the frontend service:

```sh
kubectl -n platform port-forward svc/my-release-frontend 3301:3301
```

Open http://localhost:3301 in your favourite browser. A few minutes after you generate load from the HotROD application, you should see the data generated from HotROD in the SigNoz UI.

#### Test HotROD application with SigNoz

```sh
kubectl create ns sample-application

kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
```

To generate load:

```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
 --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```

To stop load:

```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
 --restart='OnFailure' -i --tty --rm --command -- curl \
 http://locust-master:8089/stop
```

## Uninstall / Troubleshoot

Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more; a hedged teardown sketch for the Docker route follows below.
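
As one hedged example of teardown for the Docker Compose route (standard docker-compose behaviour, not a script this repo ships):

```sh
# Stop and remove the SigNoz containers brought up by docker-compose.
docker-compose -f docker/clickhouse-setup/docker-compose.yaml down

# This setup keeps state in ./data bind mounts; remove the directory
# manually only if you also want to discard collected telemetry.
rm -rf docker/clickhouse-setup/data
```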
35
deploy/docker-swarm/clickhouse-setup/alertmanager.yml
Normal file
@@ -0,0 +1,35 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
- name: 'slack-notifications'
  slack_configs:
  - channel: '#alerts'
    send_resolved: true
    icon_url: https://avatars3.githubusercontent.com/u/3380462
    title: |-
      [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
      {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
        {{" "}}(
        {{- with .CommonLabels.Remove .GroupLabels.Names }}
          {{- range $index, $label := .SortedPairs -}}
            {{ if $index }}, {{ end }}
            {{- $label.Name }}="{{ $label.Value -}}"
          {{- end }}
        {{- end -}}
        )
      {{- end }}
    text: >-
      {{ range .Alerts -}}
      *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

      *Description:* {{ .Annotations.description }}

      *Details:*
        {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
        {{ end }}
      {{ end }}
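
If you have Alertmanager's `amtool` available, the file can be linted before deploying - a hedged convenience, not something this repo ships, and the SigNoz Alertmanager image may accept flags the upstream tool does not know about:

```sh
# Validate the Alertmanager configuration syntax locally.
amtool check-config deploy/docker-swarm/clickhouse-setup/alertmanager.yml
```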
11
deploy/docker-swarm/clickhouse-setup/alerts.yml
Normal file
@@ -0,0 +1,11 @@
groups:
- name: ExampleCPULoadGroup
  rules:
  - alert: HighCpuLoad
    expr: system_cpu_load_average_1m > 0.1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: High CPU load
      description: "CPU load is > 0.1\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
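
Since this follows the standard Prometheus rules format, Prometheus's `promtool` should be able to lint it - again a hedged convenience, not something this repo ships:

```sh
# Syntax-check the alerting rules with promtool.
promtool check rules deploy/docker-swarm/clickhouse-setup/alerts.yml
```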
1304
deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml
Normal file
File diff suppressed because it is too large
29
deploy/docker-swarm/clickhouse-setup/clickhouse-storage.xml
Normal file
@@ -0,0 +1,29 @@
<?xml version="1.0"?>
<clickhouse>
    <storage_configuration>
        <disks>
            <default>
                <keep_free_space_bytes>10485760</keep_free_space_bytes>
            </default>
            <s3>
                <type>s3</type>
                <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
                <access_key_id>ACCESS-KEY-ID</access_key_id>
                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
            </s3>
        </disks>
        <policies>
            <tiered>
                <volumes>
                    <default>
                        <disk>default</disk>
                    </default>
                    <s3>
                        <disk>s3</disk>
                        <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
                    </s3>
                </volumes>
            </tiered>
        </policies>
    </storage_configuration>
</clickhouse>
123
deploy/docker-swarm/clickhouse-setup/clickhouse-users.xml
Normal file
@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
    <!-- See also the files in users.d directory where the settings can be overridden. -->

    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose random replica from set of replicas with minimum number of errors
                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
                  with minimum number of different symbols between replica's hostname and local hostname
                  (Hamming distance).
                 in_order - first live replica is chosen in specified order.
                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- See also the files in users.d directory where the password can be overridden.

                 Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
                  place its name in 'server' element inside 'ldap' element.
                 Example: <ldap><server>my_ldap_server</server></ldap>

                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
                  place 'kerberos' element instead of 'password' (and similar) elements.
                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
                  whose initiator's realm matches it.
                 Example: <kerberos />
                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>

                 How to generate decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 In first line will be password and in second - corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
                 In first line will be password and in second - corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                    <ip>::/0</ip>

                 To open access only from localhost, specify:
                    <ip>::1</ip>
                    <ip>127.0.0.1</ip>

                 Each element of list has one of the following forms:
                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.clickhouse.com.
                     To check access, DNS query is performed, and all received addresses compared to peer address.
                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
                     Strongly recommended that regexp is ends with $
                 All results of DNS requests are cached till server restart.
            -->
            <networks>
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- User can create other users and grant rights to them. -->
            <!-- <access_management>1</access_management> -->
        </default>
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>
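
To set a real password for the `default` user, the file's own comments give the recipe; here it is as a runnable sketch (the resulting hash would go into a `password_sha256_hex` element in place of the empty `password`):

```sh
# Generate a random password and its SHA256 hex digest, per the
# instructions embedded in clickhouse-users.xml above.
PASSWORD=$(base64 < /dev/urandom | head -c8)
echo "$PASSWORD"
echo -n "$PASSWORD" | sha256sum | tr -d '-'
```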
151
deploy/docker-swarm/clickhouse-setup/docker-compose.yaml
Normal file
@@ -0,0 +1,151 @@
version: "3.9"

services:
  clickhouse:
    image: clickhouse/clickhouse-server:22.4.5-alpine
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    tty: true
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
    deploy:
      restart_policy:
        condition: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3

  alertmanager:
    image: signoz/alertmanager:0.23.0-0.2
    volumes:
      - ./data/alertmanager:/data
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    depends_on:
      - query-service
    deploy:
      restart_policy:
        condition: on-failure

  query-service:
    image: signoz/query-service:0.10.2
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
    #   - "6060:6060"   # pprof port
    #   - "8080:8080"   # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse

  frontend:
    image: signoz/frontend:0.10.2
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/otelcontribcol:0.45.1-1.3
    command: ["--config=/etc/otel-collector-config.yaml"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      # - "1777:1777"     # pprof extension
      - "4317:4317"       # OTLP gRPC receiver
      - "4318:4318"       # OTLP HTTP receiver
      # - "8888:8888"     # OtelCollector internal metrics
      # - "8889:8889"     # signoz spanmetrics exposed by the agent
      # - "9411:9411"     # Zipkin port
      # - "13133:13133"   # Health check extension
      # - "14250:14250"   # Jaeger gRPC
      # - "14268:14268"   # Jaeger thrift HTTP
      # - "55678:55678"   # OpenCensus receiver
      # - "55679:55679"   # zPages extension
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
    deploy:
      mode: replicated
      replicas: 3
      restart_policy:
        condition: on-failure
      resources:
        limits:
          memory: 2000m
    depends_on:
      - clickhouse

  otel-collector-metrics:
    image: signoz/otelcontribcol:0.45.1-1.3
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    # ports:
    #   - "1777:1777"     # pprof extension
    #   - "8888:8888"     # OtelCollector internal metrics
    #   - "13133:13133"   # Health check extension
    #   - "55679:55679"   # zPages extension
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
    logging:
      options:
        max-size: 50m
        max-file: "3"

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
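
Since this file uses Compose format 3.9 with `deploy` keys, it targets Docker Swarm; a hedged sketch of bringing it up (the stack name `signoz` is an assumption for the example):

```sh
# Initialise a swarm if you haven't already, then deploy the stack.
docker swarm init
docker stack deploy -c deploy/docker-swarm/clickhouse-setup/docker-compose.yaml signoz

# Check that all services come up.
docker stack services signoz
```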
@@ -0,0 +1,31 @@
CREATE TABLE IF NOT EXISTS signoz_index (
    timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
    traceID String CODEC(ZSTD(1)),
    spanID String CODEC(ZSTD(1)),
    parentSpanID String CODEC(ZSTD(1)),
    serviceName LowCardinality(String) CODEC(ZSTD(1)),
    name LowCardinality(String) CODEC(ZSTD(1)),
    kind Int32 CODEC(ZSTD(1)),
    durationNano UInt64 CODEC(ZSTD(1)),
    tags Array(String) CODEC(ZSTD(1)),
    tagsKeys Array(String) CODEC(ZSTD(1)),
    tagsValues Array(String) CODEC(ZSTD(1)),
    statusCode Int64 CODEC(ZSTD(1)),
    references String CODEC(ZSTD(1)),
    externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
    externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
    component Nullable(String) CODEC(ZSTD(1)),
    dbSystem Nullable(String) CODEC(ZSTD(1)),
    dbName Nullable(String) CODEC(ZSTD(1)),
    dbOperation Nullable(String) CODEC(ZSTD(1)),
    peerService Nullable(String) CODEC(ZSTD(1)),
    INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
    INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
    INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
    INDEX idx_kind kind TYPE minmax GRANULARITY 4,
    INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
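
The bloom-filter indexes on `tagsKeys`/`tagsValues` are what make tag-filtered queries cheap; a hedged sketch of such a lookup, assuming you can reach ClickHouse with `clickhouse-client` (the `signoz_traces` database name is taken from the collector exporter config in this diff):

```sh
# Count spans per service that carry a given tag key, letting the
# idx_tagsKeys bloom filter skip granules without it.
clickhouse-client -q "
  SELECT serviceName, count() AS spans
  FROM signoz_traces.signoz_index
  WHERE has(tagsKeys, 'customer_type')
  GROUP BY serviceName
  ORDER BY spans DESC"
```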
108
deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml
Normal file
@@ -0,0 +1,108 @@
receivers:
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: localhost:12345
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 60s
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}

processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
    timeout: 2s
    override: false
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
    dimensions_cache_size: 10000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  prometheus:
    endpoint: 0.0.0.0:8889
  # logging: {}

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

service:
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions: [health_check, zpages, pprof]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
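
A hedged smoke test of the extensions this config enables. Only 4317/4318 are published to the host in the compose files above, so these would run from a container attached to the same network:

```sh
# health_check extension: returns 200 when the collector is up.
curl -s http://otel-collector:13133 && echo "collector healthy"

# zPages extension: pipeline debug pages.
curl -s http://otel-collector:55679/debug/servicez | head -n 5
```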
@@ -0,0 +1,70 @@
receivers:
  prometheus:
    config:
      scrape_configs:
        # otel-collector internal metrics
        - job_name: "otel-collector"
          scrape_interval: 60s
          dns_sd_configs:
            - names:
                - 'tasks.otel-collector'
              type: 'A'
              port: 8888
        # otel-collector-metrics internal metrics
        - job_name: "otel-collector-metrics"
          scrape_interval: 60s
          static_configs:
            - targets:
                - localhost:8888
        # SigNoz span metrics
        - job_name: "signozspanmetrics-collector"
          scrape_interval: 60s
          dns_sd_configs:
            - names:
                - 'tasks.otel-collector'
              type: 'A'
              port: 8889

processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

service:
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions: [health_check, zpages, pprof]
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
26
deploy/docker-swarm/clickhouse-setup/prometheus.yml
Normal file
@@ -0,0 +1,26 @@
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:

remote_read:
  - url: tcp://clickhouse:9000/?database=signoz_metrics
37
deploy/docker-swarm/common/nginx-config.conf
Normal file
@@ -0,0 +1,37 @@
server {
    listen 3301;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    location / {
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }

    location /api/alertmanager {
        proxy_pass http://alertmanager:9093/api/v2;
    }

    location /api {
        proxy_pass http://query-service:8080/api;
    }

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
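
Given these proxy rules, API calls against the published frontend port should reach the query-service; a hedged check from the host once the stack is up (the version path comes from the query-service healthcheck in the compose files):

```sh
# /api/* is proxied to query-service:8080, so this should return the
# query-service version through the frontend on port 3301.
curl -s http://localhost:3301/api/v1/version
```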
0
deploy/docker-swarm/dashboards/.gitkeep
Normal file
35
deploy/docker/clickhouse-setup/alertmanager.yml
Normal file
@@ -0,0 +1,35 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
- name: 'slack-notifications'
  slack_configs:
  - channel: '#alerts'
    send_resolved: true
    icon_url: https://avatars3.githubusercontent.com/u/3380462
    title: |-
      [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
      {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
        {{" "}}(
        {{- with .CommonLabels.Remove .GroupLabels.Names }}
          {{- range $index, $label := .SortedPairs -}}
            {{ if $index }}, {{ end }}
            {{- $label.Name }}="{{ $label.Value -}}"
          {{- end }}
        {{- end -}}
        )
      {{- end }}
    text: >-
      {{ range .Alerts -}}
      *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

      *Description:* {{ .Annotations.description }}

      *Details:*
        {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
        {{ end }}
      {{ end }}
11
deploy/docker/clickhouse-setup/alerts.yml
Normal file
@@ -0,0 +1,11 @@
groups:
- name: ExampleCPULoadGroup
  rules:
  - alert: HighCpuLoad
    expr: system_cpu_load_average_1m > 0.1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: High CPU load
      description: "CPU load is > 0.1\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
1304
deploy/docker/clickhouse-setup/clickhouse-config.xml
Normal file
File diff suppressed because it is too large
29
deploy/docker/clickhouse-setup/clickhouse-storage.xml
Normal file
@@ -0,0 +1,29 @@
<?xml version="1.0"?>
<clickhouse>
    <storage_configuration>
        <disks>
            <default>
                <keep_free_space_bytes>10485760</keep_free_space_bytes>
            </default>
            <s3>
                <type>s3</type>
                <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
                <access_key_id>ACCESS-KEY-ID</access_key_id>
                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
            </s3>
        </disks>
        <policies>
            <tiered>
                <volumes>
                    <default>
                        <disk>default</disk>
                    </default>
                    <s3>
                        <disk>s3</disk>
                        <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
                    </s3>
                </volumes>
            </tiered>
        </policies>
    </storage_configuration>
</clickhouse>
123
deploy/docker/clickhouse-setup/clickhouse-users.xml
Normal file
@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
    <!-- See also the files in users.d directory where the settings can be overridden. -->

    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose random replica from set of replicas with minimum number of errors
                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
                  with minimum number of different symbols between replica's hostname and local hostname
                  (Hamming distance).
                 in_order - first live replica is chosen in specified order.
                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- See also the files in users.d directory where the password can be overridden.

                 Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
                  place its name in 'server' element inside 'ldap' element.
                 Example: <ldap><server>my_ldap_server</server></ldap>

                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
                  place 'kerberos' element instead of 'password' (and similar) elements.
                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
                  whose initiator's realm matches it.
                 Example: <kerberos />
                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>

                 How to generate decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 In first line will be password and in second - corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
                 In first line will be password and in second - corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                    <ip>::/0</ip>

                 To open access only from localhost, specify:
                    <ip>::1</ip>
                    <ip>127.0.0.1</ip>

                 Each element of list has one of the following forms:
                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.clickhouse.com.
                     To check access, DNS query is performed, and all received addresses compared to peer address.
                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
                     Strongly recommended that regexp is ends with $
                 All results of DNS requests are cached till server restart.
            -->
            <networks>
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- User can create other users and grant rights to them. -->
            <!-- <access_management>1</access_management> -->
        </default>
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>
0
deploy/docker/clickhouse-setup/data/signoz/.gitkeep
Normal file
144
deploy/docker/clickhouse-setup/docker-compose.yaml
Normal file
@@ -0,0 +1,144 @@
version: "2.4"

services:
  clickhouse:
    image: clickhouse/clickhouse-server:22.4.5-alpine
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    tty: true
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
    restart: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3

  alertmanager:
    image: signoz/alertmanager:0.23.0-0.2
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:0.10.2
    container_name: query-service
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
    #   - "6060:6060"   # pprof port
    #   - "8080:8080"   # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      clickhouse:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.10.2
    container_name: frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/otelcontribcol:0.45.1-1.3
    command: ["--config=/etc/otel-collector-config.yaml"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
    ports:
      # - "1777:1777"     # pprof extension
      - "4317:4317"       # OTLP gRPC receiver
      - "4318:4318"       # OTLP HTTP receiver
      # - "8888:8888"     # OtelCollector internal metrics
      # - "8889:8889"     # signoz spanmetrics exposed by the agent
      # - "9411:9411"     # Zipkin port
      # - "13133:13133"   # health check extension
      # - "14250:14250"   # Jaeger gRPC
      # - "14268:14268"   # Jaeger thrift HTTP
      # - "55678:55678"   # OpenCensus receiver
      # - "55679:55679"   # zPages extension
    mem_limit: 2000m
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy

  otel-collector-metrics:
    image: signoz/otelcontribcol:0.45.1-1.3
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    # ports:
    #   - "1777:1777"     # pprof extension
    #   - "8888:8888"     # OtelCollector internal metrics
    #   - "13133:13133"   # Health check extension
    #   - "55679:55679"   # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
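
Since this compose file publishes the collector's OTLP ports, here is a hedged smoke test from the host. The `/v1/traces` path is the spec-defined OTLP/HTTP endpoint and an empty JSON export request is generally accepted, though the exact response can vary by collector version:

```sh
# OTLP HTTP receiver listens on 4318; POSTing an empty export request
# should return HTTP 200 once the collector is up.
curl -s -o /dev/null -w "%{http_code}\n" \
  -X POST http://localhost:4318/v1/traces \
  -H 'Content-Type: application/json' -d '{}'
```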
111
deploy/docker/clickhouse-setup/otel-collector-config.yaml
Normal file
111
deploy/docker/clickhouse-setup/otel-collector-config.yaml
Normal file
@@ -0,0 +1,111 @@
receivers:
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: localhost:12345
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 60s
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}

processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
    dimensions_cache_size: 10000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
    timeout: 2s
    override: false

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  prometheus:
    endpoint: 0.0.0.0:8889
  # logging: {}

service:
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
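Note (not part of the diff): a minimal smoke-test sketch for the collector config above, assuming the container publishes ports 13133 (health_check) and 8888 (internal telemetry) on localhost as the compose files in this change do; otelcol_receiver_accepted_spans is the collector's standard receiver counter.

# Hypothetical smoke test for the collector config above; assumes ports
# 13133 (health_check extension) and 8888 (service.telemetry.metrics)
# are reachable on localhost.
import requests

def check_collector(host: str = "localhost") -> None:
    # health_check extension answers on 0.0.0.0:13133
    health = requests.get(f"http://{host}:13133/", timeout=5)
    health.raise_for_status()
    print("health_check:", health.status_code)

    # internal telemetry is Prometheus text on 0.0.0.0:8888
    metrics = requests.get(f"http://{host}:8888/metrics", timeout=5)
    metrics.raise_for_status()
    accepted = [line for line in metrics.text.splitlines()
                if line.startswith("otelcol_receiver_accepted_spans")]
    print("\n".join(accepted) or "no spans accepted yet")

if __name__ == "__main__":
    check_collector()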
@@ -0,0 +1,73 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  prometheus:
    config:
      scrape_configs:
        # otel-collector internal metrics
        - job_name: "otel-collector"
          scrape_interval: 60s
          static_configs:
            - targets:
                - otel-collector:8888
        # otel-collector-metrics internal metrics
        - job_name: "otel-collector-metrics"
          scrape_interval: 60s
          static_configs:
            - targets:
                - localhost:8888
        # SigNoz span metrics
        - job_name: "signozspanmetrics-collector"
          scrape_interval: 60s
          static_configs:
            - targets:
                - otel-collector:8889

processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

service:
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
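Note (not part of the diff): in this second config the prometheus receiver does the ingestion itself; each job pulls Prometheus text from its target every 60s and the batch processor forwards it to clickhousemetricswrite. A rough stand-in for one scrape cycle, assuming the otel-collector targets are published on localhost outside Docker:

# Illustrative equivalent of one scrape cycle of the prometheus receiver
# above; localhost stands in for the Docker-internal hostnames.
import requests

TARGETS = {
    "otel-collector": "http://localhost:8888/metrics",             # otel-collector:8888
    "signozspanmetrics-collector": "http://localhost:8889/metrics", # prometheus exporter port
}

for job, url in TARGETS.items():
    try:
        body = requests.get(url, timeout=5).text
        samples = [l for l in body.splitlines() if l and not l.startswith("#")]
        print(f"{job}: {len(samples)} samples")
    except requests.RequestException as exc:
        print(f"{job}: scrape failed ({exc})")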
26
deploy/docker/clickhouse-setup/prometheus.yml
Normal file
@@ -0,0 +1,26 @@
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:


remote_read:
  - url: tcp://clickhouse:9000/?database=signoz_metrics
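Note (not part of the diff): the remote_read URL above points Prometheus at the signoz_metrics database in ClickHouse. A hypothetical connectivity check, assuming the clickhouse-driver Python package and ClickHouse's native port 9000 published locally:

# Assumed helper, not from this change: verify the database behind the
# remote_read URL is reachable and has tables.
from clickhouse_driver import Client

client = Client(host="localhost", port=9000)
tables = client.execute("SHOW TABLES FROM signoz_metrics")
print("signoz_metrics tables:", [t[0] for t in tables])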
16
deploy/docker/common/locust-scripts/locustfile.py
Normal file
@@ -0,0 +1,16 @@
from locust import HttpUser, task, between

class UserTasks(HttpUser):
    wait_time = between(5, 15)

    @task
    def rachel(self):
        self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")

    @task
    def trom(self):
        self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")

    @task
    def japanese(self):
        self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")

    @task
    def coffee(self):
        self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")
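Note (not part of the diff): the load-hotrod service in the compose files below runs this file headless (LOCUST_OPTS "--headless -u 10 -r 1") against ATTACKED_HOST http://hotrod:8080. A one-shot sketch of the same four dispatch calls, assuming HotROD is published on localhost:9000 as in the compose mapping "9000:8080":

# One-shot smoke test of the four HotROD dispatch URLs exercised by the
# Locust tasks above, without Locust in the loop.
import requests

PATHS = [
    "/dispatch?customer=123&nonse=0.6308392664170006",
    "/dispatch?customer=392&nonse=0.015296363321630757",
    "/dispatch?customer=731&nonse=0.8022286220408668",
    "/dispatch?customer=567&nonse=0.0022220379420636593",
]

base = "http://localhost:9000"
for path in PATHS:
    resp = requests.get(base + path, timeout=10)
    print(path.split("&")[0], "->", resp.status_code)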
42
deploy/docker/common/nginx-config.conf
Normal file
@@ -0,0 +1,42 @@
server {
    listen 3301;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    # to handle uri issue 414 from nginx
    client_max_body_size 24M;

    large_client_header_buffers 8 16k;

    location / {
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }

    location /api/alertmanager {
        proxy_pass http://alertmanager:9093/api/v2;
    }

    location /api {
        proxy_pass http://query-service:8080/api;
    }

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
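Note (not part of the diff): a hypothetical probe of the three routes this config serves on port 3301; the /api/v1/services and /api/alertmanager/status paths are illustrative guesses at endpoints behind the query-service and alertmanager proxies, not taken from this change.

# Assumed routes for illustration only: SPA root, query-service proxy,
# alertmanager proxy, all fronted by nginx on port 3301.
import requests

base = "http://localhost:3301"
for path in ["/", "/api/v1/services", "/api/alertmanager/status"]:
    resp = requests.get(base + path, timeout=5)
    print(path, "->", resp.status_code)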
@@ -1,264 +0,0 @@
version: "2.4"

volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}

# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707

services:

  zookeeper:
    image: bitnami/zookeeper:3.6.2-debian-10-r100
    ports:
      - "2181:2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes

  kafka:
    # image: wurstmeister/kafka
    image: bitnami/kafka:2.7.0-debian-10-r1
    ports:
      - "9092:9092"
    hostname: kafka
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'

    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/data
      - coordinator_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_tiny/coordinator
      - environment_tiny/common

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_tiny/broker
      - environment_tiny/common

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/data
      - historical_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_tiny/historical
      - environment_tiny/common

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/data
      - middle_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_tiny/middlemanager
      - environment_tiny/common

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_tiny/router
      - environment_tiny/common

  flatten-processor:
    image: signoz/flattener-processor:0.2.0
    container_name: flattener-processor

    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"

    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans

  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.2.2
    container_name: query-service

    depends_on:
      - router
    ports:
      - "8080:8080"

    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w

  frontend:
    image: signoz/frontend:0.2.3
    container_name: frontend

    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3000:3000"
    volumes:
      - ./nginx-config.conf:/etc/nginx/conf.d/default.conf

  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"

    depends_on:
      - router
    restart: on-failure:6

    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json

  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"

    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"   # pprof extension
      - "8887:8888"   # Prometheus metrics exposed by the agent
      - "14268:14268" # Jaeger receiver
      - "55678"       # OpenCensus receiver
      - "55680:55680" # OTLP HTTP/2.0 legacy port
      - "55681:55681" # OTLP HTTP/1.0 receiver
      - "4317:4317"   # OTLP GRPC receiver
      - "55679:55679" # zpages extension
      - "13133"       # health_check
    depends_on:
      kafka:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ./locust-scripts:/locust
@@ -1,259 +0,0 @@
version: "2.4"

volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}

# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707

services:

  zookeeper:
    image: bitnami/zookeeper:3.6.2-debian-10-r100
    ports:
      - "2181:2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes

  kafka:
    # image: wurstmeister/kafka
    image: bitnami/kafka:2.7.0-debian-10-r1
    ports:
      - "9092:9092"
    hostname: kafka
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'

    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/druid/deepStorage
      - coordinator_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_small/coordinator

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_small/broker

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/druid/deepStorage
      - historical_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_small/historical

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/druid/deepStorage
      - middle_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_small/middlemanager

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_small/router

  flatten-processor:
    image: signoz/flattener-processor:0.2.0
    container_name: flattener-processor

    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"

    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans

  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.2.2
    container_name: query-service

    depends_on:
      - router
    ports:
      - "8080:8080"

    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w

  frontend:
    image: signoz/frontend:0.2.3
    container_name: frontend

    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3000:3000"
    volumes:
      - ./nginx-config.conf:/etc/nginx/conf.d/default.conf

  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"

    depends_on:
      - router
    restart: on-failure:6

    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json

  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"

    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"   # pprof extension
      - "8887:8888"   # Prometheus metrics exposed by the agent
      - "14268:14268" # Jaeger receiver
      - "55678"       # OpenCensus receiver
      - "55680:55680" # OTLP HTTP/2.0 legacy grpc receiver
      - "55681:55681" # OTLP HTTP/1.0 receiver
      - "4317:4317"   # OTLP GRPC receiver
      - "55679:55679" # zpages extension
      - "13133"       # health_check
    depends_on:
      kafka:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ./locust-scripts:/locust
@@ -1 +0,0 @@
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
@@ -1,69 +0,0 @@
{
  "type": "kafka",
  "dataSchema": {
    "dataSource": "flattened_spans",
    "parser": {
      "type": "string",
      "parseSpec": {
        "format": "json",
        "timestampSpec": {
          "column": "StartTimeUnixNano",
          "format": "nano"
        },
        "dimensionsSpec": {
          "dimensions": [
            "TraceId",
            "SpanId",
            "ParentSpanId",
            "Name",
            "ServiceName",
            "References",
            "Tags",
            "ExternalHttpMethod",
            "ExternalHttpUrl",
            "Component",
            "DBSystem",
            "DBName",
            "DBOperation",
            "PeerService",
            {
              "type": "string",
              "name": "TagsKeys",
              "multiValueHandling": "ARRAY"
            },
            {
              "type": "string",
              "name": "TagsValues",
              "multiValueHandling": "ARRAY"
            },
            { "name": "DurationNano", "type": "Long" },
            { "name": "Kind", "type": "int" },
            { "name": "StatusCode", "type": "int" }
          ]
        }
      }
    },
    "metricsSpec": [
      { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
    ],
    "granularitySpec": {
      "type": "uniform",
      "segmentGranularity": "DAY",
      "queryGranularity": "NONE",
      "rollup": false
    }
  },
  "tuningConfig": {
    "type": "kafka",
    "reportParseExceptions": true
  },
  "ioConfig": {
    "topic": "flattened_spans",
    "replicas": 1,
    "taskDuration": "PT20M",
    "completionTimeout": "PT30M",
    "consumerProperties": {
      "bootstrap.servers": "kafka:9092"
    }
  }
}
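Note (not part of the diff): the create-supervisor service in the deleted compose files posts this spec with curl. A Python equivalent, kept for reference only since the Druid pipeline is removed in this change, assuming the spec sits at ./druid-jobs/supervisor-spec.json and the router's port 8888 is published locally:

# Mirrors the create-supervisor curl command from the compose files above.
import json
import requests

with open("druid-jobs/supervisor-spec.json") as f:
    spec = json.load(f)

resp = requests.post(
    "http://localhost:8888/druid/indexer/v1/supervisor",  # router:8888 inside Docker
    json=spec,
    timeout=10,
)
resp.raise_for_status()
print(resp.json())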
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=768m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=100MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=1280m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=2
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=1g
DRUID_XMS=1g
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=2g

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=2
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=128m
DRUID_XMS=128m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]




druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB


druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,26 +0,0 @@
# For S3 storage

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]


# druid_storage_type=s3
# druid_storage_bucket=<s3-bucket-name>
# druid_storage_baseKey=druid/segments

# AWS_ACCESS_KEY_ID=<s3-access-id>
# AWS_SECRET_ACCESS_KEY=<s3-access-key>
# AWS_REGION=<s3-aws-region>

# druid_indexer_logs_type=s3
# druid_indexer_logs_s3Bucket=<s3-bucket-name>
# druid_indexer_logs_s3Prefix=druid/indexing-logs

# -----------------------------------------------------------
# For local storage
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_storage_type=local
druid_storage_storageDirectory=/opt/data/segments
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/data/indexing-logs
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000


druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,50 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]



druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000


druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000


druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,20 +0,0 @@
server {
    listen 3000;
    server_name _;
    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }
    location /api {
        proxy_pass http://query-service:8080/api;

    }

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
@@ -1,51 +0,0 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  queued_retry:
    num_workers: 4
    queue_size: 100
    retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  kafka/traces:
    brokers:
      - kafka:9092
    topic: 'otlp_spans'
    protocol_version: 2.0.0

  kafka/metrics:
    brokers:
      - kafka:9092
    topic: 'otlp_metrics'
    protocol_version: 2.0.0
service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [memory_limiter, batch, queued_retry]
      exporters: [kafka/traces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [kafka/metrics]
@@ -1,256 +0,0 @@
#!/bin/bash

set -o errexit

is_command_present() {
    type "$1" >/dev/null 2>&1
}

is_mac() {
    [[ $OSTYPE == darwin* ]]
}


check_k8s_setup() {
    echo "Checking your k8s setup status"
    if ! is_command_present kubectl; then
        echo "Please install kubectl on your machine"
        exit 1
    else

        if ! is_command_present jq; then
            install_jq
        fi
        clusters=`kubectl config view -o json | jq -r '."current-context"'`
        if [[ ! -n $clusters ]]; then
            echo "Please setup a k8s cluster & config kubectl to connect to it"
            exit 1
        fi
        k8s_minor_version=`kubectl version --short -o json | jq ."serverVersion.minor" | sed 's/[^0-9]*//g'`
        # if [[ $k8s_minor_version < 18 ]]; then
        #     echo "+++++++++++ ERROR ++++++++++++++++++++++"
        #     echo "SigNoz deployments require Kubernetes >= v1.18. Found version: v1.$k8s_minor_version"
        #     echo "+++++++++++ ++++++++++++++++++++++++++++"
        #     exit 1
        # fi;
    fi
}

install_jq(){
    if [ $package_manager == "brew" ]; then
        brew install jq
    elif [ $package_manager == "yum" ]; then
        yum_cmd="sudo yum --assumeyes --quiet"
        $yum_cmd install jq
    else
        apt_cmd="sudo apt-get --yes --quiet"
        $apt_cmd update
        $apt_cmd install jq
    fi
}


check_os() {
    if is_mac; then
        package_manager="brew"
        desired_os=1
        os="Mac"
        return
    fi

    os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"

    case "$os_name" in
        Ubuntu*)
            desired_os=1
            os="ubuntu"
            package_manager="apt-get"
            ;;
        Debian*)
            desired_os=1
            os="debian"
            package_manager="apt-get"
            ;;
        Red\ Hat*)
            desired_os=1
            os="red hat"
            package_manager="yum"
            ;;
        CentOS*)
            desired_os=1
            os="centos"
            package_manager="yum"
            ;;
        *)
            desired_os=0
            os="Not Found"
    esac
}


echo_contact_support() {
    echo "Please contact <support@signoz.io> with your OS details and version${1:-.}"
}

bye() {  # Prints a friendly good bye message and exits the script.
    set +o errexit
    echo "Please share your email to receive support with the installation"
    read -rp 'Email: ' email

    while [[ $email == "" ]]
    do
        read -rp 'Email: ' email
    done

    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "platform": "k8s", "k8s_minor_version": "'"$k8s_minor_version"'" } }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi

    echo -e "\nExiting for now. Bye! \U1F44B\n"
    exit 1
}

deploy_app() {
    kubectl apply -f "$install_dir/config-template"
    kubectl apply -f "$install_dir"
}

wait_for_application_start() {
    local timeout=$1
    address=$custom_domain
    if [[ "$ssl_enable" == "true" ]]; then
        protocol="https"
    else
        protocol="http"
    fi
    # The while loop is important because for-loops don't work for dynamic values
    while [[ $timeout -gt 0 ]]; do
        if [[ $address == "" || $address == null ]]; then
            address=`kubectl get ingress appsmith-ingress -o json | jq -r '.status.loadBalancer.ingress[0].ip'`
        fi
        status_code="$(curl -s -o /dev/null -w "%{http_code}" $protocol://$address/api/v1 || true)"
        if [[ status_code -eq 401 ]]; then
            break
        else
            echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds...\r\c"
        fi
        ((timeout--))
        sleep 1
    done

    echo ""
}


echo -e "👋 Thank you for trying out SigNoz! "
echo ""


# Checking OS and assigning package manager
desired_os=0
os=""
echo -e "🕵️ Detecting your OS"
check_os
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')

# Run bye if failure happens
trap bye EXIT

DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "platform": "k8s", "k8s_minor_version": "'"$k8s_minor_version"'" } }'

URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"

if has_curl; then
    curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
    wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi

# Check for kubernetes setup
check_k8s_setup

echo ""
echo "Deploy Appsmith on your cluster"
echo ""

deploy_app

wait_for_application_start 60


if [[ $status_code -ne 200 ]]; then
    echo "+++++++++++ ERROR ++++++++++++++++++++++"
    echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""
    echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
    echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
    echo "++++++++++++++++++++++++++++++++++++++++"

    SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"

    DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"

    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "platform": "k8s", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"' } }'

    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi

    exit 1

else
    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"} }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi
    echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
    echo "Your installation is complete!"
    echo ""
    echo "Your frontend is running on 'http://localhost:3000'."

    echo ""
    echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
    echo ""
    echo "Need help Getting Started?"
    echo "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
    echo ""
    echo "Please share your email to receive support & updates about SigNoz!"
    read -rp 'Email: ' email

    while [[ $email == "" ]]
    do
        read -rp 'Email: ' email
    done

    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "platform": "k8s" } }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi

fi

echo -e "\nThank you!\n"
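The removed script above repeats the same inline telemetry POST for every event (and assumes has_curl/has_wget helpers it never defines); the diff that follows factors the pattern into a single send_event helper. The repeated shape, reduced to a sketch with the payload abbreviated from the script above:

    # One inline occurrence of the pattern, as used throughout the old script.
    DATA='{ "api_key": "...", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'" }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"
    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi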
@@ -2,6 +2,16 @@
 
 set -o errexit
 
+# Regular Colors
+Black='\033[0;30m'        # Black
+Red='\[\e[0;31m\]'        # Red
+Green='\033[0;32m'        # Green
+Yellow='\033[0;33m'       # Yellow
+Blue='\033[0;34m'         # Blue
+Purple='\033[0;35m'       # Purple
+Cyan='\033[0;36m'         # Cyan
+White='\033[0;37m'        # White
+NC='\033[0m'              # No Color
+
 is_command_present() {
     type "$1" >/dev/null 2>&1
@@ -26,6 +36,10 @@ is_mac() {
     [[ $OSTYPE == darwin* ]]
 }
 
+# is_arm64(){
+#     [[ `uname -m` == 'arm64' ]]
+# }
+
 check_os() {
     if is_mac; then
         package_manager="brew"
@@ -88,7 +102,7 @@ check_os() {
 # The script should error out in case they aren't available
 check_ports_occupied() {
     local port_check_output
-    local ports_pattern="80|443"
+    local ports_pattern="3301|4317"
 
     if is_mac; then
         port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
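The hunk above retargets the port check from the old nginx defaults (80/443) to the ports the ClickHouse setup binds: 3301 for the frontend and 4317 for OTLP gRPC. A quick manual check on Linux, as a hedged sketch (assumes iproute2's ss is available; the script itself uses netstat):

    ss -ltn | grep -E ':(3301|4317) ' && echo "port already in use"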
@@ -102,18 +116,10 @@ check_ports_occupied() {
     fi
 
     if [[ -n $port_check_output ]]; then
-        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "port not available" } }'
-        URL="https://app.posthog.com/capture"
-        HEADER="Content-Type: application/json"
-
-        if has_curl; then
-            curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
-        elif has_wget; then
-            wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
-        fi
-
+        send_event "port_not_available"
         echo "+++++++++++ ERROR ++++++++++++++++++++++"
-        echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports."
+        echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
         echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
         echo "++++++++++++++++++++++++++++++++++++++++"
         echo ""
@@ -127,57 +133,44 @@ install_docker() {
 
 
     if [[ $package_manager == apt-get ]]; then
-        apt_cmd="sudo apt-get --yes --quiet"
+        apt_cmd="$sudo_cmd apt-get --yes --quiet"
         $apt_cmd update
         $apt_cmd install software-properties-common gnupg-agent
-        curl -fsSL "https://download.docker.com/linux/$os/gpg" | sudo apt-key add -
-        sudo add-apt-repository \
+        curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
+        $sudo_cmd add-apt-repository \
             "deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
         $apt_cmd update
         echo "Installing docker"
         $apt_cmd install docker-ce docker-ce-cli containerd.io
     elif [[ $package_manager == zypper ]]; then
-        zypper_cmd="sudo zypper --quiet --no-gpg-checks --non-interactive"
+        zypper_cmd="$sudo_cmd zypper --quiet --no-gpg-checks --non-interactive"
         echo "Installing docker"
         if [[ $os == sles ]]; then
             os_sp="$(cat /etc/*-release | awk -F= '$1 == "VERSION_ID" { gsub(/"/, ""); print $2; exit }')"
             os_arch="$(uname -i)"
-            sudo SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
+            SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
         fi
         $zypper_cmd install docker docker-runc containerd
-        sudo systemctl enable docker.service
+        $sudo_cmd systemctl enable docker.service
     elif [[ $package_manager == yum && $os == 'amazon linux' ]]; then
         echo
         echo "Amazon Linux detected ... "
         echo
-        sudo yum install docker
-        sudo service docker start
+        # yum install docker
+        # service docker start
+        $sudo_cmd yum install -y amazon-linux-extras
+        $sudo_cmd amazon-linux-extras enable docker
+        $sudo_cmd yum install -y docker
     else
 
-        yum_cmd="sudo yum --assumeyes --quiet"
+        yum_cmd="$sudo_cmd yum --assumeyes --quiet"
         $yum_cmd install yum-utils
-        sudo yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
+        $sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
         echo "Installing docker"
         $yum_cmd install docker-ce docker-ce-cli containerd.io
 
     fi
 
-}
-
-install_docker_machine() {
-
-    echo "\nInstalling docker machine ..."
-
-    if [[ $os == "Mac" ]];then
-        curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine
-        chmod +x /usr/local/bin/docker-machine
-    else
-        curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine
-        chmod +x /tmp/docker-machine
-        sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
-
-    fi
-
-
 }
 
 install_docker_compose() {
@@ -185,22 +178,14 @@ install_docker_compose() {
         if [[ ! -f /usr/bin/docker-compose ]];then
             echo "++++++++++++++++++++++++"
             echo "Installing docker-compose"
-            sudo curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
-            sudo chmod +x /usr/local/bin/docker-compose
-            sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
+            $sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+            $sudo_cmd chmod +x /usr/local/bin/docker-compose
+            $sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
             echo "docker-compose installed!"
             echo ""
         fi
     else
-        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found" } }'
-        URL="https://app.posthog.com/capture"
-        HEADER="Content-Type: application/json"
-
-        if has_curl; then
-            curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
-        elif has_wget; then
-            wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
-        fi
-
+        send_event "docker_compose_not_found"
         echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
         echo "docker-compose not found! Please install docker-compose first and then continue with this installation."
@@ -211,36 +196,38 @@ install_docker_compose() {
 }
 
 start_docker() {
-    echo "Starting Docker ..."
-    if [ $os == "Mac" ]
-    then
+    echo -e "🐳 Starting Docker ...\n"
+    if [[ $os == "Mac" ]]; then
         open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
     else
-        if ! sudo systemctl is-active docker.service > /dev/null; then
+        if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
             echo "Starting docker service"
-            sudo systemctl start docker.service
+            $sudo_cmd systemctl start docker.service
+        fi
+        # if [[ -z $sudo_cmd ]]; then
+        #     docker ps > /dev/null && true
+        #     if [[ $? -ne 0 ]]; then
+        #         request_sudo
+        #     fi
+        # fi
+        if [[ -z $sudo_cmd ]]; then
+            if ! docker ps > /dev/null && true; then
+                request_sudo
+            fi
         fi
     fi
 }
 
 wait_for_containers_start() {
     local timeout=$1
 
     # The while loop is important because for-loops don't work for dynamic values
     while [[ $timeout -gt 0 ]]; do
-        status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/v1/services/list || true)"
+        status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
         if [[ status_code -eq 200 ]]; then
             break
         else
-            SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
-            LEN_SUPERVISORS="${#SUPERVISORS}"
-
-            if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
-                echo "No Supervisors found... Re-applying docker compose\n"
-                sudo docker-compose -f ./docker/docker-compose-tiny.yaml up -d
-            fi
-
-
-            echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds...\r\c"
+            echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
         fi
         ((timeout--))
         sleep 1
@@ -250,35 +237,29 @@ wait_for_containers_start() {
 }
 
 bye() {  # Prints a friendly good bye message and exits the script.
-    if [ "$?" -ne 0 ]; then
+    if [[ "$?" -ne 0 ]]; then
         set +o errexit
 
-        echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
+        echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
         echo ""
-        echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
+        echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
 
         # echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
-        echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
+        echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
         echo "++++++++++++++++++++++++++++++++++++++++"
 
-        echo "Please share your email to receive support with the installation"
-        read -rp 'Email: ' email
-
-        while [[ $email == "" ]]
-        do
-            read -rp 'Email: ' email
-        done
-
-        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
-        URL="https://app.posthog.com/capture"
-        HEADER="Content-Type: application/json"
-
-        if has_curl; then
-            curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
-        elif has_wget; then
-            wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
-        fi
+        if [[ $email == "" ]]; then
+            echo -e "\n📨 Please share your email to receive support with the installation"
+            read -rp 'Email: ' email
+
+            while [[ $email == "" ]]
+            do
+                read -rp 'Email: ' email
+            done
+        fi
+
+        send_event "installation_support"
 
         echo ""
         echo -e "\nWe will reach out to you at the email provided shortly, Exiting for now. Bye! 👋 \n"
@@ -286,149 +267,260 @@ bye() { # Prints a friendly good bye message and exits the script.
     fi
 }
 
+request_sudo() {
+    if hash sudo 2>/dev/null; then
+        echo -e "\n\n🙇 We will need sudo access to complete the installation."
+        if (( $EUID != 0 )); then
+            sudo_cmd="sudo"
+            echo -e "Please enter your sudo password, if prompted."
+            # $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
+            # if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
+            #     echo "Need sudo privileges to proceed with the installation."
+            #     exit 1;
+            # fi
+            if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
+                echo "Need sudo privileges to proceed with the installation."
+                exit 1;
+            fi
+
+            echo -e "Got it! Thanks!! 🙏\n"
+            echo -e "Okay! We will bring up the SigNoz cluster from here 🚀\n"
+        fi
+    fi
+}
+
+echo ""
 echo -e "👋 Thank you for trying out SigNoz! "
 echo ""
 
+sudo_cmd=""
+
+# Check sudo permissions
+if (( $EUID != 0 )); then
+    echo "🟡 Running installer with non-sudo permissions."
+    echo "   In case of any failure or prompt, please consider running the script with sudo privileges."
+    echo ""
+else
+    sudo_cmd="sudo"
+fi
+
 # Checking OS and assigning package manager
 desired_os=0
 os=""
-echo -e "🕵️ Detecting your OS"
+email=""
+echo -e "🌏 Detecting your OS ...\n"
 check_os
-SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
 
+# Obtain unique installation id
+# sysinfo="$(uname -a)"
+# if [[ $? -ne 0 ]]; then
+#     uuid="$(uuidgen)"
+#     uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
+#     sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
+# fi
+if ! sysinfo="$(uname -a)"; then
+    uuid="$(uuidgen)"
+    uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
+    sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
+fi
+
+digest_cmd=""
+if hash shasum 2>/dev/null; then
+    digest_cmd="shasum -a 256"
+elif hash sha256sum 2>/dev/null; then
+    digest_cmd="sha256sum"
+elif hash openssl 2>/dev/null; then
+    digest_cmd="openssl dgst -sha256"
+fi
+
+if [[ -z $digest_cmd ]]; then
+    SIGNOZ_INSTALLATION_ID="$sysinfo"
+else
+    SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
+fi
+
+# echo ""
+# echo -e "👉 ${RED}Two ways to go forward\n"
+# echo -e "${RED}1) ClickHouse as database (default)\n"
+# read -p "⚙️ Enter your preference (1/2):" choice_setup
+
+# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
+# do
+#     # echo $choice_setup
+#     echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
+#     read -p "⚙️ Enter your preference (1/2): " choice_setup
+#     # echo $choice_setup
+# done
+
+# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
+#     setup_type='clickhouse'
+# fi
+
+setup_type='clickhouse'
+
+# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
+
 # Run bye if failure happens
 trap bye EXIT
 
-DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'" } }'
-URL="https://app.posthog.com/capture"
-HEADER="Content-Type: application/json"
-
-if has_curl; then
-    curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
-elif has_wget; then
-    wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
-fi
+URL="https://api.segment.io/v1/track"
+HEADER_1="Content-Type: application/json"
+HEADER_2="Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6"
+
+send_event() {
+    error=""
+
+    case "$1" in
+        'install_started')
+            event="Installation Started"
+            ;;
+        'os_not_supported')
+            event="Installation Error"
+            error="OS Not Supported"
+            ;;
+        'docker_not_installed')
+            event="Installation Error"
+            error="Docker not installed"
+            ;;
+        'docker_compose_not_found')
+            event="Installation Error"
+            event="Docker Compose not found"
+            ;;
+        'port_not_available')
+            event="Installation Error"
+            error="port not available"
+            ;;
+        'installation_error_checks')
+            event="Installation Error - Checks"
+            error="Containers not started"
+            others='"data": "some_checks",'
+            ;;
+        'installation_support')
+            event="Installation Support"
+            others='"email": "'"$email"'",'
+            ;;
+        'installation_success')
+            event="Installation Success"
+            ;;
+        'identify_successful_installation')
+            event="Identify Successful Installation"
+            others='"email": "'"$email"'",'
+            ;;
+        *)
+            print_error "unknown event type: $1"
+            exit 1
+            ;;
+    esac
+
+    if [[ "$error" != "" ]]; then
+        error='"error": "'"$error"'", '
+    fi
+
+    DATA='{ "anonymousId": "'"$SIGNOZ_INSTALLATION_ID"'", "event": "'"$event"'", "properties": { "os": "'"$os"'", '"$error $others"' "setup_type": "'"$setup_type"'" } }'
+
+    if has_curl; then
+        curl -sfL -d "$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1
+    elif has_wget; then
+        wget -q --post-data="$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1
+    fi
+}
+
+send_event "install_started"
 
 if [[ $desired_os -eq 0 ]]; then
-    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported" } }'
-    URL="https://app.posthog.com/capture"
-    HEADER="Content-Type: application/json"
-
-    if has_curl; then
-        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
-    elif has_wget; then
-        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
-    fi
+    send_event "os_not_supported"
 fi
 
 # check_ports_occupied
 
 # Check if Docker daemon is installed and available. If not, install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
 if ! is_command_present docker; then
 
     if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
+        request_sudo
         install_docker
-    else
+        # enable docker without sudo from next reboot
+        sudo usermod -aG docker "${USER}"
+    elif is_mac; then
         echo ""
         echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
         echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
         echo "https://docs.docker.com/docker-for-mac/install/"
         echo "++++++++++++++++++++++++++++++++++++++++++++++++"
-        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed" } }'
-        URL="https://app.posthog.com/capture"
-        HEADER="Content-Type: application/json"
-
-        if has_curl; then
-            curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
-        elif has_wget; then
-            wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
-        fi
+
+        send_event "docker_not_installed"
+        exit 1
+    else
+        echo ""
+        echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
+        echo "Docker must be installed manually on your machine to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
+        echo "https://docs.docker.com/get-docker/"
+        echo "++++++++++++++++++++++++++++++++++++++++++++++++"
+
+        send_event "docker_not_installed"
         exit 1
     fi
 fi
 
 # Install docker-compose
 if ! is_command_present docker-compose; then
+    request_sudo
     install_docker_compose
 fi
 
-# if ! is_command_present docker-compose; then
-#     install_docker_machine
-#     docker-machine create -d virtualbox --virtualbox-memory 3584 signoz
-# fi
-
 start_docker
 
+# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
+
 echo ""
-echo "Pulling the latest container images for SigNoz. To run as sudo it will ask for system password."
-sudo docker-compose -f ./docker/docker-compose-tiny.yaml pull
+echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
+$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
 
 echo ""
-echo "Starting the SigNoz containers. It may take a few minute ..."
+echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
 echo
 # The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
 # script doesn't exit because this command looks like it failed to do its thing.
-sudo docker-compose -f ./docker/docker-compose-tiny.yaml up --detach --remove-orphans || true
+$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
 
 wait_for_containers_start 60
 echo ""
 
 if [[ $status_code -ne 200 ]]; then
     echo "+++++++++++ ERROR ++++++++++++++++++++++"
-    echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
+    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
     echo ""
-    echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
-    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
-    echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
+    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
+    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
+    echo "or reach us on SigNoz for support https://signoz.io/slack"
     echo "++++++++++++++++++++++++++++++++++++++++"
 
-    SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
-
-    DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
-
-    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"' } }'
-
-    URL="https://app.posthog.com/capture"
-    HEADER="Content-Type: application/json"
-
-    if has_curl; then
-        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
-    elif has_wget; then
-        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
-    fi
+    send_event "installation_error_checks"
 
     exit 1
 
 else
-    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"} }'
-    URL="https://app.posthog.com/capture"
-    HEADER="Content-Type: application/json"
-
-    if has_curl; then
-        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
-    elif has_wget; then
-        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
-    fi
+    send_event "installation_success"
+
     echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
-    echo "Your installation is complete!"
     echo ""
-    echo "Your frontend is running on 'http://localhost:3000'."
+    echo "🟢 Your installation is complete!"
+    echo ""
+    echo -e "🟢 Your frontend is running on http://localhost:3301"
+    echo ""
 
+    echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
+
     echo ""
     echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
     echo ""
-    echo "Need help Getting Started?"
-    echo "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
+    echo "👉 Need help Getting Started?"
+    echo -e "Join us on Slack https://signoz.io/slack"
     echo ""
-    echo "Please share your email to receive support & updates about SigNoz!"
+    echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
    read -rp 'Email: ' email
 
     while [[ $email == "" ]]
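The hunk above also replaces the ipify-based installation id with one derived locally, so no IP address ever leaves the machine. A worked sketch of that derivation (values illustrative):

    sysinfo="$(uname -a)"          # e.g. "Linux myhost 5.4.0 ... x86_64 GNU/Linux"
    digest_cmd="sha256sum"         # the script falls back to shasum or openssl
    echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}'
    # -> a stable 64-hex-character anonymous id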
@@ -436,40 +528,7 @@ else
         read -rp 'Email: ' email
     done
 
-    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
-    URL="https://app.posthog.com/capture"
-    HEADER="Content-Type: application/json"
-
-    if has_curl; then
-        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
-    elif has_wget; then
-        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
-    fi
-
+    send_event "identify_successful_installation"
 fi
 
-echo -e "\nThank you!\n"
+echo -e "\n🙏 Thank you!\n"
-
-##### Changing default memory limit of docker ############
-# # Check if memory is less and Confirm to increase size of docker machine
-# # https://github.com/docker/machine/releases
-# # On OS X
-
-# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine && \
-#     chmod +x /usr/local/bin/docker-machine
-# # On Linux
-
-# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine &&
-#     chmod +x /tmp/docker-machine &&
-#     sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
-
-# VBoxManage list vms
-# docker-machine stop
-# VBoxManage modifyvm default --cpus 2
-# VBoxManage modifyvm default --memory 4096
-# docker-machine start
-
-# VBoxManage showvminfo default | grep Memory
-# VBoxManage showvminfo default | grep CPU
@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: retention-config
data:
  retention-spec.json: |
    [{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
@@ -1,29 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: set-retention
  annotations:
    "helm.sh/hook": post-install,post-upgrade
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
      - name: set-retention
        image: theithollow/hollowapp-blog:curl
        volumeMounts:
        - name: retention-config-volume
          mountPath: /app/retention-spec.json
          subPath: retention-spec.json
        args:
        - /bin/sh
        - -c
        - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://signoz-druid-router:8888/druid/coordinator/v1/rules/flattened_spans"

      volumes:
      - name: retention-config-volume
        configMap:
          name: retention-config

      restartPolicy: Never
  backoffLimit: 8
@@ -1,68 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: supervisor-config
data:
  supervisor-spec.json: |
    {
      "type": "kafka",
      "dataSchema": {
        "dataSource": "flattened_spans",
        "parser": {
          "type": "string",
          "parseSpec": {
            "format": "json",
            "timestampSpec": {
              "column": "StartTimeUnixNano",
              "format": "nano"
            },
            "dimensionsSpec": {
              "dimensions": [
                "TraceId",
                "SpanId",
                "ParentSpanId",
                "Name",
                "ServiceName",
                "References",
                "Tags",
                {
                  "type": "string",
                  "name": "TagsKeys",
                  "multiValueHandling": "ARRAY"
                },
                {
                  "type": "string",
                  "name": "TagsValues",
                  "multiValueHandling": "ARRAY"
                },
                { "name": "DurationNano", "type": "Long" },
                { "name": "Kind", "type": "int" },
                { "name": "StatusCode", "type": "int" }
              ]
            }
          }
        },
        "metricsSpec" : [
          { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
        ],
        "granularitySpec": {
          "type": "uniform",
          "segmentGranularity": "DAY",
          "queryGranularity": "NONE",
          "rollup": false
        }
      },
      "tuningConfig": {
        "type": "kafka",
        "reportParseExceptions": true
      },
      "ioConfig": {
        "topic": "flattened_spans",
        "replicas": 1,
        "taskDuration": "PT20M",
        "completionTimeout": "PT30M",
        "consumerProperties": {
          "bootstrap.servers": "signoz-kafka:9092"
        }
      }
    }
@@ -1,27 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: create-supervisor
  annotations:
    "helm.sh/hook": post-install,post-upgrade
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
      - name: create-supervisor
        image: theithollow/hollowapp-blog:curl
        volumeMounts:
        - name: supervisor-config-volume
          mountPath: /app/supervisor-spec.json
          subPath: supervisor-spec.json
        args:
        - /bin/sh
        - -c
        - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://signoz-druid-router:8888/druid/indexer/v1/supervisor"
      volumes:
      - name: supervisor-config-volume
        configMap:
          name: supervisor-config
      restartPolicy: Never
  backoffLimit: 8
@@ -1,53 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-collector-conf
  labels:
    app: opentelemetry
    component: otel-collector-conf
data:
  otel-collector-config: |
    receivers:
      jaeger:
        protocols:
          grpc:
          thrift_http:
      otlp:
        protocols:
          grpc:
          http:
    processors:
      batch:
        send_batch_size: 1000
        timeout: 10s
      memory_limiter:
        # Same as --mem-ballast-size-mib CLI argument
        ballast_size_mib: 683
        # 80% of maximum memory up to 2G
        limit_mib: 1500
        # 25% of limit up to 2G
        spike_limit_mib: 512
        check_interval: 5s
      queued_retry:
        num_workers: 4
        queue_size: 100
        retry_on_failure: true
    extensions:
      health_check: {}
      zpages: {}
    exporters:
      kafka:
        brokers:
          - signoz-kafka:9092
        protocol_version: 2.0.0
    service:
      extensions: [health_check, zpages]
      pipelines:
        traces:
          receivers: [jaeger, otlp]
          processors: [memory_limiter, batch, queued_retry]
          exporters: [kafka]
        metrics:
          receivers: [otlp]
          processors: [batch]
          exporters: [kafka]
@@ -1,72 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: otel-collector
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  selector:
    matchLabels:
      app: opentelemetry
      component: otel-collector
  minReadySeconds: 5
  progressDeadlineSeconds: 120
  replicas: 1 #TODO - adjust this to your own requirements
  template:
    metadata:
      labels:
        app: opentelemetry
        component: otel-collector
    spec:
      containers:
      - command:
          - "/otelcol"
          - "--config=/conf/otel-collector-config.yaml"
          # Memory Ballast size should be max 1/3 to 1/2 of memory.
          - "--mem-ballast-size-mib=683"
        image: otel/opentelemetry-collector:0.18.0
        name: otel-collector
        resources:
          limits:
            cpu: 1
            memory: 2Gi
          requests:
            cpu: 200m
            memory: 400Mi
        ports:
        - containerPort: 55679 # Default endpoint for ZPages.
        - containerPort: 55680 # Default endpoint for OpenTelemetry receiver.
        - containerPort: 55681 # Default endpoint for OpenTelemetry HTTP/1.0 receiver.
        - containerPort: 4317  # Default endpoint for OpenTelemetry gRPC receiver.
        - containerPort: 14250 # Default endpoint for Jaeger gRPC receiver.
        - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
        - containerPort: 9411  # Default endpoint for Zipkin receiver.
        - containerPort: 8888  # Default endpoint for querying metrics.
        volumeMounts:
        - name: otel-collector-config-vol
          mountPath: /conf
        # - name: otel-collector-secrets
        #   mountPath: /secrets
        livenessProbe:
          httpGet:
            path: /
            port: 13133 # Health Check extension default port.
        readinessProbe:
          httpGet:
            path: /
            port: 13133 # Health Check extension default port.
      volumes:
        - configMap:
            name: otel-collector-conf
            items:
              - key: otel-collector-config
                path: otel-collector-config.yaml
          name: otel-collector-config-vol
        # - secret:
        #     name: otel-collector-secrets
        #     items:
        #       - key: cert.pem
        #         path: cert.pem
        #       - key: key.pem
        #         path: key.pem
@@ -1,31 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: otel-collector
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  ports:
  - name: otlp # Default endpoint for OpenTelemetry receiver.
    port: 55680
    protocol: TCP
    targetPort: 55680
  - name: otlp-http-legacy # Default endpoint for OpenTelemetry receiver.
    port: 55681
    protocol: TCP
    targetPort: 55681
  - name: otlp-grpc # Default endpoint for OpenTelemetry receiver.
    port: 4317
    protocol: TCP
    targetPort: 4317
  - name: jaeger-grpc # Default endpoint for Jaeger gRPC receiver.
    port: 14250
  - name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver.
    port: 14268
  - name: zipkin # Default endpoint for Zipkin receiver.
    port: 9411
  - name: metrics # Default endpoint for querying metrics.
    port: 8888
  selector:
    component: otel-collector
@@ -1,21 +0,0 @@
dependencies:
- name: zookeeper
  repository: https://charts.bitnami.com/bitnami
  version: 6.0.0
- name: kafka
  repository: https://charts.bitnami.com/bitnami
  version: 12.0.0
- name: druid
  repository: https://charts.helm.sh/incubator
  version: 0.2.18
- name: flattener-processor
  repository: file://./signoz-charts/flattener-processor
  version: 0.2.0
- name: query-service
  repository: file://./signoz-charts/query-service
  version: 0.2.2
- name: frontend
  repository: file://./signoz-charts/frontend
  version: 0.2.3
digest: sha256:31c8e3a8a4c89d0e6071c6687f074e88b3eed8ce86310314e5b6f94e5d5017be
generated: "2021-05-18T16:54:30.24831+05:30"
@@ -1,43 +0,0 @@
apiVersion: v2
name: signoz-platform
description: SigNoz Observability Platform Helm Chart

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.2

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.2.2

dependencies:
  - name: zookeeper
    repository: "https://charts.bitnami.com/bitnami"
    version: 6.0.0
  - name: kafka
    repository: "https://charts.bitnami.com/bitnami"
    version: 12.0.0
  - name: druid
    repository: "https://charts.helm.sh/incubator"
    version: 0.2.18
  - name: flattener-processor
    repository: "file://./signoz-charts/flattener-processor"
    version: 0.2.0
  - name: query-service
    repository: "file://./signoz-charts/query-service"
    version: 0.2.2
  - name: frontend
    repository: "file://./signoz-charts/frontend"
    version: 0.2.3
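The lock file a few hunks up (digest sha256:31c8e3..., generated 2021-05-18) is the generated counterpart of this dependencies block; regenerating it is a one-liner, sketched here with an assumed chart directory:

    helm dependency update ./deploy/kubernetes/platform   # path is an assumption, not from this diff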
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,21 +0,0 @@
apiVersion: v2
name: flattener-processor
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.2.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.2.0
@@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "flattener-processor.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "flattener-processor.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "flattener-processor.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "flattener-processor.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "flattener-processor.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flattener-processor.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flattener-processor.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "flattener-processor.labels" -}}
helm.sh/chart: {{ include "flattener-processor.chart" . }}
{{ include "flattener-processor.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "flattener-processor.selectorLabels" -}}
app.kubernetes.io/name: {{ include "flattener-processor.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "flattener-processor.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "flattener-processor.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
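One way to see how the fullname helper resolves is to render the chart locally under different release names (a sketch; the release names and the chart path are hypothetical):

# Release name does not contain the chart name -> "<release>-<chart>"
helm template myrelease signoz-charts/flattener-processor | grep 'name:'
# e.g. name: myrelease-flattener-processor

# Release name already contains the chart name -> release name used as-is
helm template flattener-processor-dev signoz-charts/flattener-processor | grep 'name:'
# e.g. name: flattener-processor-dev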
@@ -1,65 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "flattener-processor.fullname" . }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "flattener-processor.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "flattener-processor.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "flattener-processor.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - "/root/flattener"
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
          env:
            - name: KAFKA_BROKER
              value: {{ .Values.configVars.KAFKA_BROKER }}
            - name: KAFKA_INPUT_TOPIC
              value: {{ .Values.configVars.KAFKA_INPUT_TOPIC }}
            - name: KAFKA_OUTPUT_TOPIC
              value: {{ .Values.configVars.KAFKA_OUTPUT_TOPIC }}

          # livenessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          # readinessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
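The container's Kafka settings come straight from .Values.configVars, so they can be overridden at install time without editing the template. A sketch (the release name, broker address, and topic names are hypothetical):

helm install myrelease signoz-charts/flattener-processor \
  --set configVars.KAFKA_BROKER=my-kafka:9092 \
  --set configVars.KAFKA_INPUT_TOPIC=spans \
  --set configVars.KAFKA_OUTPUT_TOPIC=flattened-spans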
@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "flattener-processor.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
        {{- range .paths }}
          - path: {{ . }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
        {{- end }}
  {{- end }}
{{- end }}
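The template only renders when .Values.ingress.enabled is true, iterating over hosts and their plain-string paths. A hypothetical values file matching that shape (the hostname and secret name are placeholders):

cat > ingress-values.yaml <<'EOF'
ingress:
  enabled: true
  annotations: {}
  hosts:
    - host: flattener.example.com
      paths: ["/"]
  tls: []
EOF

helm upgrade --install myrelease signoz-charts/flattener-processor -f ingress-values.yaml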
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "flattener-processor.fullname" . }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "flattener-processor.selectorLabels" . | nindent 4 }}
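The Service forwards .Values.service.port to the container port named http (8080 in the Deployment above). A quick post-install check, assuming a hypothetical release named myrelease:

kubectl get svc myrelease-flattener-processor \
  -o jsonpath='{.spec.type} {.spec.ports[0].port} -> {.spec.ports[0].targetPort}'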
@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "flattener-processor.serviceAccountName" . }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}
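Combined with the serviceAccountName helper earlier, setting serviceAccount.create=false skips this resource entirely and the pods fall back to the namespace's default account, or to an explicit serviceAccount.name. For example (release and account names are hypothetical):

# No dedicated ServiceAccount; pods run as "default"
helm install myrelease signoz-charts/flattener-processor --set serviceAccount.create=false

# Reuse a pre-existing account instead
helm install myrelease signoz-charts/flattener-processor \
  --set serviceAccount.create=false --set serviceAccount.name=existing-sa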
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "flattener-processor.fullname" . }}-test-connection"
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "flattener-processor.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
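This pod is registered as a Helm test hook: it runs only on demand and passes if busybox's wget can reach the service. A sketch, with a hypothetical release name:

helm install myrelease signoz-charts/flattener-processor
helm test myrelease   # spawns <fullname>-test-connection and reports pass/fail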
Some files were not shown because too many files have changed in this diff.