Compare commits
704 Commits
**.editorconfig** (new file, 33 lines)

@@ -0,0 +1,33 @@
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true

# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,py}]
charset = utf-8

# 4 space indentation
[*.py]
indent_style = space
indent_size = 4

# Tab indentation (no size specified)
[Makefile]
indent_style = tab

# Indentation override for all JS under lib directory
[lib/**.js]
indent_style = space
indent_size = 2

# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2
**.github/CODEOWNERS** (3 changes)

@@ -2,5 +2,6 @@
# Owners are automatically requested for review for PRs that change code
# that they own.
* @ankitnayan
/frontend/ @palash-signoz
/frontend/ @palashgdev @pranshuchittora
/deploy/ @prashant-shahi
/pkg/query-service/ @srikanthccv @makeavish @nityanandagohain
**.github/ISSUE_TEMPLATE/bug_report.md** (1 change)

@@ -26,6 +26,7 @@ assignees: ''
* **Signoz version**:
* **Browser version**:
* **Your OS and version**:
* **Your CPU Architecture**(ARM/Intel):

## Additional context
**.github/workflows/build.yaml** (55 changes)

@@ -1,50 +1,27 @@
name: build-pipeline

on:
  pull_request:
    branches:
      - develop
      - main
      - v*
    paths:
      - 'pkg/**'
      - 'frontend/**'
      - release/v*

jobs:
  get_filters:
    runs-on: ubuntu-latest
    # Set job outputs to values from filter step
    outputs:
      frontend: ${{ steps.filter.outputs.frontend }}
      query-service: ${{ steps.filter.outputs.query-service }}
      flattener: ${{ steps.filter.outputs.flattener }}
    steps:
      # For pull requests it's not necessary to checkout the code
      - uses: dorny/paths-filter@v2
        id: filter
        with:
          filters: |
            frontend:
              - 'frontend/**'
            query-service:
              - 'pkg/query-service/**'
            flattener:
              - 'pkg/processors/flattener/**'

  build-frontend:
    runs-on: ubuntu-latest
    needs:
      - get_filters
    if: ${{ needs.get_filters.outputs.frontend == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install dependencies
        run: cd frontend && yarn install
      - name: Run Prettier
        run: cd frontend && npm run prettify
        continue-on-error: true
      - name: Run ESLint
        run: cd frontend && npm run lint
        continue-on-error: true
      - name: Run Jest
        run: cd frontend && npm run jest
      - name: TSC
        run: yarn tsc
        working-directory: ./frontend
      - name: Build frontend docker image
        shell: bash
        run: |
@@ -52,9 +29,6 @@ jobs:

  build-query-service:
    runs-on: ubuntu-latest
    needs:
      - get_filters
    if: ${{ needs.get_filters.outputs.query-service == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
@@ -62,16 +36,3 @@ jobs:
        shell: bash
        run: |
          make build-query-service-amd64

  build-flattener:
    runs-on: ubuntu-latest
    needs:
      - get_filters
    if: ${{ needs.get_filters.outputs.flattener == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Build flattener docker image
        shell: bash
        run: |
          make build-flattener-amd64
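These jobs only wrap Makefile targets, so the same images can be built locally without CI. A minimal sketch, assuming Docker and GNU make are installed (the `local` tag is an arbitrary choice; the Makefile defaults `DOCKER_TAG` to `latest`):

```bash
# Build the amd64 images exactly as the pipeline does, from the repo root.
make build-frontend-amd64 DOCKER_TAG=local
make build-query-service-amd64 DOCKER_TAG=local
make build-flattener-amd64 DOCKER_TAG=local
```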
**.github/workflows/codeball.yml** (new file, 17 lines)

@@ -0,0 +1,17 @@
name: Codeball
on: [pull_request]

jobs:
  codeball_job:
    runs-on: ubuntu-latest
    name: Codeball
    steps:
      # Run Codeball on all new Pull Requests 🚀
      # For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
      - name: Codeball
        uses: sturdy-dev/codeball-action@v2
        with:
          approvePullRequests: "true"
          labelPullRequestsWhenApproved: "true"
          labelPullRequestsWhenReviewNeeded: "false"
          failJobsWhenReviewNeeded: "false"
**.github/workflows/create-issue-on-pr-merge.yml** (new file, 27 lines)

@@ -0,0 +1,27 @@
on:
  pull_request_target:
    types:
      - closed

env:
  GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
  PR_NUMBER: ${{ github.event.number }}
jobs:
  create_issue_on_merge:
    if: github.event.pull_request.merged == true
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Codebase
        uses: actions/checkout@v2
        with:
          repository: signoz/gh-bot
      - name: Use Node v16
        uses: actions/setup-node@v2
        with:
          node-version: 16
      - name: Setup Cache & Install Dependencies
        uses: bahmutov/npm-install@v1
        with:
          install-command: yarn --frozen-lockfile
      - name: Comment on PR
        run: node create-issue.js
**.github/workflows/dependency-review.yml** (new file, 22 lines)

@@ -0,0 +1,22 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency Review'
on: [pull_request]

permissions:
  contents: read

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout Repository'
        uses: actions/checkout@v3
      - name: 'Dependency Review'
        with:
          fail-on-severity: high
        uses: actions/dependency-review-action@v2
**.github/workflows/e2e-k3s.yaml** (5 changes)

@@ -52,14 +52,11 @@ jobs:
          helm install my-release signoz/signoz -n platform \
            --wait \
            --timeout 10m0s \
            --set cloud=null \
            --set frontend.service.type=LoadBalancer \
            --set query-service.image.tag=$DOCKER_TAG \
            --set queryService.image.tag=$DOCKER_TAG \
            --set frontend.image.tag=$DOCKER_TAG

          # get pods, services and the container images
          kubectl describe deploy/my-release-frontend -n platform | grep Image
          kubectl describe statefulset/my-release-query-service -n platform | grep Image
          kubectl get pods -n platform
          kubectl get svc -n platform
**.github/workflows/playwright.yaml** (new file, 24 lines)

@@ -0,0 +1,24 @@
name: Playwright Tests
on: [pull_request]

jobs:
  playwright:
    defaults:
      run:
        working-directory: frontend
    timeout-minutes: 60
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v2
        with:
          node-version: "16.x"
      - name: Install dependencies
        run: CI=1 yarn install
      - name: Install Playwright
        run: npx playwright install --with-deps
      - name: Run Playwright tests
        run: yarn playwright
        env:
          # This might depend on your test-runner/language binding
          PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}
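The suite is not CI-specific; it can be pointed at any running SigNoz UI. A sketch, assuming the frontend is reachable at the usual local address:

```bash
cd frontend
CI=1 yarn install
npx playwright install --with-deps
# The workflow passes the target URL via the environment; do the same locally.
PLAYWRIGHT_TEST_BASE_URL=http://localhost:3301 yarn playwright
```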
**.github/workflows/pr_verify_linked_issue.yml** (new file, 20 lines)

@@ -0,0 +1,20 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue

on:
  pull_request:
    types: [edited, synchronize, opened, reopened]
  check_run:

jobs:
  verify_linked_issue:
    runs-on: ubuntu-latest
    name: Ensure Pull Request has a linked issue.
    steps:
      - name: Verify Linked Issue
        uses: hattan/verify-linked-issue-action@v1.1.0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
**.github/workflows/repo-stats.yml** (new file, 25 lines)

@@ -0,0 +1,25 @@
on:
  schedule:
    # Run this once per day, towards the end of the day for keeping the most
    # recent data point most meaningful (hours are interpreted in UTC).
    - cron: "0 8 * * *"
  workflow_dispatch: # Allow for running this manually.

jobs:
  j1:
    name: repostats
    runs-on: ubuntu-latest
    steps:
      - name: run-ghrs
        uses: jgehrcke/github-repo-stats@v1.1.0
        with:
          # Define the stats repository (the repo to fetch
          # stats for and to generate the report for).
          # Remove the parameter when the stats repository
          # and the data repository are the same.
          repository: signoz/signoz
          # Set a GitHub API token that can read the stats
          # repository, and that can push to the data
          # repository (which this workflow file lives in),
          # to store data and the report files.
          ghtoken: ${{ github.token }}
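Since the workflow also declares `workflow_dispatch`, it can be triggered without waiting for the daily cron. A sketch, assuming the GitHub CLI is installed and authenticated:

```bash
# Kick off the stats workflow by file name, then watch the resulting run.
gh workflow run repo-stats.yml -R signoz/signoz
gh run list -R signoz/signoz --workflow=repo-stats.yml --limit 1
```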
**.gitignore** (12 changes)

@@ -15,6 +15,7 @@ frontend/build
frontend/.vscode
frontend/.yarnclean
frontend/.temp_cache
frontend/test-results

# misc
.DS_Store
@@ -27,10 +28,6 @@ frontend/npm-debug.log*
frontend/yarn-debug.log*
frontend/yarn-error.log*
frontend/src/constants/env.ts
frontend/cypress/**/*.mp4

# env file for cypress
frontend/cypress.env.json

.idea

@@ -42,4 +39,11 @@ frontend/cypress.env.json

frontend/*.env
pkg/query-service/signoz.db

pkg/query-service/tests/test-deploy/data/

# local data

/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
**.gitpod.yml** (2 changes)

@@ -11,7 +11,7 @@ tasks:
  - name: Run Docker Images
    init: |
      cd ./deploy
      sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d
      sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
    # command:

  - name: Run Frontend
@@ -22,7 +22,7 @@ tasks:
      yarn dev

ports:
  - port: 3000
  - port: 3301
    onOpen: open-browser
  - port: 8080
    onOpen: ignore
**.scripts/commentLinesForSetup.sh** (2 changes)

@@ -4,4 +4,4 @@
# Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml changes.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz

sed -i 38,70's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
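In the `sed` expression, `&` stands for the whole matched line, so `38,62s/.*/# &/` rewrites every line in the range 38-62 as itself prefixed with `# `, i.e. it comments that block out in place. A self-contained demonstration (GNU sed, since `-i` is used without a backup suffix):

```bash
# Build a four-line sample, then comment out lines 2-3 only.
printf 'one\ntwo\nthree\nfour\n' > /tmp/demo.txt
sed -i '2,3s/.*/# &/' /tmp/demo.txt
cat /tmp/demo.txt   # prints: one / "# two" / "# three" / four
```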
**CONTRIBUTING.md** (385 changes)

@@ -1,127 +1,372 @@
# How to Contribute
# Contributing Guidelines

There are primarily 2 areas in which you can contribute to SigNoz
## Welcome to SigNoz Contributing section 🎉

- Frontend (written in Typescript, React)
- Backend (Query Service, written in Go)
Hi there! We're thrilled that you'd like to contribute to this project, thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.

Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.

> Please note: If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
- We accept contributions made to the [SigNoz `develop` branch]()
- Find all SigNoz Docker Hub images here
  - [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
  - [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
  - [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)
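The published images can be pulled directly for a quick look; pulling without a tag resolves to `latest`:

```bash
docker pull signoz/frontend
docker pull signoz/query-service
docker pull signoz/otelcontribcol
```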
> If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
## Finding contributions to work on 💬

# Develop Frontend
Looking at the existing issues is a great way to find something to contribute on.
Also, have a look at the [good first issues label](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with.

Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)

### Contribute to Frontend with Docker installation of SigNoz
## Sections:
- [General Instructions](#1-general-instructions-)
  - [For Creating Issue(s)](#11-for-creating-issues)
  - [For Pull Request(s)](#12-for-pull-requests)
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
- [Develop Frontend](#3-develop-frontend-)
  - [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
  - [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
  - [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
  - [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
- [Other Ways to Contribute](#other-ways-to-contribute)

- `git clone https://github.com/SigNoz/signoz.git && cd signoz`
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L59`
- run `cd deploy` to move to deploy directory
- Install signoz locally without the frontend
  - If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d`
  - If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d`
- `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
- `yarn install`
- `yarn dev`
# 1. General Instructions 📝

> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
## 1.1 For Creating Issue(s)
Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue) or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.

### Contribute to Frontend without installing SigNoz backend
**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)

If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in the #contributing channel in our [slack community](https://signoz.io/slack) and we will DM you with `<test environment URL>`
#### Details like these are incredibly useful:

- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
- Create a file `.env` with `FRONTEND_API_ENDPOINT=<test environment URL>`
- `yarn install`
- `yarn dev`
- **Requirement** - what kind of use case are you trying to solve?
- **Proposal** - what do you suggest to solve the problem or improve the existing situation?
- Any open questions to address❓

**_Frontend should now be accessible at `http://localhost:3301/application`_**
#### If you are reporting a bug, details like these are incredibly useful:

# Contribute to Query-Service
- A reproducible test case or series of steps.
- The version of our code being used.
- Any modifications you've made relevant to the bug🐞.
- Anything unusual about your environment or deployment.

Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)
Discussing your proposed changes ahead of time will make the contribution process smooth for everyone 🙌.

### To run ClickHouse setup (recommended for local development)
**[`^top^`](#)**

- git clone https://github.com/SigNoz/signoz.git
- run `sudo make dev-setup` to configure local setup to run query-service
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L59`
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
- Install signoz locally without the frontend and query-service
  - If you are using x86_64 processors (All Intel/AMD processors) run `sudo make run-x86`
  - If you are on arm64 processors (Apple M1 Macbooks) run `sudo make run-arm`
<hr>

> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
## 1.2 For Pull Request(s)

**_Query Service should now be available at `http://localhost:8080`_**
Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a Pull Request(s).
Before sending us a pull request, please ensure that,

> If you want to see how frontend plays with query service, you can run frontend also in your local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts` as the query-service is now running at port `8080`
- Fork the SigNoz repo on GitHub, clone it on your machine.
- Create a branch with your changes.
- You are working against the latest source on the `develop` branch.
- Modify the source; please focus only on the specific change you are contributing.
- Ensure local tests pass.
- Commit to your fork using clear commit messages.
- Send us a pull request, answering any default questions in the pull request interface.
- Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, on your computer, merge main into your branch, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push up those changes (see the sketch below).
- Once the change has been approved and merged, we will inform you in a comment.
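For the auto-merge point above, a typical update flow looks like the sketch below (the branch name is hypothetical; it assumes your fork is `origin` and the SigNoz repository is configured as `upstream`):

```bash
git fetch upstream
git checkout my-feature            # your working branch
git merge upstream/develop         # resolve conflicts, re-run the tests
git push origin my-feature
```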
---
Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.

GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).

**Note:** Unless your change is small, **please** consider submitting different Pull Request(s):

* 1️⃣ First PR should include the overall structure of the new component:
  * Readme, configuration, interfaces or base classes, etc...
  * This PR is usually trivial to review, so the size limit does not apply to it.
* 2️⃣ Second PR should include the concrete implementation of the component. If the size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into multiple PRs.
  * If there are multiple sub-components, then ideally each one should be implemented as a **separate** pull request.
* Last PR should include changes to **any user-facing documentation.** And should include end-to-end tests if applicable. The component must be enabled only after sufficient testing, and there is enough confidence in the stability and quality of the component.

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [SLACK](https://signoz.io/slack).

### Pointers:
- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)

<hr>

### Conventions to follow when submitting Commits and Pull Request(s).

We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/), more specifically the commits and PRs **should have type specifiers** prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.

e.g. If you are submitting a fix for an issue in frontend, the PR name should be prefixed with **`fix(FE):`**

- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.

- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
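Concretely, conventional commit messages would look like this (the change descriptions and the `QS`/`CI` scopes here are hypothetical; `FE` is the scope used in the example above):

```bash
git commit -m "fix(FE): align units on the latency panel"
git commit -m "feat(QS): add a retention endpoint to query-service"
git commit -m "chore(CI): cache yarn dependencies in the build pipeline"
```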
**[`^top^`](#)**

<hr>

# 2. How to Contribute 🙋🏻‍♂️

#### There are primarily 2 areas in which you can contribute to SigNoz

- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)

Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.

**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻

⚠️ If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.

**[`^top^`](#)**

<hr>

# 3. Develop Frontend 🌚

**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**

Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).

## 3.1 Contribute to Frontend with Docker installation of SigNoz

- Clone the SigNoz repository and cd into signoz directory,
  ```
  git clone https://github.com/SigNoz/signoz.git && cd signoz
  ```
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)

- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
  - Add / Uncomment the below configuration to query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
    ```
    ports:
      - "8080:8080"
    ```
    <img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">

  - Next run,
    ```
    sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
    ```
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.

  If you have backend api exposed via frontend nginx:
  ```
  FRONTEND_API_ENDPOINT=http://localhost:3301
  ```
  If not:
  ```
  FRONTEND_API_ENDPOINT=http://localhost:8080
  ```

- Next,
  ```
  yarn install
  yarn dev
  ```

### Important Notes:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)

**[`^top^`](#)**

## 3.2 Contribute to Frontend without installing SigNoz backend

If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.

- Clone the SigNoz repository and cd into signoz/frontend directory,
  ```
  git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
  ```
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
- Next,
  ```
  yarn install
  yarn dev
  ```

Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `<test environment URL>`.

**Frontend should now be accessible at** [`http://localhost:3301/application`](http://localhost:3301/application)

**[`^top^`](#)**

<hr>

# 4. Contribute to Backend (Query-Service) 🌑

**Need to Update:** [**https://github.com/SigNoz/signoz/tree/develop/pkg/query-service**](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)

## 4.1 To run ClickHouse setup (recommended for local development)

- Clone the SigNoz repository and cd into signoz directory,
  ```
  git clone https://github.com/SigNoz/signoz.git && cd signoz
  ```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
  <img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">

- Comment out `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)
  <img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">

- add below configuration to `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)
  ```
  ports:
    - 9001:9000
  ```
  <img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">

- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
  ```
  SIGNOZ_LOCAL_DB_PATH="./signoz.db"
  ```
  to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38)

- Now, install SigNoz locally **without** the `frontend` and `query-service`,
  - If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
  - If you are on `arm64` processors (Apple M1 Macs) run `sudo make run-arm`

#### Run locally,
```
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```

#### Build and Run locally
```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```

#### Docker Images
The docker images of query-service are available at https://hub.docker.com/r/signoz/query-service

```
docker pull signoz/query-service
```

```
docker pull signoz/query-service:latest
```

```
docker pull signoz/query-service:develop
```

### Important Note:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)

**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)

If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.

<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.

Click the button below. A workspace with all required environments will be created.

[](https://gitpod.io/#https://github.com/SigNoz/signoz)

> To use it on your forked repo, edit the 'Open in Gitpod' button url to `https://gitpod.io/#https://github.com/<your-github-username>/signoz`
> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->

# Contribute to SigNoz Helm Chart
**[`^top^`](#)**

Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).
<hr>

### To run helm chart for local development
# 5. Contribute to SigNoz Helm Chart 📊

- run `git clone https://github.com/SigNoz/charts.git` followed by `cd charts`
- it is recommended to use lightweight kubernetes (k8s) cluster for local development:
**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**

## 5.1 To run helm chart for local development

- Clone the SigNoz repository and cd into charts directory,
  ```
  git clone https://github.com/SigNoz/charts.git && cd charts
  ```
- It is recommended to use a lightweight kubernetes (k8s) cluster for local development (a kind example follows this list):
  - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
  - [k3d](https://k3d.io/#installation)
  - [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
- run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart
- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster,
- run `make dev-install` to install SigNoz chart with `my-release` release name in `platform` namespace,
- next run,
  ```
  kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
  ```
  to make SigNoz UI available at [localhost:3301](http://localhost:3301)
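For the cluster-creation step above, a minimal sketch with kind (assuming kind and kubectl are installed; the cluster name is arbitrary):

```bash
# kind registers the kubeconfig context as "kind-<name>".
kind create cluster --name signoz-dev
kubectl cluster-info --context kind-signoz-dev
# ...then continue with `make dev-install` as described above.
```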
**To load data with HotROD sample app:**
**5.1.1 To install the HotROD sample app:**

```sh
kubectl create ns sample-application

kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
```

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
  | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```

**5.1.2 To load data with the HotROD sample app:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
  'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```

**To stop the load generation:**
**5.1.3 To stop the load generation:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl \
  http://locust-master:8089/stop
```

**5.1.4 To delete the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
  | HOTROD_NAMESPACE=sample-application bash
```

**[`^top^`](#)**

---

## General Instructions
## Other Ways to Contribute

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).
There are many other ways to get involved with the community and to participate in this project:

- If you find any bugs, please create an issue
- If you find anything missing in documentation, you can create an issue with label **documentation**
- If you want to build any new feature, please create an issue with label `enhancement`
- If you want to discuss something about the product, start a new [discussion](https://github.com/SigNoz/signoz/discussions)
- Use the product, submitting GitHub issues when a problem is found.
- Help code review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and the [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.

### Conventions to follow when submitting commits, PRs

1. We try to follow https://www.conventionalcommits.org/en/v1.0.0/
## License

More specifically the commits and PRs should have type specifiers prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.
By contributing to SigNoz, you agree that your contributions will be licensed under its MIT license.

e.g. If you are submitting a fix for an issue in frontend - PR name should be prefixed with `fix(FE):`
Again, feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)

2. Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows

3. Feel free to ping us on `#contributing` or `#contributing-frontend` on our slack community if you need any help on this :)
Thank You!
51
Makefile
51
Makefile
@@ -10,15 +10,15 @@ BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
|
||||
# Internal variables or constants.
|
||||
FRONTEND_DIRECTORY ?= frontend
|
||||
FLATTENER_DIRECTORY ?= pkg/processors/flattener
|
||||
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup

REPONAME ?= signoz
DOCKER_TAG ?= latest

FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
FLATTERNER_DOCKER_IMAGE ?= flattener-processor

# Build-time Go variables
PACKAGE?=go.signoz.io/query-service
@@ -29,7 +29,7 @@ gitBranch=${PACKAGE}/version.gitBranch

LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"

all: build-push-frontend build-push-query-service build-push-flattener
all: build-push-frontend build-push-query-service
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
@@ -38,7 +38,8 @@ build-frontend-amd64:
	@echo "--> Building frontend docker image for amd64"
	@echo "------------------"
	@cd $(FRONTEND_DIRECTORY) && \
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) --build-arg TARGETPLATFORM="linux/amd64" .
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
	--build-arg TARGETPLATFORM="linux/amd64" .

# Step to build and push docker image of frontend(used in push pipeline)
build-push-frontend:
@@ -46,7 +47,8 @@ build-push-frontend:
	@echo "--> Building and pushing frontend docker image"
	@echo "------------------"
	@cd $(FRONTEND_DIRECTORY) && \
	docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
	docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 \
	--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .

# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
@@ -56,7 +58,8 @@ build-query-service-amd64:
	@echo "--> Building query-service docker image for amd64"
	@echo "------------------"
	@cd $(QUERY_SERVICE_DIRECTORY) && \
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS)
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
	--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS) .

# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service:
@@ -64,25 +67,9 @@ build-push-query-service:
	@echo "--> Building and pushing query-service docker image"
	@echo "------------------"
	@cd $(QUERY_SERVICE_DIRECTORY) && \
	docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .

# Steps to build and push docker image of flattener
.PHONY: build-flattener-amd64 build-push-flattener
# Step to build docker image of flattener in amd64 (used in build pipeline)
build-flattener-amd64:
	@echo "------------------"
	@echo "--> Building flattener docker image for amd64"
	@echo "------------------"
	@cd $(FLATTENER_DIRECTORY) && \
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"

# Step to build and push docker image of flattener in amd64 (used in push pipeline)
build-push-flattener:
	@echo "------------------"
	@echo "--> Building and pushing flattener docker image"
	@echo "------------------"
	@cd $(FLATTENER_DIRECTORY) && \
	docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .
	docker buildx build --file Dockerfile --progress plane --no-cache \
	--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) \
	--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .

dev-setup:
	mkdir -p /var/lib/signoz
@@ -93,7 +80,15 @@ dev-setup:
	@echo "------------------"

run-x86:
	@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up -d

run-arm:
	@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.arm.yaml up -d
down-x86:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v

clear-standalone-data:
	@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"

clear-swarm-data:
	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
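The reworked targets can be exercised locally; a minimal sketch, assuming GNU make, Docker, and docker-compose are installed and the commands run from the repository root:

    make build-query-service-amd64 DOCKER_TAG=dev   # local amd64 image build
    make run-x86                                    # start the standalone ClickHouse stack
    make down-x86                                   # stop it and remove volumes
    make clear-standalone-data                      # wipe alertmanager/clickhouse/signoz data dirs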
31 README.md
@@ -6,7 +6,7 @@

<p align="center">
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -32,10 +32,11 @@ SigNoz helps developers monitor applications and troubleshoot problems in their

👉 Run aggregates on trace data to get business relevant metrics



<br />

<br />


<br /><br />

@@ -87,7 +88,6 @@ You can find the complete list of languages here - https://opentelemetry.io/docs

## Getting Started

### Deploy using Docker

Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using docker
@@ -101,7 +101,6 @@ The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshoo

Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
@@ -131,11 +130,28 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:

## Contributing

We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.

Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)

### Project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)

#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Pranshu Chittora](https://github.com/pranshuchittora)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
@@ -159,6 +175,3 @@ As always, thanks to our amazing contributors!

<a href="https://github.com/signoz/signoz/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>
18 SECURITY.md (new file)
@@ -0,0 +1,18 @@
# Security Policy

SigNoz is looking forward to working with security researchers across the world to keep SigNoz and our users safe. If you have found an issue in our systems/applications, please reach out to us.

## Supported Versions
We always recommend using the latest version of SigNoz to ensure you get all security updates

## Reporting a Vulnerability

If you believe you have found a security vulnerability within SigNoz, please let us know right away. We'll try and fix the problem as soon as possible.

**Do not report vulnerabilities using public GitHub issues**. Instead, email <security@signoz.io> with a detailed account of the issue. Please submit one issue per email, this helps us triage vulnerabilities.

Once we've received your email we'll keep you updated as we fix the vulnerability.

## Thanks

Thank you for keeping SigNoz and our users safe. 🙇
35 deploy/docker-swarm/clickhouse-setup/alertmanager.yml (new file)
@@ -0,0 +1,35 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
- name: 'slack-notifications'
  slack_configs:
  - channel: '#alerts'
    send_resolved: true
    icon_url: https://avatars3.githubusercontent.com/u/3380462
    title: |-
      [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
      {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
        {{" "}}(
        {{- with .CommonLabels.Remove .GroupLabels.Names }}
          {{- range $index, $label := .SortedPairs -}}
            {{ if $index }}, {{ end }}
            {{- $label.Name }}="{{ $label.Value -}}"
          {{- end }}
        {{- end -}}
        )
      {{- end }}
    text: >-
      {{ range .Alerts -}}
      *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

      *Description:* {{ .Annotations.description }}

      *Details:*
        {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
        {{ end }}
      {{ end }}
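Before deploying, the file can be syntax-checked; a minimal sketch, assuming the amtool binary that ships with Alertmanager is available:

    amtool check-config deploy/docker-swarm/clickhouse-setup/alertmanager.yml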
11 deploy/docker-swarm/clickhouse-setup/alerts.yml (new file)
@@ -0,0 +1,11 @@
groups:
- name: ExampleCPULoadGroup
  rules:
  - alert: HighCpuLoad
    expr: system_cpu_load_average_1m > 0.1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: High CPU load
      description: "CPU load is > 0.1\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
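The rule file can be validated the same way; a sketch assuming promtool from a Prometheus release is on PATH:

    promtool check rules deploy/docker-swarm/clickhouse-setup/alerts.yml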
File diff suppressed because it is too large
28 deploy/docker-swarm/clickhouse-setup/clickhouse-storage.xml (new file)
@@ -0,0 +1,28 @@
<?xml version="1.0"?>
<clickhouse>
  <storage_configuration>
    <disks>
      <default>
        <keep_free_space_bytes>10485760</keep_free_space_bytes>
      </default>
      <s3>
        <type>s3</type>
        <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
        <access_key_id>ACCESS-KEY-ID</access_key_id>
        <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
      </s3>
    </disks>
    <policies>
      <tiered>
        <volumes>
          <default>
            <disk>default</disk>
          </default>
          <s3>
            <disk>s3</disk>
          </s3>
        </volumes>
      </tiered>
    </policies>
  </storage_configuration>
</clickhouse>
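Once this file is mounted under config.d (the mount exists but is commented out in the compose files below), the policy can be confirmed from inside the ClickHouse container; a sketch, where the container name is an assumption:

    docker exec -it signoz-clickhouse clickhouse-client \
      --query "SELECT policy_name, volume_name, disks FROM system.storage_policies"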
123 deploy/docker-swarm/clickhouse-setup/clickhouse-users.xml (new file)
@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
    <!-- See also the files in users.d directory where the settings can be overridden. -->

    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose random replica from set of replicas with minimum number of errors
                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
                 with minimum number of different symbols between replica's hostname and local hostname
                 (Hamming distance).
                 in_order - first live replica is chosen in specified order.
                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- See also the files in users.d directory where the password can be overridden.

                 Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
                 place its name in 'server' element inside 'ldap' element.
                 Example: <ldap><server>my_ldap_server</server></ldap>

                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
                 place 'kerberos' element instead of 'password' (and similar) elements.
                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
                 whose initiator's realm matches it.
                 Example: <kerberos />
                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>

                 How to generate decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 In first line will be password and in second - corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
                 In first line will be password and in second - corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                    <ip>::/0</ip>

                 To open access only from localhost, specify:
                    <ip>::1</ip>
                    <ip>127.0.0.1</ip>

                 Each element of list has one of the following forms:
                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.clickhouse.com.
                     To check access, DNS query is performed, and all received addresses compared to peer address.
                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
                     Strongly recommended that regexp is ends with $
                 All results of DNS requests are cached till server restart.
            -->
            <networks>
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- User can create other users and grant rights to them. -->
            <!-- <access_management>1</access_management> -->
        </default>
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>
@@ -1,19 +1,24 @@
version: "3"
version: "3.9"

services:
  clickhouse:
    image: yandex/clickhouse-server
    expose:
      - 8123
      - 9000
    ports:
      - 9001:9000
      - 8123:8123
    image: clickhouse/clickhouse-server:22.4.5-alpine
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    tty: true
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/

    deploy:
      restart_policy:
        condition: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
@@ -21,86 +26,119 @@ services:
      timeout: 5s
      retries: 3

  alertmanager:
    image: signoz/alertmanager:0.23.0-0.1
    volumes:
      - ./data/alertmanager:/data
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    depends_on:
      - query-service
    deploy:
      restart_policy:
        condition: on-failure

  query-service:
    image: signoz/query-service:0.4.1
    container_name: query-service
    restart: always
    image: signoz/query-service:0.10.0
    command: ["-config=/root/config/prometheus.yml"]
    ports:
      - "8080:8080"
    # ports:
    #   - "6060:6060"     # pprof port
    #   - "8080:8080"     # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
      - STORAGE=clickhouse
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm

    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse

  frontend:
    image: signoz/frontend:0.4.1
    container_name: frontend

    image: signoz/frontend:0.10.0
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - alertmanager
      - query-service
    links:
      - "query-service"
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/otelcontribcol:0.4.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=2000"]
    image: signoz/otelcontribcol:0.45.1-1.1
    command: ["--config=/etc/otel-collector-config.yaml"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"     # pprof extension
      - "8887:8888"     # Prometheus metrics exposed by the agent
      - "14268:14268"   # Jaeger receiver
      - "55678"         # OpenCensus receiver
      - "55680:55680"   # OTLP HTTP/2.0 legacy port
      - "55681:55681"   # OTLP HTTP/1.0 receiver
      - "4317:4317"     # OTLP GRPC receiver
      - "55679:55679"   # zpages extension
      - "13133"         # health_check
      # - "1777:1777"     # pprof extension
      - "4317:4317"       # OTLP gRPC receiver
      - "4318:4318"       # OTLP HTTP receiver
      # - "8888:8888"     # OtelCollector internal metrics
      # - "8889:8889"     # signoz spanmetrics exposed by the agent
      # - "9411:9411"     # Zipkin port
      # - "13133:13133"   # Health check extension
      # - "14250:14250"   # Jaeger gRPC
      # - "14268:14268"   # Jaeger thrift HTTP
      # - "55678:55678"   # OpenCensus receiver
      # - "55679:55679"   # zPages extension
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
    deploy:
      mode: replicated
      replicas: 3
      restart_policy:
        condition: on-failure
      resources:
        limits:
          memory: 2000m
    depends_on:
      - clickhouse

  otel-collector-hostmetrics:
    image: signoz/otelcontribcol:0.4.0
    command: ["--config=/etc/otel-collector-config-hostmetrics.yaml", "--mem-ballast-size-mib=683"]
  otel-collector-metrics:
    image: signoz/otelcontribcol:0.45.1-1.1
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    volumes:
      - ./otel-collector-config-hostmetrics.yaml:/etc/otel-collector-config-hostmetrics.yaml
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    # ports:
    #   - "1777:1777"     # pprof extension
    #   - "8888:8888"     # OtelCollector internal metrics
    #   - "13133:13133"   # Health check extension
    #   - "55679:55679"   # zPages extension
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    image: jaegertracing/example-hotrod:1.30
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

    logging:
      options:
        max-size: 50m
        max-file: "3"

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
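A minimal sketch of rolling these definitions out on a swarm manager (the stack name signoz is an assumption; any name works):

    docker stack deploy --compose-file docker-compose.yaml signoz
    docker stack services signoz   # replicas should converge to the declared counts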
@@ -1,72 +0,0 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:

  hostmetrics:
    collection_interval: 60s
    scrapers:
      cpu:
      load:
      memory:
      disk:
      filesystem:
      network:

  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        - job_name: "otel-collector"
          dns_sd_configs:
            - names:
                - 'tasks.signoz_otel-collector'
              type: 'A'
              port: 8888
        - job_name: "otel-collector-hostmetrics"
          scrape_interval: 10s
          static_configs:
            - targets: ["otel-collector-hostmetrics:8888"]
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [batch]
      exporters: [clickhouse]
    metrics:
      receivers: [otlp, prometheus, hostmetrics]
      processors: [batch]
      exporters: [clickhousemetricswrite]
@@ -1,47 +1,108 @@
receivers:
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: localhost:12345
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 60s
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}

processors:
  batch:
    send_batch_size: 1000
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
    timeout: 2s
    override: false
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
    dimensions_cache_size: 10000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}

exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  prometheus:
    endpoint: 0.0.0.0:8889
  # logging: {}

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

service:
  extensions: [health_check, zpages]
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions: [health_check, zpages, pprof]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [batch]
      exporters: [clickhouse]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
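The newly exposed OTLP HTTP receiver on 4318 can be smoke-tested from the host; a sketch with a deliberately empty payload (an HTTP 200 with an empty JSON body confirms the receiver is up):

    curl -s -X POST http://localhost:4318/v1/traces \
      -H 'Content-Type: application/json' \
      -d '{"resourceSpans":[]}'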
@@ -0,0 +1,66 @@
receivers:
  prometheus:
    config:
      scrape_configs:
        # otel-collector internal metrics
        - job_name: "otel-collector"
          scrape_interval: 60s
          static_configs:
            - targets:
                - otel-collector:8888
        # otel-collector-metrics internal metrics
        - job_name: "otel-collector-metrics"
          scrape_interval: 60s
          static_configs:
            - targets:
                - localhost:8888
        # SigNoz span metrics
        - job_name: "signozspanmetrics-collector"
          scrape_interval: 60s
          static_configs:
            - targets:
                - otel-collector:8889

processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

service:
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions: [health_check, zpages, pprof]
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
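Since service.telemetry exposes the collector's own metrics on 8888, the scrape targets above can be spot-checked; a sketch, run from any container on the same overlay network (service names are assumptions taken from the compose file):

    wget -qO- http://otel-collector-metrics:8888/metrics | head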
@@ -9,12 +9,13 @@ alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093
          - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
@@ -12,13 +12,20 @@ server {
    gzip_http_version 1.1;

    location / {
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }

    location /api/alertmanager {
        proxy_pass http://alertmanager:9093/api/v2;
    }

    location /api {
        proxy_pass http://query-service:8080/api;
    }

    # redirect server error pages to the static page /50x.html
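With the frontend container running, the proxy locations can be exercised from the host (port 3301 per the compose files; /api/alertmanager/status resolves to Alertmanager's /api/v2/status):

    curl -s http://localhost:3301/api/v1/version
    curl -s http://localhost:3301/api/alertmanager/status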
File diff suppressed because it is too large
28 deploy/docker/clickhouse-setup/clickhouse-storage.xml (new file)
@@ -0,0 +1,28 @@
<?xml version="1.0"?>
<clickhouse>
  <storage_configuration>
    <disks>
      <default>
        <keep_free_space_bytes>10485760</keep_free_space_bytes>
      </default>
      <s3>
        <type>s3</type>
        <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
        <access_key_id>ACCESS-KEY-ID</access_key_id>
        <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
      </s3>
    </disks>
    <policies>
      <tiered>
        <volumes>
          <default>
            <disk>default</disk>
          </default>
          <s3>
            <disk>s3</disk>
          </s3>
        </volumes>
      </tiered>
    </policies>
  </storage_configuration>
</clickhouse>
123 deploy/docker/clickhouse-setup/clickhouse-users.xml (new file)
@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
    <!-- See also the files in users.d directory where the settings can be overridden. -->

    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose random replica from set of replicas with minimum number of errors
                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
                 with minimum number of different symbols between replica's hostname and local hostname
                 (Hamming distance).
                 in_order - first live replica is chosen in specified order.
                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- See also the files in users.d directory where the password can be overridden.

                 Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
                 place its name in 'server' element inside 'ldap' element.
                 Example: <ldap><server>my_ldap_server</server></ldap>

                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
                 place 'kerberos' element instead of 'password' (and similar) elements.
                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
                 whose initiator's realm matches it.
                 Example: <kerberos />
                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>

                 How to generate decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 In first line will be password and in second - corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
                 In first line will be password and in second - corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                    <ip>::/0</ip>

                 To open access only from localhost, specify:
                    <ip>::1</ip>
                    <ip>127.0.0.1</ip>

                 Each element of list has one of the following forms:
                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.clickhouse.com.
                     To check access, DNS query is performed, and all received addresses compared to peer address.
                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
                     Strongly recommended that regexp is ends with $
                 All results of DNS requests are cached till server restart.
            -->
            <networks>
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- User can create other users and grant rights to them. -->
            <!-- <access_management>1</access_management> -->
        </default>
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>
0 deploy/docker/clickhouse-setup/data/signoz/.gitkeep (new file)
@@ -2,10 +2,21 @@ version: "2.4"

services:
  clickhouse:
    image: yandex/clickhouse-server:21.12.3.32
    image: clickhouse/clickhouse-server:22.4.5-alpine
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    tty: true
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
    restart: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
@@ -14,38 +25,52 @@ services:
      retries: 3

  alertmanager:
    image: signoz/alertmanager:0.5.0
    image: signoz/alertmanager:0.23.0-0.1
    volumes:
      - ./alertmanager.yml:/prometheus/alertmanager.yml
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - '--config.file=/prometheus/alertmanager.yml'
      - '--storage.path=/data'
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:0.6.1
    image: signoz/query-service:0.10.0
    container_name: query-service
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
    #   - "6060:6060"     # pprof port
    #   - "8080:8080"     # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      clickhouse:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.6.1
    image: signoz/frontend:0.10.0
    container_name: frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
@@ -53,23 +78,41 @@ services:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/otelcontribcol:0.5.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    image: signoz/otelcontribcol:0.45.1-1.1
    command: ["--config=/etc/otel-collector-config.yaml"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
    ports:
      - "4317:4317"       # OTLP GRPC receiver
      # - "1777:1777"     # pprof extension
      - "4317:4317"       # OTLP gRPC receiver
      - "4318:4318"       # OTLP HTTP receiver
      # - "8888:8888"     # OtelCollector internal metrics
      # - "8889:8889"     # signoz spanmetrics exposed by the agent
      # - "9411:9411"     # Zipkin port
      # - "13133:13133"   # health check extension
      # - "14250:14250"   # Jaeger gRPC
      # - "14268:14268"   # Jaeger thrift HTTP
      # - "55678:55678"   # OpenCensus receiver
      # - "55679:55679"   # zPages extension
    mem_limit: 2000m
    restart: always
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy

  otel-collector-metrics:
    image: signoz/otelcontribcol:0.5.0
    command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
    image: signoz/otelcontribcol:0.45.1-1.1
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    # ports:
    #   - "1777:1777"     # pprof extension
    #   - "8888:8888"     # OtelCollector internal metrics
    #   - "13133:13133"   # Health check extension
    #   - "55679:55679"   # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
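A minimal sketch of bringing up the pinned standalone stack (run from deploy/docker/clickhouse-setup; docker-compose v1 syntax, as used elsewhere in this repo):

    docker-compose up -d
    docker-compose ps    # the healthchecks above should report the services as healthy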
@@ -1,67 +1,111 @@
receivers:
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: "localhost:12345"
        endpoint: localhost:12345
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 30s
    collection_interval: 60s
    scrapers:
      cpu:
      load:
      memory:
      disk:
      filesystem:
      network:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}

processors:
  batch:
    send_batch_size: 1000
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
    dimensions_cache_size: 10000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
    timeout: 2s
    override: false

extensions:
  health_check: {}
  zpages: {}
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  prometheus:
    endpoint: "0.0.0.0:8889"
    endpoint: 0.0.0.0:8889
  # logging: {}

service:
  extensions: [health_check, zpages]
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhouse]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp, hostmetrics]
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
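The signozspanmetrics/prometheus processor publishes RED metrics through the prometheus exporter on 8889; with that port mapping uncommented they can be inspected directly (the signoz_latency metric family name is an assumption that may vary by collector version):

    curl -s http://localhost:8889/metrics | grep -m 5 signoz_latency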
@@ -3,42 +3,71 @@ receivers:
    protocols:
      grpc:
      http:

  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        # otel-collector internal metrics
        - job_name: "otel-collector"
          scrape_interval: 30s
          scrape_interval: 60s
          static_configs:
            - targets: ["otel-collector:8889"]
            - targets:
                - otel-collector:8888
        # otel-collector-metrics internal metrics
        - job_name: "otel-collector-metrics"
          scrape_interval: 60s
          static_configs:
            - targets:
                - localhost:8888
        # SigNoz span metrics
        - job_name: "signozspanmetrics-collector"
          scrape_interval: 60s
          static_configs:
            - targets:
                - otel-collector:8889

processors:
  batch:
    send_batch_size: 1000
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

extensions:
  health_check: {}
  zpages: {}
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

service:
  extensions: [health_check, zpages]
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    metrics:
      receivers: [otlp, prometheus]
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
@@ -11,17 +11,26 @@ server {
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    # to handle uri issue 414 from nginx
    client_max_body_size 24M;

    large_client_header_buffers 8 16k;

    location / {
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }

    location /api/alertmanager {
        proxy_pass http://alertmanager:9093/api/v2;
    }

    location /api {
        proxy_pass http://query-service:8080/api;
    }

    # redirect server error pages to the static page /50x.html
@@ -1,273 +0,0 @@
version: "2.4"

volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}

# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707

services:

  zookeeper:
    image: bitnami/zookeeper:3.6.2-debian-10-r100
    ports:
      - "2181:2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes

  kafka:
    # image: wurstmeister/kafka
    image: bitnami/kafka:2.7.0-debian-10-r1
    ports:
      - "9092:9092"
    hostname: kafka
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/data
      - coordinator_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_tiny/coordinator
      - environment_tiny/common

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_tiny/broker
      - environment_tiny/common

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/data
      - historical_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_tiny/historical
      - environment_tiny/common

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/data
      - middle_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_tiny/middlemanager
      - environment_tiny/common

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_tiny/router
      - environment_tiny/common
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
      interval: 30s
      timeout: 5s
      retries: 5

  flatten-processor:
    image: signoz/flattener-processor:0.4.0
    container_name: flattener-processor
    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"
    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans

  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
    container_name: query-service
    depends_on:
      router:
        condition: service_healthy
    ports:
      - "8080:8080"
    volumes:
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - STORAGE=druid
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go

  frontend:
    image: signoz/frontend:0.4.1
    container_name: frontend
    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json

  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"     # pprof extension
      - "8887:8888"     # Prometheus metrics exposed by the agent
      - "14268:14268"   # Jaeger receiver
      - "55678"         # OpenCensus receiver
      - "55680:55680"   # OTLP HTTP/2.0 legacy port
      - "55681:55681"   # OTLP HTTP/1.0 receiver
      - "4317:4317"     # OTLP GRPC receiver
      - "55679:55679"   # zpages extension
      - "13133"         # health_check
    depends_on:
      kafka:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
@@ -1,269 +0,0 @@
version: "2.4"

volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}

# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707

services:

  zookeeper:
    image: bitnami/zookeeper:3.6.2-debian-10-r100
    ports:
      - "2181:2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes

  kafka:
    # image: wurstmeister/kafka
    image: bitnami/kafka:2.7.0-debian-10-r1
    ports:
      - "9092:9092"
    hostname: kafka
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'

    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/druid/deepStorage
      - coordinator_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_small/coordinator

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_small/broker

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/druid/deepStorage
      - historical_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_small/historical

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/druid/deepStorage
      - middle_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_small/middlemanager

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_small/router
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
      interval: 30s
      timeout: 5s
      retries: 5

  flatten-processor:
    image: signoz/flattener-processor:0.4.0
    container_name: flattener-processor

    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"

    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans

  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
    container_name: query-service

    depends_on:
      router:
        condition: service_healthy
    ports:
      - "8080:8080"

    volumes:
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - STORAGE=druid
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go

  frontend:
    image: signoz/frontend:0.4.1
    container_name: frontend

    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3301:3301"
    volumes:
      - ./nginx-config.conf:/etc/nginx/conf.d/default.conf

  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"

    depends_on:
      - router
    restart: on-failure:6

    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json

  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"

    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"   # pprof extension
      - "8887:8888"   # Prometheus metrics exposed by the agent
      - "14268:14268" # Jaeger receiver
      - "55678"       # OpenCensus receiver
      - "55680:55680" # OTLP HTTP/2.0 legacy grpc receiver
      - "55681:55681" # OTLP HTTP/1.0 receiver
      - "4317:4317"   # OTLP GRPC receiver
      - "55679:55679" # zpages extension
      - "13133"       # health_check
    depends_on:
      kafka:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ./locust-scripts:/locust
@@ -1 +0,0 @@
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
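These two rules tell the Druid coordinator to keep the most recent 3 days of flattened_spans (one replica on the default tier, future data included) and drop everything older. A minimal sketch for reading the rules back after set-retention has run, assuming the router stays published on port 8888 as in the compose file:

    curl http://localhost:8888/druid/coordinator/v1/rules/flattened_spans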
@@ -1,69 +0,0 @@
{
  "type": "kafka",
  "dataSchema": {
    "dataSource": "flattened_spans",
    "parser": {
      "type": "string",
      "parseSpec": {
        "format": "json",
        "timestampSpec": {
          "column": "StartTimeUnixNano",
          "format": "nano"
        },
        "dimensionsSpec": {
          "dimensions": [
            "TraceId",
            "SpanId",
            "ParentSpanId",
            "Name",
            "ServiceName",
            "References",
            "Tags",
            "ExternalHttpMethod",
            "ExternalHttpUrl",
            "Component",
            "DBSystem",
            "DBName",
            "DBOperation",
            "PeerService",
            {
              "type": "string",
              "name": "TagsKeys",
              "multiValueHandling": "ARRAY"
            },
            {
              "type": "string",
              "name": "TagsValues",
              "multiValueHandling": "ARRAY"
            },
            { "name": "DurationNano", "type": "Long" },
            { "name": "Kind", "type": "int" },
            { "name": "StatusCode", "type": "int" }
          ]
        }
      }
    }
  },
  "metricsSpec" : [
    { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
  ],
  "granularitySpec": {
    "type": "uniform",
    "segmentGranularity": "DAY",
    "queryGranularity": "NONE",
    "rollup": false
  },
  "tuningConfig": {
    "type": "kafka",
    "reportParseExceptions": true
  },
  "ioConfig": {
    "topic": "flattened_spans",
    "replicas": 1,
    "taskDuration": "PT20M",
    "completionTimeout": "PT30M",
    "consumerProperties": {
      "bootstrap.servers": "kafka:9092"
    }
  }
}
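Note the quantilesDoublesSketch metric over DurationNano: it is what makes percentile latency queries cheap on this datasource. A minimal sketch of such a query through the router's SQL endpoint, assuming Druid SQL is enabled and the druid-datasketches extension is loaded (it is, in the environment files below); APPROX_QUANTILE_DS is the datasketches SQL function:

    curl -X POST -H 'Content-Type: application/json' \
      -d '{"query":"SELECT ServiceName, APPROX_QUANTILE_DS(\"QuantileDuration\", 0.99) AS p99_nano FROM flattened_spans GROUP BY 1"}' \
      http://localhost:8888/druid/v2/sql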
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=768m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=100MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=1280m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=2
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=1g
DRUID_XMS=1g
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=2g

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=2
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=128m
DRUID_XMS=128m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,26 +0,0 @@
# For S3 storage

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]

# druid_storage_type=s3
# druid_storage_bucket=<s3-bucket-name>
# druid_storage_baseKey=druid/segments

# AWS_ACCESS_KEY_ID=<s3-access-id>
# AWS_SECRET_ACCESS_KEY=<s3-access-key>
# AWS_REGION=<s3-aws-region>

# druid_indexer_logs_type=s3
# druid_indexer_logs_s3Bucket=<s3-bucket-name>
# druid_indexer_logs_s3Prefix=druid/indexing-logs

# -----------------------------------------------------------
# For local storage
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_storage_type=local
druid_storage_storageDirectory=/opt/data/segments
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/data/indexing-logs
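Switching deep storage from local disk to S3 means uncommenting the S3 block, commenting out the local block, and using the load list that includes druid-s3-extensions, as shown above. A minimal sketch with hypothetical values (the bucket name, region, and credentials below are placeholders for illustration, not settings from this repo):

    druid_storage_type=s3
    druid_storage_bucket=my-druid-segments
    druid_storage_baseKey=druid/segments
    AWS_REGION=us-east-1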
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,50 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,51 +0,0 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  queued_retry:
    num_workers: 4
    queue_size: 100
    retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  kafka/traces:
    brokers:
      - kafka:9092
    topic: 'otlp_spans'
    protocol_version: 2.0.0

  kafka/metrics:
    brokers:
      - kafka:9092
    topic: 'otlp_metrics'
    protocol_version: 2.0.0
service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [memory_limiter, batch, queued_retry]
      exporters: [kafka/traces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [kafka/metrics]
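With the health_check and zpages extensions enabled, the collector's liveness can be probed from outside the container. A minimal sketch, assuming the extension default ports (13133 and 55679) exposed by the compose files above:

    curl http://localhost:13133/              # health_check: returns 200 while the collector is up
    curl http://localhost:55679/debug/tracez  # zpages trace overview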
@@ -36,9 +36,9 @@ is_mac() {
    [[ $OSTYPE == darwin* ]]
}

is_arm64(){
    [[ `uname -m` == 'arm64' ]]
}
# is_arm64(){
#     [[ `uname -m` == 'arm64' ]]
# }

check_os() {
    if is_mac; then
@@ -102,7 +102,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
    local port_check_output
    local ports_pattern="80|3301|8080"
    local ports_pattern="3301|4317"

    if is_mac; then
        port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -119,7 +119,7 @@ check_ports_occupied() {
    send_event "port_not_available"

    echo "+++++++++++ ERROR ++++++++++++++++++++++"
    echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports."
    echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
    echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
    echo "++++++++++++++++++++++++++++++++++++++++"
    echo ""
@@ -133,58 +133,44 @@ install_docker() {

    if [[ $package_manager == apt-get ]]; then
        apt_cmd="sudo apt-get --yes --quiet"
        apt_cmd="$sudo_cmd apt-get --yes --quiet"
        $apt_cmd update
        $apt_cmd install software-properties-common gnupg-agent
        curl -fsSL "https://download.docker.com/linux/$os/gpg" | sudo apt-key add -
        sudo add-apt-repository \
        curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
        $sudo_cmd add-apt-repository \
            "deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
        $apt_cmd update
        echo "Installing docker"
        $apt_cmd install docker-ce docker-ce-cli containerd.io
    elif [[ $package_manager == zypper ]]; then
        zypper_cmd="sudo zypper --quiet --no-gpg-checks --non-interactive"
        zypper_cmd="$sudo_cmd zypper --quiet --no-gpg-checks --non-interactive"
        echo "Installing docker"
        if [[ $os == sles ]]; then
            os_sp="$(cat /etc/*-release | awk -F= '$1 == "VERSION_ID" { gsub(/"/, ""); print $2; exit }')"
            os_arch="$(uname -i)"
            sudo SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
            SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
        fi
        $zypper_cmd install docker docker-runc containerd
        sudo systemctl enable docker.service
        $sudo_cmd systemctl enable docker.service
    elif [[ $package_manager == yum && $os == 'amazon linux' ]]; then
        echo
        echo "Amazon Linux detected ... "
        echo
        # sudo yum install docker
        # sudo service docker start
        sudo amazon-linux-extras install docker
        # yum install docker
        # service docker start
        $sudo_cmd yum install -y amazon-linux-extras
        $sudo_cmd amazon-linux-extras enable docker
        $sudo_cmd yum install -y docker
    else

        yum_cmd="sudo yum --assumeyes --quiet"
        yum_cmd="$sudo_cmd yum --assumeyes --quiet"
        $yum_cmd install yum-utils
        sudo yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
        $sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
        echo "Installing docker"
        $yum_cmd install docker-ce docker-ce-cli containerd.io

    fi

}
install_docker_machine() {

    echo "\nInstalling docker machine ..."

    if [[ $os == "Mac" ]];then
        curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine
        chmod +x /usr/local/bin/docker-machine
    else
        curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine
        chmod +x /tmp/docker-machine
        sudo cp /tmp/docker-machine /usr/local/bin/docker-machine

    fi

}

install_docker_compose() {
@@ -192,9 +178,9 @@ install_docker_compose() {
    if [[ ! -f /usr/bin/docker-compose ]];then
        echo "++++++++++++++++++++++++"
        echo "Installing docker-compose"
        sudo curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
        sudo chmod +x /usr/local/bin/docker-compose
        sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
        $sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
        $sudo_cmd chmod +x /usr/local/bin/docker-compose
        $sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
        echo "docker-compose installed!"
        echo ""
    fi
@@ -210,16 +196,23 @@ install_docker_compose() {
}

start_docker() {
    echo "Starting Docker ..."
    if [ $os = "Mac" ]; then
    echo -e "🐳 Starting Docker ...\n"
    if [[ $os == "Mac" ]]; then
        open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
    else
        if ! sudo systemctl is-active docker.service > /dev/null; then
        if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
            echo "Starting docker service"
            sudo systemctl start docker.service
            $sudo_cmd systemctl start docker.service
        fi
        if [[ -z $sudo_cmd ]]; then
            docker ps > /dev/null && true
            if [[ $? -ne 0 ]]; then
                request_sudo
            fi
        fi
    fi
}

wait_for_containers_start() {
    local timeout=$1
@@ -229,16 +222,6 @@ wait_for_containers_start() {
        if [[ status_code -eq 200 ]]; then
            break
        else
            if [ $setup_type == 'druid' ]; then
                SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
                LEN_SUPERVISORS="${#SUPERVISORS}"

                if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
                    echo -e "\n🟠 Supervisors taking time to start ⏳ ... let's wait for some more time ⏱️\n\n"
                    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d
                fi
            fi

            echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
        fi
        ((timeout--))
@@ -249,24 +232,18 @@
}

bye() {  # Prints a friendly good bye message and exits the script.
    if [ "$?" -ne 0 ]; then
    if [[ "$?" -ne 0 ]]; then
        set +o errexit

        echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
        echo ""
        if [ $setup_type == 'clickhouse' ]; then
            if is_arm64; then
                echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a"
            else
                echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
            fi
        else
            echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
        fi
        echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

        # echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
        echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
        echo "++++++++++++++++++++++++++++++++++++++++"

        if [[ $email == "" ]]; then
            echo -e "\n📨 Please share your email to receive support with the installation"
            read -rp 'Email: ' email
@@ -274,6 +251,7 @@ bye() {  # Prints a friendly good bye message and exits the script.
            do
                read -rp 'Email: ' email
            done
        fi

        send_event "installation_support"
@@ -284,33 +262,73 @@ bye() {  # Prints a friendly good bye message and exits the script.
    fi
}

request_sudo() {
    if hash sudo 2>/dev/null; then
        echo -e "\n\n🙇 We will need sudo access to complete the installation."
        if (( $EUID != 0 )); then
            sudo_cmd="sudo"
            echo -e "Please enter your sudo password, if prompted."
            $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
            if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
                echo "Need sudo privileges to proceed with the installation."
                exit 1;
            fi

            echo -e "Got it! Thanks!! 🙏\n"
            echo -e "Okay! We will bring up the SigNoz cluster from here 🚀\n"
        fi
    fi
}

echo ""
echo -e "👋 Thank you for trying out SigNoz! "
echo ""

sudo_cmd=""

# Check sudo permissions
if (( $EUID != 0 )); then
    echo "🟡 Running installer with non-sudo permissions."
    echo "   In case of any failure or prompt, please consider running the script with sudo privileges."
    echo ""
else
    sudo_cmd="sudo"
fi

# Checking OS and assigning package manager
desired_os=0
os=""
email=""
echo -e "Detecting your OS ..."
echo -e "🌏 Detecting your OS ...\n"
check_os

# Obtain unique installation id
sysinfo="$(uname -a)"
if [ $? -ne 0 ]; then
if [[ $? -ne 0 ]]; then
    uuid="$(uuidgen)"
    uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
    SIGNOZ_INSTALLATION_ID="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
    sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
fi

digest_cmd=""
if hash shasum 2>/dev/null; then
    digest_cmd="shasum -a 256"
elif hash sha256sum 2>/dev/null; then
    digest_cmd="sha256sum"
elif hash openssl 2>/dev/null; then
    digest_cmd="openssl dgst -sha256"
fi

if [[ -z $digest_cmd ]]; then
    SIGNOZ_INSTALLATION_ID="$sysinfo"
else
    SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | shasum | cut -d ' ' -f1)
    SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
fi

# echo ""

# echo -e "👉 ${RED}Two ways to go forward\n"
# echo -e "${RED}1) ClickHouse as database (default)\n"
# echo -e "${RED}2) Kafka + Druid as datastore \n"
# read -p "⚙️ Enter your preference (1/2):" choice_setup

# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
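Net effect of the installation-id block above: the id becomes a SHA-256 digest of `uname -a` (or of a random UUID when uname fails). A one-line sketch of what the new pipeline computes, assuming sha256sum was the digest command found:

    echo "$(uname -a)" | sha256sum | grep -E -o '[a-zA-Z0-9]{64}'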
@@ -323,8 +341,6 @@ fi

# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
#     setup_type='clickhouse'
# else
#     setup_type='druid'
# fi

setup_type='clickhouse'
@@ -364,13 +380,7 @@ send_event() {
        'installation_error_checks')
            event="Installation Error - Checks"
            error="Containers not started"
            if [ $setup_type == 'clickhouse' ]; then
                others='"data": "some_checks",'
            else
                supervisors="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
                datasources="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
                others='"supervisors": "'"$supervisors"'", "datasources": "'"$datasources"'",'
            fi
            ;;
        'installation_support')
            event="Installation Support"
@@ -389,7 +399,7 @@
            ;;
    esac

    if [ "$error" != "" ]; then
    if [[ "$error" != "" ]]; then
        error='"error": "'"$error"'", '
    fi
@@ -412,15 +422,28 @@ fi

# Check if the Docker daemon is installed and available. If not, then install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
if ! is_command_present docker; then

    if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
        request_sudo
        install_docker
    else
        # enable docker without sudo from next reboot
        sudo usermod -aG docker "${USER}"
    elif is_mac; then
        echo ""
        echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
        echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
        echo "https://docs.docker.com/docker-for-mac/install/"
        echo "++++++++++++++++++++++++++++++++++++++++++++++++"

        send_event "docker_not_installed"
        exit 1
    else
        echo ""
        echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
        echo "Docker must be installed manually on your machine to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
        echo "https://docs.docker.com/get-docker/"
        echo "++++++++++++++++++++++++++++++++++++++++++++++++"

        send_event "docker_not_installed"
        exit 1
    fi
@@ -428,43 +451,25 @@ fi

# Install docker-compose
if ! is_command_present docker-compose; then
    request_sudo
    install_docker_compose
fi

start_docker

# sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true

echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
    if is_arm64; then
        sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull
    else
        sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
    fi
else
    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
fi

echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull

echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
if [ $setup_type == 'clickhouse' ]; then
    if is_arm64; then
        sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true
    else
        sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
    fi
else
    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
fi
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true

wait_for_containers_start 60
echo ""
@@ -473,11 +478,9 @@ if [[ $status_code -ne 200 ]]; then
    echo "+++++++++++ ERROR ++++++++++++++++++++++"
    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""
    if [ $setup_type == 'clickhouse' ]; then
        echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
    else
        echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
    fi

    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
    echo "or reach us on SigNoz for support https://signoz.io/slack"
    echo "++++++++++++++++++++++++++++++++++++++++"
@@ -495,15 +498,7 @@ else
    echo -e "🟢 Your frontend is running on http://localhost:3301"
    echo ""

    if [ $setup_type == 'clickhouse' ]; then
        if is_arm64; then
            echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v"
        else
            echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
        fi
    else
        echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
    fi
    echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"

    echo ""
    echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
@@ -2,3 +2,4 @@ node_modules
.vscode
build
.env
.git
@@ -1,2 +1,3 @@
node_modules
build
*.typegen.ts
@@ -3,16 +3,23 @@ module.exports = {
		browser: true,
		es2021: true,
		node: true,
		'jest/globals': true,
	},
	extends: [
		'airbnb',
		'airbnb-typescript',
		'eslint:recommended',
		'plugin:react/recommended',
		'plugin:@typescript-eslint/recommended',
		'plugin:@typescript-eslint/eslint-recommended',
		'plugin:prettier/recommended',
		'plugin:sonarjs/recommended',
		'plugin:import/errors',
		'plugin:import/warnings',
	],
	parser: '@typescript-eslint/parser',
	parserOptions: {
		project: './tsconfig.json',
		ecmaFeatures: {
			jsx: true,
		},
@@ -25,10 +32,17 @@ module.exports = {
		'simple-import-sort',
		'react-hooks',
		'prettier',
		'jest',
	],
	settings: {
		react: {
			version: 'latest',
			version: 'detect',
		},
		'import/resolver': {
			node: {
				paths: ['src'],
				extensions: ['.js', '.jsx', '.ts', '.tsx'],
			},
		},
	},
	rules: {
@@ -40,9 +54,13 @@ module.exports = {
		],
		'react/prop-types': 'off',
		'@typescript-eslint/explicit-function-return-type': 'error',
		'@typescript-eslint/no-var-requires': 0,
		'react/no-array-index-key': 2,
		'linebreak-style': ['error', process.platform === 'win32' ? 'windows' : 'unix'],
		'@typescript-eslint/no-var-requires': 'error',
		'react/no-array-index-key': 'error',
		'linebreak-style': [
			'error',
			process.platform === 'win32' ? 'windows' : 'unix',
		],
		'@typescript-eslint/default-param-last': 'off',

		// simple sort error
		'simple-import-sort/imports': 'error',
@@ -50,7 +68,45 @@ module.exports = {

		// hooks
		'react-hooks/rules-of-hooks': 'error',
		'react-hooks/exhaustive-deps': 'warn',
		'react-hooks/exhaustive-deps': 'error',

		// airbnb
		'no-underscore-dangle': 'off',
		'no-console': 'off',
		'import/prefer-default-export': 'off',
		'import/extensions': [
			'error',
			'ignorePackages',
			{
				js: 'never',
				jsx: 'never',
				ts: 'never',
				tsx: 'never',
			},
		],
		'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
		'jsx-a11y/label-has-associated-control': [
			'error',
			{
				required: {
					some: ['nesting', 'id'],
				},
			},
		],
		'jsx-a11y/label-has-for': [
			'error',
			{
				required: {
					some: ['nesting', 'id'],
				},
			},
		],
		'@typescript-eslint/no-unused-vars': 'error',

		// eslint rules need to remove
		'no-shadow': 'off',
		'@typescript-eslint/no-shadow': 'off',
		'import/no-cycle': 'off',

		'prettier/prettier': [
			'error',
4 frontend/.husky/commit-msg Executable file
@@ -0,0 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"

cd frontend && npm run commitlint

4 frontend/.husky/pre-commit Executable file
@@ -0,0 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"

cd frontend && yarn lint-staged

@@ -1 +1 @@
12.13.0
16.15.0

@@ -1,5 +1,5 @@
# stage1 as builder
FROM node:12.18.0 as builder
# Builder stage
FROM node:16.15.0-slim as builder

# Add Maintainer Info
LABEL maintainer="signoz"
@@ -9,24 +9,23 @@ ARG TARGETARCH

WORKDIR /frontend

# copy the package.json to install dependencies
# Copy the package.json to install dependencies
COPY package.json ./

# Install the dependencies and make the folder
RUN yarn install
RUN CI=1 yarn install

COPY . .

# Build the project and copy the files
RUN yarn build

FROM nginx:1.18-alpine

#!/bin/sh
FROM nginx:1.18-alpine

COPY conf/default.conf /etc/nginx/conf.d/default.conf

## Remove default nginx index page
# Remove default nginx index page
RUN rm -rf /usr/share/nginx/html/*

# Copy from the stahg 1

6 frontend/babel.config.js Normal file
@@ -0,0 +1,6 @@
module.exports = {
presets: [
['@babel/preset-env', { targets: { node: 'current' } }],
'@babel/preset-typescript',
],
};

1 frontend/commitlint.config.ts Normal file
@@ -0,0 +1 @@
export default { extends: ['@commitlint/config-conventional'] };

@@ -1,3 +0,0 @@
{
"video": false
}

@@ -1,47 +0,0 @@
const Login = ({ email, name }: LoginProps): void => {
const emailInput = cy.findByPlaceholderText('mike@netflix.com');

emailInput.then((emailInput) => {
const element = emailInput[0];
// element is present
expect(element).not.undefined;
expect(element.nodeName).to.be.equal('INPUT');
});
emailInput.type(email).then((inputElements) => {
const inputElement = inputElements[0];
const inputValue = inputElement.getAttribute('value');
expect(inputValue).to.be.equals(email);
});

const firstNameInput = cy.findByPlaceholderText('Mike');
firstNameInput.then((firstNameInput) => {
const element = firstNameInput[0];
// element is present
expect(element).not.undefined;
expect(element.nodeName).to.be.equal('INPUT');
});

firstNameInput.type(name).then((inputElements) => {
const inputElement = inputElements[0];
const inputValue = inputElement.getAttribute('value');
expect(inputValue).to.be.equals(name);
});

const gettingStartedButton = cy.findByText('Get Started');
gettingStartedButton.click();

cy
.intercept('POST', '/api/v1/user?email*', {
statusCode: 200,
})
.as('defaultUser');

cy.wait('@defaultUser');
};

export interface LoginProps {
email: string;
name: string;
}

export default Login;

@@ -1,49 +0,0 @@
import {
getDefaultOption,
getOptions,
} from 'container/Header/DateTimeSelection/config';
// import { AppState } from 'store/reducers';

const CheckRouteDefaultGlobalTimeOptions = ({
route,
}: CheckRouteDefaultGlobalTimeOptionsProps): void => {
cy.visit(Cypress.env('baseUrl') + route);

const allOptions = getOptions(route);

const defaultValue = getDefaultOption(route);

const defaultSelectedOption = allOptions.find((e) => e.value === defaultValue);

expect(defaultSelectedOption).not.undefined;

cy
.findAllByTestId('dropDown')
.find('span')
.then((el) => {
const elements = el.get();

const item = elements[1];

expect(defaultSelectedOption?.label).to.be.equals(
item.innerText,
'Default option is not matching',
);
});

// cy
// .window()
// .its('store')
// .invoke('getState')
// .then((e: AppState) => {
// const { globalTime } = e;
// const { maxTime, minTime } = globalTime;
// // @TODO match the global min time and max time according to the selected option
// });
};

export interface CheckRouteDefaultGlobalTimeOptionsProps {
route: string;
}

export default CheckRouteDefaultGlobalTimeOptions;

@@ -1,21 +0,0 @@
{
"data": [
{
"created_at": 1638083159246,
"data": "{}",
"id": 1,
"name": "First Channels",
"type": "slack",
"updated_at": 1638083159246
},
{
"created_at": 1638083159246,
"data": "{}",
"id": 2,
"name": "Second Channels",
"type": "Slack",
"updated_at": 1638083159246
}
],
"message": "Success"
}

@@ -1,35 +0,0 @@
[
{
"serviceName": "frontend",
"p99": 1134610000,
"avgDuration": 744523000,
"numCalls": 267,
"callRate": 0.89,
"numErrors": 0,
"errorRate": 0,
"num4XX": 0,
"fourXXRate": 0
},
{
"serviceName": "customer",
"p99": 734422400,
"avgDuration": 348678530,
"numCalls": 267,
"callRate": 0.89,
"numErrors": 0,
"errorRate": 0,
"num4XX": 0,
"fourXXRate": 0
},
{
"serviceName": "driver",
"p99": 239234080,
"avgDuration": 204662290,
"numCalls": 267,
"callRate": 0.89,
"numErrors": 0,
"errorRate": 0,
"num4XX": 0,
"fourXXRate": 0
}
]

@@ -1,28 +0,0 @@
{
"status": "success",
"data": {
"rules": [
{
"labels": { "severity": "warning" },
"annotations": {},
"state": "firing",
"name": "First Rule",
"id": 1
},
{
"labels": { "severity": "warning" },
"annotations": {},
"state": "firing",
"name": "Second Rule",
"id": 2
},
{
"labels": { "severity": "P0" },
"annotations": {},
"state": "firing",
"name": "Third Rule",
"id": 3
}
]
}
}

@@ -1 +0,0 @@
{ "status": "success", "data": { "resultType": "matrix", "result": [] } }

@@ -1,29 +0,0 @@
{
"status": "success",
"data": {
"resultType": "matrix",
"result": [
{
"metric": {},
"values": [
[1634741764.961, "0.9"],
[1634741824.961, "0.9"],
[1634741884.961, "0.8666666666666667"],
[1634741944.961, "1"],
[1634742004.961, "0.9166666666666666"],
[1634742064.961, "0.95"],
[1634742124.961, "0.9333333333333333"],
[1634742184.961, "0.95"],
[1634742244.961, "1.0333333333333334"],
[1634742304.961, "0.9333333333333333"],
[1634742364.961, "0.9166666666666666"],
[1634742424.961, "0.9"],
[1634742484.961, "1.0166666666666666"],
[1634742544.961, "0.8333333333333334"],
[1634742604.961, "0.9166666666666666"],
[1634742664.961, "0.95"]
]
}
]
}
}

@@ -1,62 +0,0 @@
[
{
"timestamp": 1634742600000000000,
"p50": 720048500,
"p95": 924409540,
"p99": 974744300,
"numCalls": 48,
"callRate": 0.8,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742540000000000,
"p50": 712614000,
"p95": 955580700,
"p99": 1045595400,
"numCalls": 59,
"callRate": 0.98333335,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742480000000000,
"p50": 720842000,
"p95": 887187600,
"p99": 943676860,
"numCalls": 53,
"callRate": 0.8833333,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742420000000000,
"p50": 712287000,
"p95": 908505540,
"p99": 976507650,
"numCalls": 58,
"callRate": 0.96666664,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742360000000000,
"p50": 697125500,
"p95": 975581800,
"p99": 1190121900,
"numCalls": 54,
"callRate": 0.9,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742300000000000,
"p50": 711592500,
"p95": 880559900,
"p99": 1100105500,
"numCalls": 40,
"callRate": 0.6666667,
"numErrors": 0,
"errorRate": 0
}
]

@@ -1,9 +0,0 @@
[
{
"p50": 710824000,
"p95": 1003231400,
"p99": 1231265500,
"numCalls": 299,
"name": "HTTP GET /dispatch"
}
]

@@ -1,24 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';

describe('App Layout', () => {
beforeEach(() => {
cy.visit(Cypress.env('baseUrl'));
});

it('Check the user is in Logged Out State', async () => {
cy.location('pathname').then((e) => {
expect(e).to.be.equal(ROUTES.SIGN_UP);
});
});

it('Logged In State', () => {
const testEmail = 'test@test.com';
const firstName = 'Test';

cy.login({
email: testEmail,
name: firstName,
});
});
});

@@ -1,52 +0,0 @@
/// <reference types="cypress" />

import ROUTES from 'constants/routes';

import defaultAllChannels from '../../fixtures/defaultAllChannels.json';

describe('Channels', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');

cy.visit(Cypress.env('baseUrl') + ROUTES.ALL_CHANNELS);
});

it('Channels', () => {
cy
.intercept('**channels**', {
statusCode: 200,
fixture: 'defaultAllChannels',
})
.as('All Channels');

cy.wait('@All Channels');

cy
.get('.ant-tabs-tab')
.children()
.then((e) => {
const child = e.get();

const secondChild = child[1];

expect(secondChild.outerText).to.be.equals('Alert Channels');

expect(secondChild.ariaSelected).to.be.equals('true');
});

cy
.get('tbody')
.should('be.visible')
.then((e) => {
const allChildren = e.children().get();
expect(allChildren.length).to.be.equals(defaultAllChannels.data.length);

allChildren.forEach((e, index) => {
expect(e.firstChild?.textContent).not.null;
expect(e.firstChild?.textContent).to.be.equals(
defaultAllChannels.data[index].name,
);
});
});
});
});

@@ -1,44 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';

describe('default time', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');
});

it('Metrics Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.APPLICATION,
});
});

it('Dashboard Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.ALL_DASHBOARD,
});
});

it('Trace Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.TRACE,
});
});

it('Instrumentation Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.INSTRUMENTATION,
});
});

it('Service Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.SERVICE_MAP,
});
});

it('Settings Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.SETTINGS,
});
});
});

@@ -1,126 +0,0 @@
/// <reference types="cypress" />
import getGlobalDropDownFormatedDate from 'lib/getGlobalDropDownFormatedDate';
import { AppState } from 'store/reducers';

import topEndPoints from '../../fixtures/topEndPoints.json';

describe('Global Time Metrics Application', () => {
beforeEach(() => {
cy.visit(Cypress.env('baseUrl'));

const testEmail = 'test@test.com';
const firstName = 'Test';

cy.login({
email: testEmail,
name: firstName,
});
});

it('Metrics Application', async () => {
cy
.intercept('GET', '/api/v1/services*', {
fixture: 'defaultApp.json',
})
.as('defaultApps');

cy.wait('@defaultApps');

//clicking on frontend
cy.get('tr:nth-child(1) > td:first-child').click();

cy
.intercept('GET', '/api/v1/service/top_endpoints*', {
fixture: 'topEndPoints.json',
})
.as('topEndPoints');

cy
.intercept('GET', '/api/v1/service/overview?*', {
fixture: 'serviceOverview.json',
})
.as('serviceOverview');

cy
.intercept(
'GET',
`/api/v1/query_range?query=sum(rate(signoz_latency_count*`,
{
fixture: 'requestPerSecond.json',
},
)
.as('requestPerSecond');

cy
.window()
.its('store')
.invoke('getState')
.then((e: AppState) => {
const { globalTime } = e;

const { maxTime, minTime } = globalTime;

// intercepting metrics application call

cy.wait('@topEndPoints');
cy.wait('@serviceOverview');
//TODO add errorPercentage also
// cy.wait('@errorPercentage');
cy.wait('@requestPerSecond');

cy
.get('tbody tr:first-child td:first-child')
.then((el) => {
const elements = el.get();

expect(elements.length).to.be.equals(1);

const element = elements[0];

expect(element.innerText).to.be.equals(topEndPoints[0].name);
})
.click();

cy
.findAllByTestId('dropDown')
.find('span.ant-select-selection-item')
.then((e) => {
const elements = e;

const element = elements[0];

const customSelectedTime = element.innerText;

const startTime = new Date(minTime / 1000000);
const endTime = new Date(maxTime / 1000000);

const startString = getGlobalDropDownFormatedDate(startTime);
const endString = getGlobalDropDownFormatedDate(endTime);

const result = `${startString} - ${endString}`;

expect(customSelectedTime).to.be.equals(result);
});

cy
.findByTestId('dropDown')
.click()
.then(() => {
cy.findByTitle('Last 30 min').click();
});

cy
.findByTestId('dropDown')
.find('span.ant-select-selection-item')
.then((e) => {
const elements = e;

const element = elements[0];

const selectedTime = element.innerText;

expect(selectedTime).to.be.equals('Last 30 min');
});
});
});
});

@@ -1,67 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
import convertToNanoSecondsToSecond from 'lib/convertToNanoSecondsToSecond';

import defaultApps from '../../fixtures/defaultApp.json';

describe('Metrics', () => {
beforeEach(() => {
cy.visit(Cypress.env('baseUrl'));

const testEmail = 'test@test.com';
const firstName = 'Test';

cy.login({
email: testEmail,
name: firstName,
});
});

it('Default Apps', () => {
cy
.intercept('GET', '/api/v1/services*', {
fixture: 'defaultApp.json',
})
.as('defaultApps');

cy.wait('@defaultApps');

cy.location().then((e) => {
expect(e.pathname).to.be.equals(ROUTES.APPLICATION);

cy.get('tbody').then((elements) => {
const trElements = elements.children();
expect(trElements.length).to.be.equal(defaultApps.length);
const getChildren = (row: Element): Element => {
if (row.children.length === 0) {
return row;
}
return getChildren(row.children[0]);
};

// this is row element
trElements.map((index, element) => {
const [
applicationElement,
p99Element,
errorRateElement,
rpsElement,
] = element.children;
const applicationName = getChildren(applicationElement).innerHTML;
const p99Name = getChildren(p99Element).innerHTML;
const errorRateName = getChildren(errorRateElement).innerHTML;
const rpsName = getChildren(rpsElement).innerHTML;
const { serviceName, p99, errorRate, callRate } = defaultApps[index];
expect(applicationName).to.be.equal(serviceName);
expect(p99Name).to.be.equal(convertToNanoSecondsToSecond(p99).toString());
expect(errorRateName).to.be.equals(
parseFloat(errorRate.toString()).toFixed(2),
);
expect(rpsName).to.be.equals(callRate.toString());
});
});
});
});
});

export {};

@@ -1,128 +0,0 @@
/// <reference types="cypress" />

import ROUTES from 'constants/routes';

import defaultRules from '../../fixtures/defaultRules.json';

describe('Alerts', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');

cy
.intercept('get', '*rules*', {
fixture: 'defaultRules',
})
.as('defaultRules');

cy.visit(Cypress.env('baseUrl') + `${ROUTES.LIST_ALL_ALERT}`);

cy.wait('@defaultRules');
});

it('Edit Rules Page Failure', async () => {
cy
.intercept('**/rules/**', {
statusCode: 500,
})
.as('Get Rules Error');

cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
const firstDelete = e[0];
firstDelete.click();

cy.waitFor('@Get Rules Error');

cy
.window()
.location()
.then((e) => {
expect(e.pathname).to.be.equals(`/alerts/edit/1`);
});

cy.findByText('Something went wrong').then((e) => {
expect(e.length).to.be.equals(1);
});
});
});

it('Edit Rules Page Success', async () => {
const text = 'this is the sample value';

cy
.intercept('**/rules/**', {
statusCode: 200,
body: {
data: {
data: text,
},
},
})
.as('Get Rules Success');

cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
const firstDelete = e[0];
firstDelete.click();

cy.waitFor('@Get Rules Success');

cy.wait(1000);

cy.findByText('Save').then((e) => {
const [el] = e.get();

el.click();
});
});
});

it('All Rules are rendered correctly', async () => {
cy
.window()
.location()
.then(({ pathname }) => {
expect(pathname).to.be.equals(ROUTES.LIST_ALL_ALERT);

cy.get('tbody').then((e) => {
const tarray = e.children().get();

expect(tarray.length).to.be.equals(3);

tarray.forEach(({ children }, index) => {
const name = children[1]?.textContent;
const label = children[2]?.textContent;

expect(name).to.be.equals(defaultRules.data.rules[index].name);

const defaultLabels = defaultRules.data.rules[index].labels;

expect(label).to.be.equals(defaultLabels['severity']);
});
});
});
});

it('Rules are Deleted', async () => {
cy
.intercept('**/rules/**', {
body: {
data: 'Deleted',
message: 'Success',
},
statusCode: 200,
})
.as('deleteRules');

cy.get('button.ant-btn.ant-btn-link:first-child').then((e) => {
const firstDelete = e[0];

firstDelete.click();
});

cy.wait('@deleteRules');

cy.get('tbody').then((e) => {
const trray = e.children().get();
expect(trray.length).to.be.equals(2);
});
});
});

@@ -1,26 +0,0 @@
/// <reference types="cypress" />
// ***********************************************************
// This example plugins/index.js can be used to load plugins
//
// You can change the location of this file or turn off loading
// the plugins file with the 'pluginsFile' configuration option.
//
// You can read more here:
// https://on.cypress.io/plugins-guide
// ***********************************************************

// This function is called when a project is opened or re-opened (e.g. due to
// the project's config changing)

// cypress/plugins/index.ts

/// <reference types="cypress" />

/**
* @type {Cypress.PluginConfig}
*/
module.exports = (): void => {
return undefined;
};

export {};

@@ -1,24 +0,0 @@
import '@testing-library/cypress/add-commands';

import CheckRouteDefaultGlobalTimeOptions, {
CheckRouteDefaultGlobalTimeOptionsProps,
} from '../CustomFunctions/checkRouteDefaultGlobalTimeOptions';
import Login, { LoginProps } from '../CustomFunctions/Login';

Cypress.Commands.add('login', Login);
Cypress.Commands.add(
'checkDefaultGlobalOption',
CheckRouteDefaultGlobalTimeOptions,
);

declare global {
// eslint-disable-next-line @typescript-eslint/no-namespace
namespace Cypress {
interface Chainable {
login(props: LoginProps): void;
checkDefaultGlobalOption(
props: CheckRouteDefaultGlobalTimeOptionsProps,
): void;
}
}
}

@@ -1,20 +0,0 @@
// ***********************************************************
// This example support/index.js is processed and
// loaded automatically before your test files.
//
// This is a great place to put global configuration and
// behavior that modifies Cypress.
//
// You can change the location of this file or turn off
// automatically serving support files with the
// 'supportFile' configuration option.
//
// You can read more here:
// https://on.cypress.io/configuration
// ***********************************************************

// Import commands.js using ES2015 syntax:
import './commands';

// Alternatively you can use CommonJS syntax:
// require('./commands')

@@ -1,13 +0,0 @@
{
"extends": "../tsconfig.json",
"target": "es5",
"lib": ["es5", "dom"],
"compilerOptions": {
"noEmit": true,
// be explicit about types included
// to avoid clashing with Jest types
"types": ["cypress", "@testing-library/cypress", "node"],
"isolatedModules": false
},
"include": ["../node_modules/cypress", "./**/*.ts"]
}

@@ -9,15 +9,27 @@ const config: Config.InitialOptions = {
moduleNameMapper: {
'\\.(css|less)$': '<rootDir>/__mocks__/cssMock.ts',
},
notify: true,
notifyMode: 'always',
testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
transform: {
'\\.(js|jsx|ts|tsx)?$': 'babel-jest',
globals: {
extensionsToTreatAsEsm: ['.ts'],
'ts-jest': {
useESM: true,
},
},
testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
preset: 'ts-jest/presets/js-with-ts-esm',
transform: {
'^.+\\.(ts|tsx)?$': 'ts-jest',
'^.+\\.(js|jsx)$': 'babel-jest',
},
transformIgnorePatterns: ['node_modules/(?!(lodash-es)/)'],
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
testPathIgnorePatterns: ['/node_modules/', '/public/'],
moduleDirectories: ['node_modules', 'src'],
testEnvironmentOptions: {
'jest-playwright': {
browsers: ['chromium', 'firefox', 'webkit'],
},
},
};

export default config;

@@ -2,3 +2,4 @@
* Adds custom matchers from the react testing library to all tests
*/
import '@testing-library/jest-dom';
import 'jest-styled-components';

@@ -7,26 +7,34 @@
"dev": "cross-env NODE_ENV=development webpack serve --progress",
"build": "webpack --config=webpack.config.prod.js --progress",
"prettify": "prettier --write .",
"lint": "eslint . --debug",
"lint:fix": "eslint . --fix --debug",
"cypress:open": "cypress open",
"cypress:run": "cypress run",
"lint": "eslint ./src",
"lint:fix": "eslint ./src --fix",
"jest": "jest",
"jest:coverage": "jest --coverage",
"jest:watch": "jest --watch",
"bundle:size": "bundlesize"
"postinstall": "is-ci || yarn husky:configure",
"playwright": "NODE_ENV=testing playwright test --config=./playwright.config.ts",
"playwright:local:debug": "PWDEBUG=console yarn playwright --headed --browser=chromium",
"playwright:codegen:local":"playwright codegen http://localhost:3301",
"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
"commitlint": "commitlint --edit $1"
},
"engines": {
"node": ">=12.13.0"
"node": ">=16.15.0"
},
"author": "",
"license": "ISC",
"dependencies": {
"@ant-design/colors": "^6.0.0",
"@ant-design/icons": "^4.6.2",
"@grafana/data": "^8.4.3",
"@monaco-editor/react": "^4.3.1",
"@testing-library/jest-dom": "^5.11.4",
"@testing-library/react": "^11.1.0",
"@testing-library/user-event": "^12.1.10",
"antd": "^4.16.13",
"@welldone-software/why-did-you-render": "^6.2.1",
"@xstate/react": "^3.0.0",
"antd": "4.19.2",
"axios": "^0.21.0",
"babel-eslint": "^10.1.0",
"babel-jest": "^26.6.0",
@@ -36,6 +44,8 @@
"babel-preset-react-app": "^10.0.0",
"chart.js": "^3.4.0",
"chartjs-adapter-date-fns": "^2.0.0",
"chartjs-plugin-annotation": "^1.4.0",
"color": "^4.2.1",
"cross-env": "^7.0.3",
"css-loader": "4.3.0",
"css-minimizer-webpack-plugin": "^3.2.0",
@@ -47,31 +57,41 @@
"file-loader": "6.1.1",
"history": "4.10.1",
"html-webpack-plugin": "5.1.0",
"jest": "26.6.0",
"i18next": "^21.6.12",
"i18next-browser-languagedetector": "^6.1.3",
"i18next-http-backend": "^1.3.2",
"jest": "^27.5.1",
"js-base64": "^3.7.2",
"less": "^4.1.2",
"less-loader": "^10.2.0",
"lodash-es": "^4.17.21",
"mini-css-extract-plugin": "2.4.5",
"monaco-editor": "^0.30.0",
"react": "17.0.0",
"react-dom": "17.0.0",
"react-force-graph": "^1.41.0",
"react-graph-vis": "^1.0.5",
"react-grid-layout": "^1.2.5",
"react-grid-layout": "^1.3.4",
"react-i18next": "^11.16.1",
"react-query": "^3.34.19",
"react-redux": "^7.2.2",
"react-router-dom": "^5.2.0",
"react-use": "^17.3.2",
"react-vis": "^1.11.7",
"redux": "^4.0.5",
"redux-thunk": "^2.3.0",
"stream": "^0.0.2",
"style-loader": "1.3.0",
"styled-components": "^5.2.1",
"terser-webpack-plugin": "^5.2.5",
"timestamp-nano": "^1.0.0",
"ts-node": "^10.2.1",
"tsconfig-paths-webpack-plugin": "^3.5.1",
"typescript": "^4.0.5",
"uuid": "^8.3.2",
"web-vitals": "^0.2.4",
"webpack": "^5.23.0",
"webpack-dev-server": "^4.3.1"
"webpack-dev-server": "^4.3.1",
"xstate": "^4.31.0"
},
"browserslist": {
"production": [
@@ -92,13 +112,19 @@
"@babel/preset-env": "^7.12.17",
"@babel/preset-react": "^7.12.13",
"@babel/preset-typescript": "^7.12.17",
"@testing-library/cypress": "^8.0.0",
"@commitlint/cli": "^16.2.4",
"@commitlint/config-conventional": "^16.2.4",
"@jest/globals": "^27.5.1",
"@playwright/test": "^1.22.0",
"@testing-library/react-hooks": "^7.0.2",
"@types/color": "^3.0.3",
"@types/compression-webpack-plugin": "^9.0.0",
"@types/copy-webpack-plugin": "^8.0.1",
"@types/d3": "^6.2.0",
"@types/d3-tip": "^3.5.5",
"@types/jest": "^26.0.15",
"@types/jest": "^27.5.1",
"@types/lodash-es": "^4.17.4",
"@types/mini-css-extract-plugin": "^2.5.1",
"@types/node": "^16.10.3",
"@types/react": "^17.0.0",
"@types/react-dom": "^16.9.9",
@@ -113,32 +139,48 @@
"@types/webpack-dev-server": "^4.3.0",
"@typescript-eslint/eslint-plugin": "^4.28.2",
"@typescript-eslint/parser": "^4.28.2",
"@welldone-software/why-did-you-render": "^6.2.1",
"autoprefixer": "^9.0.0",
"babel-plugin-styled-components": "^1.12.0",
"bundlesize": "^0.18.1",
"compression-webpack-plugin": "^9.0.0",
"compression-webpack-plugin": "9.0.0",
"copy-webpack-plugin": "^8.1.0",
"critters-webpack-plugin": "^3.0.1",
"cypress": "^8.3.0",
"eslint": "^7.30.0",
"eslint-config-airbnb": "^19.0.4",
"eslint-config-airbnb-typescript": "^16.1.4",
"eslint-config-prettier": "^8.3.0",
"eslint-config-standard": "^16.0.3",
"eslint-plugin-import": "^2.23.4",
"eslint-plugin-import": "^2.25.4",
"eslint-plugin-jest": "^26.1.2",
"eslint-plugin-jsx-a11y": "^6.5.1",
"eslint-plugin-node": "^11.1.0",
"eslint-plugin-prettier": "^4.0.0",
"eslint-plugin-promise": "^5.1.0",
"eslint-plugin-react": "^7.24.0",
"eslint-plugin-react-hooks": "^4.3.0",
"eslint-plugin-simple-import-sort": "^7.0.0",
"husky": "4.3.8",
"eslint-plugin-sonarjs": "^0.12.0",
"husky": "^7.0.4",
"is-ci": "^3.0.1",
"jest-playwright-preset": "^1.7.0",
"jest-styled-components": "^7.0.8",
"less-plugin-npm-import": "^2.1.0",
"lint-staged": "10.5.3",
"lodash-es": "^4.17.21",
"lint-staged": "^12.3.7",
"portfinder-sync": "^0.0.2",
"prettier": "2.2.1",
"react-hot-loader": "^4.13.0",
"ts-jest": "^27.1.4",
"ts-node": "^10.2.1",
"typescript-plugin-css-modules": "^3.4.0",
"webpack-bundle-analyzer": "^4.5.0",
"webpack-cli": "^4.5.0"
"webpack-cli": "^4.9.2"
},
"lint-staged": {
"*.(js|jsx|ts|tsx)": [
"eslint --fix"
]
},
"resolutions": {
"@types/react": "17.0.0",
"@types/react-dom": "17.0.0"
}
}

23 frontend/playwright.config.ts Normal file
@@ -0,0 +1,23 @@
import { PlaywrightTestConfig } from '@playwright/test';
import dotenv from 'dotenv';

dotenv.config();

const config: PlaywrightTestConfig = {
forbidOnly: !!process.env.CI,
retries: process.env.CI ? 2 : 0,
preserveOutput: 'always',
name: 'Signoz',
testDir: './tests',
use: {
trace: 'retain-on-failure',
baseURL: process.env.PLAYWRIGHT_TEST_BASE_URL || 'http://localhost:3301',
},
updateSnapshots: 'all',
fullyParallel: false,
quiet: true,
testMatch: ['**/*.spec.ts'],
reporter: process.env.CI ? 'github' : 'list',
};

export default config;

85 frontend/public/locales/en-GB/alerts.json Normal file
@@ -0,0 +1,85 @@
{
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
"preview_chart_threshold_label": "Threshold",
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
"button_yes": "Yes",
"button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared",
"alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Step 3 - Alert Configuration",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with",
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
"rule_created": "Rule created successfully",
"rule_edited": "Rule edited successfully",
"expression_missing": "expression is missing in {{where}}",
"metricname_missing": "metric name is missing in {{where}}",
"condition_required": "at least one metric condition is required",
"alertname_required": "alert name is required",
"promql_required": "promql expression is required when query format is set to PromQL",
"button_savechanges": "Save Rule",
"button_createrule": "Create Rule",
"button_returntorules": "Return to rules",
"button_cancelchanges": "Cancel",
"button_discard": "Discard",
"text_condition1": "Send a notification when the metric is",
"text_condition2": "the threshold",
"text_condition3": "during the last",
"option_5min": "5 mins",
"option_10min": "10 mins",
"option_15min": "15 mins",
"option_60min": "60 mins",
"option_4hours": "4 hours",
"option_24hours": "24 hours",
"field_threshold": "Alert Threshold",
"option_allthetimes": "all the times",
"option_atleastonce": "at least once",
"option_onaverage": "on average",
"option_intotal": "in total",
"option_above": "above",
"option_below": "below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
"button_formula": "Formula",
"tab_qb": "Query Builder",
"tab_promql": "PromQL",
"title_confirm": "Confirm",
"button_ok": "Yes",
"button_cancel": "No",
"field_promql_expr": "PromQL Expression",
"field_alert_name": "Alert Name",
"field_alert_desc": "Alert Description",
"field_labels": "Labels",
"field_severity": "Severity",
"option_critical": "Critical",
"option_error": "Error",
"option_warning": "Warning",
"option_info": "Info",
"user_guide_headline": "Steps to create an Alert",
"user_guide_qb_step1": "Step 1 - Define the metric",
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
"user_guide_qb_step2b": "Enter the Alert threshold",
"user_guide_qb_step3": "Step 3 -Alert Configuration",
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_pql_step1": "Step 1 - Define the metric",
"user_guide_pql_step1a": "Write a PromQL query for the metric",
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_pql_step2b": "Enter the Alert threshold",
"user_guide_pql_step3": "Step 3 -Alert Configuration",
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts"
}

48 frontend/public/locales/en-GB/channels.json Normal file
@@ -0,0 +1,48 @@
{
"page_title_create": "New Notification Channels",
"page_title_edit": "Edit Notification Channels",
"button_save_channel": "Save",
"button_test_channel": "Test",
"button_return": "Back",
"field_channel_name": "Name",
"field_channel_type": "Type",
"field_webhook_url": "Webhook URL",
"field_slack_recipient": "Recipient",
"field_slack_title": "Title",
"field_slack_description": "Description",
"field_webhook_username": "User Name (optional)",
"field_webhook_password": "Password (optional)",
"field_pager_routing_key": "Routing Key",
"field_pager_description": "Description",
"field_pager_severity": "Severity",
"field_pager_details": "Additional Information",
"field_pager_component": "Component",
"field_pager_group": "Group",
"field_pager_class": "Class",
"field_pager_client": "Client",
"field_pager_client_url": "Client URL",
"placeholder_slack_description": "Description",
"placeholder_pager_description": "Description",
"help_pager_client": "Shows up as event source in Pagerduty",
"help_pager_client_url": "Shows up as event source link in Pagerduty",
"help_pager_class": "The class/type of the event",
"help_pager_details": "Specify a key-value format (must be a valid json)",
"help_pager_group": "A cluster or grouping of sources",
"help_pager_component": "The part or component of the affected system that is broke",
"help_pager_severity": "Severity of the incident, must be one of: must be one of the following: 'critical', 'warning', 'error' or 'info'",
"help_webhook_username": "Leave empty for bearer auth or when authentication is not necessary.",
"help_webhook_password": "Specify a password or bearer token",
"help_pager_description": "Shows up as description in pagerduty",
"channel_creation_done": "Successfully created the channel",
"channel_creation_failed": "An unexpected error occurred while creating this channel",
"channel_edit_done": "Channels Edited Successfully",
"channel_edit_failed": "An unexpected error occurred while updating this channel",
"selected_channel_invalid": "Channel type selected is invalid",
"username_no_password": "A Password must be provided with user name",
"test_unsupported": "Sorry, this channel type does not support test yet",
"channel_test_done": "An alert has been sent to this channel",
"channel_test_failed": "Failed to send a test message to this channel, please confirm that the parameters are set correctly",
"channel_test_unexpected": "An unexpected error occurred while sending a message to this channel, please try again",
"webhook_url_required": "Webhook URL is mandatory",
"slack_channel_help": "Specify channel or user, use #channel-name, @username (has to be all lowercase, no whitespace)"
}

10 frontend/public/locales/en-GB/common.json Normal file
@@ -0,0 +1,10 @@
{
"something_went_wrong": "Something went wrong",
"already_logged_in": "Already Logged In",
"success": "Success",
"cancel": "Cancel",
"share": "Share",
"save": "Save",
"edit": "Edit",
"logged_in": "Logged In"
}

16 frontend/public/locales/en-GB/dashboard.json Normal file
@@ -0,0 +1,16 @@
{
"create_dashboard": "Create Dashboard",
"import_json": "Import JSON",
"copy_to_clipboard": "Copy To ClipBoard",
"download_json": "Download JSON",
"view_json": "View JSON",
"export_dashboard": "Export this dashboard.",
"upload_json_file": "Upload JSON file",
"paste_json_below": "Paste JSON below",
"error_upload_json": "Invalid JSON",
"load_json": "Load JSON",
"import_dashboard_by_pasting": "Import dashboard by pasting JSON or importing JSON file",
"error_loading_json": "Error loading JSON file",
"empty_json_not_allowed": "Empty JSON is not allowed",
"new_dashboard_title": "Sample Title"
}

7 frontend/public/locales/en-GB/errorDetails.json Normal file
@@ -0,0 +1,7 @@
{
"see_trace_graph": "See what happened before and after this error in a trace graph",
"see_error_in_trace_graph": "See the error in trace graph",
"stack_trace": "Stacktrace",
"older": "Older",
"newer": "Newer"
}

21 frontend/public/locales/en-GB/generalSettings.json Normal file
@@ -0,0 +1,21 @@
{
"total_retention_period": "Total Retention Period",
"move_to_s3": "Move to S3\n(should be lower than total retention period)",
"status_message": {
"success": "Your last call to change retention period to {{total_retention}} {{s3_part}} was successful.",
"failed": "Your last call to change retention period to {{total_retention}} {{s3_part}} failed. Please try again.",
"pending": "Your last call to change retention period to {{total_retention}} {{s3_part}} is pending. This may take some time.",
"s3_part": "and S3 to {{s3_retention}}"
},
"retention_save_button": {
"pending": "Updating {{name}} retention period",
"success": "Save"
},
"retention_request_race_condition": "Your request to change retention period has failed, as another request is still in process.",
"retention_error_message": "There was an issue in changing the retention period for {{name}}. Please try again or reach out to support@signoz.io",
"retention_failed_message": "There was an issue in changing the retention period. Please try again or reach out to support@signoz.io",
"retention_comparison_error": "Total retention period for {{name}} can’t be lower or equal to the period after which data is moved to s3.",
"retention_null_value_error": "Retention Period for {{name}} is not set yet. Please set by choosing below",
"retention_confirmation": "Are you sure you want to change the retention period?",
"retention_confirmation_description": "This will change the amount of storage needed for saving {{name}}."
}

13 frontend/public/locales/en-GB/organizationsettings.json Normal file
@@ -0,0 +1,13 @@
{
"display_name": "Display Name",
"signoz": "SigNoz",
"email_address": "Email address",
"name_optional": "Name (optional)",
"role": "Role",
"email_placeholder": "john@signoz.io",
"name_placeholder": "John",
"add_another_team_member": "Add another team member",
"invite_team_members": "Invite team members",
"invite_members": "Invite Members",
"pending_invites": "Pending Invites"
}

9 frontend/public/locales/en-GB/routes.json Normal file
@@ -0,0 +1,9 @@
{
"general": "General",
"alert_channels": "Alert Channels",
"organization_settings": "Organization Settings",
"my_settings": "My Settings",
"overview_metrics": "Overview Metrics",
"dbcall_metrics": "Database Calls",
"external_metrics": "External Calls"
}

85 frontend/public/locales/en-GB/rules.json Normal file
@@ -0,0 +1,85 @@
{
"preview_chart_unexpected_error": "An unexpeced error occurred updating the chart, please check your query.",
"preview_chart_threshold_label": "Threshold",
"placeholder_label_key_pair": "Click here to enter a label (key value pairs)",
"button_yes": "Yes",
"button_no": "No",
"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
"remove_label_success": "Labels cleared",
"alert_form_step1": "Step 1 - Define the metric",
"alert_form_step2": "Step 2 - Define Alert Conditions",
"alert_form_step3": "Step 3 - Alert Configuration",
"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
"confirm_save_title": "Save Changes",
"confirm_save_content_part1": "Your alert built with",
"confirm_save_content_part2": "query will be saved. Press OK to confirm.",
"unexpected_error": "Sorry, an unexpected error occurred. Please contact your admin",
"rule_created": "Rule created successfully",
"rule_edited": "Rule edited successfully",
"expression_missing": "expression is missing in {{where}}",
"metricname_missing": "metric name is missing in {{where}}",
"condition_required": "at least one metric condition is required",
"alertname_required": "alert name is required",
"promql_required": "promql expression is required when query format is set to PromQL",
"button_savechanges": "Save Rule",
"button_createrule": "Create Rule",
"button_returntorules": "Return to rules",
"button_cancelchanges": "Cancel",
"button_discard": "Discard",
"text_condition1": "Send a notification when the metric is",
"text_condition2": "the threshold",
"text_condition3": "during the last",
"option_5min": "5 mins",
"option_10min": "10 mins",
"option_15min": "15 mins",
"option_60min": "60 mins",
"option_4hours": "4 hours",
"option_24hours": "24 hours",
"field_threshold": "Alert Threshold",
"option_allthetimes": "all the times",
"option_atleastonce": "at least once",
"option_onaverage": "on average",
"option_intotal": "in total",
"option_above": "above",
"option_below": "below",
"option_equal": "is equal to",
"option_notequal": "not equal to",
"button_query": "Query",
"button_formula": "Formula",
"tab_qb": "Query Builder",
"tab_promql": "PromQL",
"title_confirm": "Confirm",
"button_ok": "Yes",
"button_cancel": "No",
"field_promql_expr": "PromQL Expression",
"field_alert_name": "Alert Name",
"field_alert_desc": "Alert Description",
"field_labels": "Labels",
"field_severity": "Severity",
"option_critical": "Critical",
"option_error": "Error",
"option_warning": "Warning",
"option_info": "Info",
"user_guide_headline": "Steps to create an Alert",
"user_guide_qb_step1": "Step 1 - Define the metric",
"user_guide_qb_step1a": "Choose a metric which you want to create an alert on",
"user_guide_qb_step1b": "Filter it based on WHERE field or GROUPBY if needed",
"user_guide_qb_step1c": "Apply an aggregatiion function like COUNT, SUM, etc. or choose NOOP to plot the raw metric",
"user_guide_qb_step1d": "Create a formula based on Queries if needed",
"user_guide_qb_step2": "Step 2 - Define Alert Conditions",
"user_guide_qb_step2a": "Select the evaluation interval, threshold type and whether you want to alert above/below a value",
"user_guide_qb_step2b": "Enter the Alert threshold",
"user_guide_qb_step3": "Step 3 -Alert Configuration",
"user_guide_qb_step3a": "Set alert severity, name and descriptions",
"user_guide_qb_step3b": "Add tags to the alert in the Label field if needed",
"user_guide_pql_step1": "Step 1 - Define the metric",
"user_guide_pql_step1a": "Write a PromQL query for the metric",
"user_guide_pql_step1b": "Format the legends based on labels you want to highlight",
"user_guide_pql_step2": "Step 2 - Define Alert Conditions",
"user_guide_pql_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_pql_step2b": "Enter the Alert threshold",
"user_guide_pql_step3": "Step 3 -Alert Configuration",
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts"
}

5 frontend/public/locales/en-GB/settings.json Normal file
@@ -0,0 +1,5 @@
{
"current_password": "Current Password",
"new_password": "New Password",
"change_password": "Change Password"
}

Some files were not shown because too many files have changed in this diff.