Compare commits
619 Commits
(Commit list: 619 commits, `58b0c08d71` through `30124de409`; per-commit authors, messages, and dates were not captured in this extract.)
.editorconfig (new file, 33 lines)
@@ -0,0 +1,33 @@
+# EditorConfig is awesome: https://EditorConfig.org
+
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+
+# Matches multiple files with brace expansion notation
+# Set default charset
+[*.{js,py}]
+charset = utf-8
+
+# 4 space indentation
+[*.py]
+indent_style = space
+indent_size = 4
+
+# Tab indentation (no size specified)
+[Makefile]
+indent_style = tab
+
+# Indentation override for all JS under lib directory
+[lib/**.js]
+indent_style = space
+indent_size = 2
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
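These sections cascade from the generic `[*]` block down to the most specific glob. A minimal sketch of checking which properties resolve for a given file, assuming the EditorConfig core CLI (`editorconfig`) is installed; the file paths are illustrative:

```bash
# The CLI prints resolved key=value pairs and expects absolute paths.
editorconfig "$PWD/lib/app.js"  # expect indent_style=space, indent_size=2
editorconfig "$PWD/setup.py"    # expect indent_style=space, indent_size=4
```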
.github/CODEOWNERS
@@ -2,5 +2,6 @@
 # Owners are automatically requested for review for PRs that changes code
 # that they own.
 * @ankitnayan
-/frontend/ @palash-signoz
+/frontend/ @palashgdev @pranshuchittora
 /deploy/ @prashant-shahi
+/pkg/query-service/ @srikanthccv @makeavish @nityanandagohain
.github/ISSUE_TEMPLATE/bug_report.md
@@ -26,6 +26,7 @@ assignees: ''
 * **Signoz version**:
 * **Browser version**:
 * **Your OS and version**:
+* **Your CPU Architecture**(ARM/Intel):
 
 ## Additional context
 
.github/workflows/build.yaml
@@ -1,50 +1,27 @@
 name: build-pipeline
 
 on:
   pull_request:
     branches:
+      - develop
       - main
-      - v*
+      - release/v*
-    paths:
-      - 'pkg/**'
-      - 'frontend/**'
 
 jobs:
-  get_filters:
-    runs-on: ubuntu-latest
-    # Set job outputs to values from filter step
-    outputs:
-      frontend: ${{ steps.filter.outputs.frontend }}
-      query-service: ${{ steps.filter.outputs.query-service }}
-      flattener: ${{ steps.filter.outputs.flattener }}
-    steps:
-      # For pull requests it's not necessary to checkout the code
-      - uses: dorny/paths-filter@v2
-        id: filter
-        with:
-          filters: |
-            frontend:
-              - 'frontend/**'
-            query-service:
-              - 'pkg/query-service/**'
-            flattener:
-              - 'pkg/processors/flattener/**'
 
   build-frontend:
     runs-on: ubuntu-latest
-    needs:
-      - get_filters
-    if: ${{ needs.get_filters.outputs.frontend == 'true' }}
     steps:
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install dependencies
         run: cd frontend && yarn install
-      - name: Run Prettier
-        run: cd frontend && npm run prettify
-        continue-on-error: true
       - name: Run ESLint
         run: cd frontend && npm run lint
-        continue-on-error: true
+      - name: Run Jest
+        run: cd frontend && npm run jest
+      - name: TSC
+        run: yarn tsc
+        working-directory: ./frontend
       - name: Build frontend docker image
         shell: bash
         run: |
@@ -52,9 +29,6 @@ jobs:
 
   build-query-service:
     runs-on: ubuntu-latest
-    needs:
-      - get_filters
-    if: ${{ needs.get_filters.outputs.query-service == 'true' }}
     steps:
       - name: Checkout code
         uses: actions/checkout@v2
@@ -62,16 +36,3 @@ jobs:
         shell: bash
         run: |
           make build-query-service-amd64
 
-  build-flattener:
-    runs-on: ubuntu-latest
-    needs:
-      - get_filters
-    if: ${{ needs.get_filters.outputs.flattener == 'true' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Build flattener docker image
-        shell: bash
-        run: |
-          make build-flattener-amd64
.github/workflows/codeball.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
+name: Codeball
+on: [pull_request]
+
+jobs:
+  codeball_job:
+    runs-on: ubuntu-latest
+    name: Codeball
+    steps:
+      # Run Codeball on all new Pull Requests 🚀
+      # For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
+      - name: Codeball
+        uses: sturdy-dev/codeball-action@v2
+        with:
+          approvePullRequests: "true"
+          labelPullRequestsWhenApproved: "true"
+          labelPullRequestsWhenReviewNeeded: "false"
+          failJobsWhenReviewNeeded: "false"
.github/workflows/create-issue-on-pr-merge.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
+on:
+  pull_request_target:
+    types:
+      - closed
+
+env:
+  GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
+  PR_NUMBER: ${{ github.event.number }}
+jobs:
+  create_issue_on_merge:
+    if: github.event.pull_request.merged == true
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Codebase
+        uses: actions/checkout@v2
+        with:
+          repository: signoz/gh-bot
+      - name: Use Node v16
+        uses: actions/setup-node@v2
+        with:
+          node-version: 16
+      - name: Setup Cache & Install Dependencies
+        uses: bahmutov/npm-install@v1
+        with:
+          install-command: yarn --frozen-lockfile
+      - name: Comment on PR
+        run: node create-issue.js
.github/workflows/e2e-k3s.yaml
@@ -52,14 +52,11 @@ jobs:
           helm install my-release signoz/signoz -n platform \
             --wait \
             --timeout 10m0s \
-            --set cloud=null \
             --set frontend.service.type=LoadBalancer \
-            --set query-service.image.tag=$DOCKER_TAG \
+            --set queryService.image.tag=$DOCKER_TAG \
             --set frontend.image.tag=$DOCKER_TAG
 
           # get pods, services and the container images
-          kubectl describe deploy/my-release-frontend -n platform | grep Image
-          kubectl describe statefulset/my-release-query-service -n platform | grep Image
           kubectl get pods -n platform
           kubectl get svc -n platform
 
.github/workflows/playwright.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
+name: Playwright Tests
+on: [pull_request]
+
+jobs:
+  playwright:
+    defaults:
+      run:
+        working-directory: frontend
+    timeout-minutes: 60
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-node@v2
+        with:
+          node-version: "16.x"
+      - name: Install dependencies
+        run: CI=1 yarn install
+      - name: Install Playwright
+        run: npx playwright install --with-deps
+      - name: Run Playwright tests
+        run: yarn playwright
+        env:
+          # This might depend on your test-runner/language binding
+          PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}
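The secret only supplies the target URL; the steps themselves are reproducible locally. A sketch of the equivalent local run — the URL is an assumption (point it at whatever SigNoz UI instance you have running), and the variable is presumably read by the frontend's Playwright configuration:

```bash
cd frontend
CI=1 yarn install
npx playwright install --with-deps
# Hypothetical local target; substitute your own SigNoz UI address.
PLAYWRIGHT_TEST_BASE_URL=http://localhost:3301 yarn playwright
```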
.github/workflows/pr_verify_linked_issue.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
+# This workflow will inspect a pull request to ensure there is a linked issue or a
+# valid issue is mentioned in the body. If neither is present it fails the check and adds
+# a comment alerting users of this missing requirement.
+name: VerifyIssue
+
+on:
+  pull_request:
+    types: [edited, synchronize, opened, reopened]
+  check_run:
+
+jobs:
+  verify_linked_issue:
+    runs-on: ubuntu-latest
+    name: Ensure Pull Request has a linked issue.
+    steps:
+      - name: Verify Linked Issue
+        uses: hattan/verify-linked-issue-action@v1.1.0
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
.github/workflows/repo-stats.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
+on:
+  schedule:
+    # Run this once per day, towards the end of the day for keeping the most
+    # recent data point most meaningful (hours are interpreted in UTC).
+    - cron: "0 8 * * *"
+  workflow_dispatch: # Allow for running this manually.
+
+jobs:
+  j1:
+    name: repostats
+    runs-on: ubuntu-latest
+    steps:
+      - name: run-ghrs
+        uses: jgehrcke/github-repo-stats@v1.1.0
+        with:
+          # Define the stats repository (the repo to fetch
+          # stats for and to generate the report for).
+          # Remove the parameter when the stats repository
+          # and the data repository are the same.
+          repository: signoz/signoz
+          # Set a GitHub API token that can read the stats
+          # repository, and that can push to the data
+          # repository (which this workflow file lives in),
+          # to store data and the report files.
+          ghtoken: ${{ github.token }}
.gitignore
@@ -15,6 +15,7 @@ frontend/build
 frontend/.vscode
 frontend/.yarnclean
 frontend/.temp_cache
+frontend/test-results
 
 # misc
 .DS_Store
@@ -27,10 +28,6 @@ frontend/npm-debug.log*
 frontend/yarn-debug.log*
 frontend/yarn-error.log*
 frontend/src/constants/env.ts
-frontend/cypress/**/*.mp4
-
-# env file for cypress
-frontend/cypress.env.json
 
 .idea
 
@@ -42,4 +39,11 @@ frontend/cypress.env.json
 
 frontend/*.env
 pkg/query-service/signoz.db
+
+pkg/query-service/tests/test-deploy/data/
+
+# local data
 /deploy/docker/clickhouse-setup/data/
+/deploy/docker-swarm/clickhouse-setup/data/
(file header not captured — the tasks/ports layout matches a Gitpod workspace config such as .gitpod.yml)
@@ -11,7 +11,7 @@ tasks:
   - name: Run Docker Images
     init: |
       cd ./deploy
-      sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d
+      sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
     # command:
 
   - name: Run Frontend
@@ -22,7 +22,7 @@ tasks:
       yarn dev
 
 ports:
-  - port: 3000
+  - port: 3301
     onOpen: open-browser
   - port: 8080
     onOpen: ignore
(file header not captured — the content matches the `./scripts/commentLinesForSetup.sh` script referenced in CONTRIBUTING.md below)
@@ -4,4 +4,4 @@
 # Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml chnages.
 # Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz
 
-sed -i 38,70's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
+sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
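In the `s/.*/# &/` expression, `&` substitutes the entire matched line, so the address range `38,62` simply prefixes those lines with `# `. A standalone illustration of the same pattern (the file name here is illustrative):

```bash
# Comment out lines 38-62 of a YAML file in place by prefixing each with '# '.
sed -i '38,62s/.*/# &/' docker-compose.yaml
```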
CONTRIBUTING.md
@@ -18,9 +18,15 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git
 ### Contribute to Frontend with Docker installation of SigNoz
 
 - `git clone https://github.com/SigNoz/signoz.git && cd signoz`
-- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L59`
+- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L62`
 - run `cd deploy` to move to deploy directory
 - Install signoz locally without the frontend
+- Add below configuration to query-service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
+
+  ```docker
+  ports:
+    - "8080:8080"
+  ```
 - If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d`
 - If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d`
 - `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
@@ -47,13 +53,32 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht
 ### To run ClickHouse setup (recommended for local development)
 
 - git clone https://github.com/SigNoz/signoz.git
+- run `cd signoz` to move to signoz directory
 - run `sudo make dev-setup` to configure local setup to run query-service
-- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L59`
+- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml`
-- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
+- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml`
+- add below configuration to clickhouse section at `docker/clickhouse-setup/docker-compose.yaml`
+
+  ```docker
+  expose:
+    - 9000
+  ports:
+    - 9001:9000
+  ```
+
+- run `cd pkg/query-service/` to move to query-service directory
+- Open ./constants/constants.go
+  - Replace ```const RELATIONAL_DATASOURCE_PATH = "/var/lib/signoz/signoz.db"``` \
+    with ```const RELATIONAL_DATASOURCE_PATH = "./signoz.db".```
+
 - Install signoz locally without the frontend and query-service
 - If you are using x86_64 processors (All Intel/AMD processors) run `sudo make run-x86`
 - If you are on arm64 processors (Apple M1 Macbooks) run `sudo make run-arm`
+
+#### Run locally
+
+```console
+ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
+```
+
 > Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
 
 **_Query Service should now be available at `http://localhost:8080`_**
@@ -61,13 +86,13 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht
 > If you want to see how, frontend plays with query service, you can run frontend also in you local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts` as the query-service is now running at port `8080`
 
 ---
-Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
+<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
 
 Click the button below. A workspace with all required environments will be created.
 
 [![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/SigNoz/signoz)
 
-> To use it on your forked repo, edit the 'Open in Gitpod' button url to `https://gitpod.io/#https://github.com/<your-github-username>/signoz`
+> To use it on your forked repo, edit the 'Open in Gitpod' button url to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->
 
 # Contribute to SigNoz Helm Chart
 
@@ -81,32 +106,70 @@ Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/char
 - [k3d](https://k3d.io/#installation)
 - [minikube](https://minikube.sigs.k8s.io/docs/start/)
 - create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
-- run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart
+- run `make dev-install` to install SigNoz chart with `my-release` release name in `platform` namespace.
-- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
+- run `kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
+
+**To install HotROD sample app:**
+
+```bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
+  | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
+```
+
 **To load data with HotROD sample app:**
 
-```sh
+```bash
-kubectl create ns sample-application
-
-kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
-
 kubectl -n sample-application run strzal --image=djbingham/curl \
   --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
   'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
 ```
 
 **To stop the load generation:**
 
-```sh
+```bash
 kubectl -n sample-application run strzal --image=djbingham/curl \
   --restart='OnFailure' -i --tty --rm --command -- curl \
   http://locust-master:8089/stop
 ```
 
+**To delete HotROD sample app:**
+
+```bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
+  | HOTROD_NAMESPACE=sample-application bash
+```
+
 ---
 
 ## General Instructions
 
+**Before making any significant changes, please open an issue**. Each issue
+should describe the following:
+
+* Requirement - what kind of use case are you trying to solve?
+* Proposal - what do you suggest to solve the problem or improve the existing
+  situation?
+* Any open questions to address
+
+Discussing your proposed changes ahead of time will make the contribution
+process smooth for everyone. Once the approach is agreed upon, make your changes
+and open a pull request(s). Unless your change is small, Please consider submitting different PRs:
+
+* First PR should include the overall structure of the new component:
+  * Readme, configuration, interfaces or base classes etc...
+  * This PR is usually trivial to review, so the size limit does not apply to
+    it.
+* Second PR should include the concrete implementation of the component. If the
+  size of this PR is larger than the recommended size consider splitting it in
+  multiple PRs.
+* If there are multiple sub-component then ideally each one should be implemented as
+  a separate pull request.
+* Last PR should include changes to any user facing documentation. And should include
+  end to end tests if applicable. The component must be enabled
+  only after sufficient testing, and there is enough confidence in the
+  stability and quality of the component.
+
 You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).
 
 - If you find any bugs, please create an issue
Makefile
@@ -10,15 +10,15 @@ BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 
 # Internal variables or constants.
 FRONTEND_DIRECTORY ?= frontend
-FLATTENER_DIRECTORY ?= pkg/processors/flattener
 QUERY_SERVICE_DIRECTORY ?= pkg/query-service
+STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
+SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
 
 REPONAME ?= signoz
 DOCKER_TAG ?= latest
 
 FRONTEND_DOCKER_IMAGE ?= frontend
 QUERY_SERVICE_DOCKER_IMAGE ?= query-service
-FLATTERNER_DOCKER_IMAGE ?= flattener-processor
 
 # Build-time Go variables
 PACKAGE?=go.signoz.io/query-service
@@ -29,7 +29,7 @@ gitBranch=${PACKAGE}/version.gitBranch
 
 LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"
 
-all: build-push-frontend build-push-query-service build-push-flattener
+all: build-push-frontend build-push-query-service
 # Steps to build and push docker image of frontend
 .PHONY: build-frontend-amd64 build-push-frontend
 # Step to build docker image of frontend in amd64 (used in build pipeline)
@@ -38,7 +38,8 @@ build-frontend-amd64:
 	@echo "--> Building frontend docker image for amd64"
 	@echo "------------------"
 	@cd $(FRONTEND_DIRECTORY) && \
-	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) --build-arg TARGETPLATFORM="linux/amd64" .
+	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
+	--build-arg TARGETPLATFORM="linux/amd64" .
 
 # Step to build and push docker image of frontend(used in push pipeline)
 build-push-frontend:
@@ -46,7 +47,8 @@ build-push-frontend:
 	@echo "--> Building and pushing frontend docker image"
 	@echo "------------------"
 	@cd $(FRONTEND_DIRECTORY) && \
-	docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
+	docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 \
+	--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
 
 # Steps to build and push docker image of query service
 .PHONY: build-query-service-amd64 build-push-query-service
@@ -56,7 +58,8 @@ build-query-service-amd64:
 	@echo "--> Building query-service docker image for amd64"
 	@echo "------------------"
 	@cd $(QUERY_SERVICE_DIRECTORY) && \
-	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS)
+	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
+	--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS) .
 
 # Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
 build-push-query-service:
@@ -64,25 +67,9 @@ build-push-query-service:
 	@echo "--> Building and pushing query-service docker image"
 	@echo "------------------"
 	@cd $(QUERY_SERVICE_DIRECTORY) && \
-	docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
+	docker buildx build --file Dockerfile --progress plane --no-cache \
+	--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) \
+	--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
 
-# Steps to build and push docker image of flattener
-.PHONY: build-flattener-amd64 build-push-flattener
-# Step to build docker image of flattener in amd64 (used in build pipeline)
-build-flattener-amd64:
-	@echo "------------------"
-	@echo "--> Building flattener docker image for amd64"
-	@echo "------------------"
-	@cd $(FLATTENER_DIRECTORY) && \
-	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
-
-# Step to build and push docker image of flattener in amd64 (used in push pipeline)
-build-push-flattener:
-	@echo "------------------"
-	@echo "--> Building and pushing flattener docker image"
-	@echo "------------------"
-	@cd $(FLATTENER_DIRECTORY) && \
-	docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .
 
 dev-setup:
 	mkdir -p /var/lib/signoz
@@ -93,7 +80,21 @@ dev-setup:
 	@echo "------------------"
 
 run-x86:
-	@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d
+	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up -d
 
 run-arm:
-	@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.arm.yaml up -d
+	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.arm.yaml up -d
+
+down-x86:
+	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
+
+down-arm:
+	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.arm.yaml down -v
+
+clear-standalone-data:
+	@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
+	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
+
+clear-swarm-data:
+	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
+	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
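Typical usage of the lifecycle targets added above, run from the repository root:

```bash
make run-x86                # start the standalone stack (Intel/AMD)
make down-x86               # stop it and remove volumes
make clear-standalone-data  # wipe alertmanager/clickhouse/signoz data dirs
make clear-swarm-data       # same, for the docker-swarm deployment
```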
(file header not captured — badge and screenshot changes match the repository README)
@@ -6,7 +6,7 @@
 
 <p align="center">
   <img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
-  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
+  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
   <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
   <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
   <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -34,8 +34,10 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
 
 
 ![screenzy-1644432902955](https://user-images.githubusercontent.com/504541/153270713-1b2156e6-ec03-42de-975b-3c02b8ec1836.png)
+<br />
 ![screenzy-1644432986784](https://user-images.githubusercontent.com/504541/153270725-0efb73b3-06ed-4207-bf13-9b7e2e17c4b8.png)
+<br />
+![screenzy-1644433015465](https://user-images.githubusercontent.com/504541/153270733-d6ce36bc-f06f-4ae1-8539-64d7f8db4f66.png)
 
 <br /><br />
SECURITY.md (new file, 18 lines)
@@ -0,0 +1,18 @@
+# Security Policy
+
+SigNoz is looking forward to working with security researchers across the world to keep SigNoz and our users safe. If you have found an issue in our systems/applications, please reach out to us.
+
+## Supported Versions
+We always recommend using the latest version of SigNoz to ensure you get all security updates
+
+## Reporting a Vulnerability
+
+If you believe you have found a security vulnerability within SigNoz, please let us know right away. We'll try and fix the problem as soon as possible.
+
+**Do not report vulnerabilities using public GitHub issues**. Instead, email <security@signoz.io> with a detailed account of the issue. Please submit one issue per email, this helps us triage vulnerabilities.
+
+Once we've received your email we'll keep you updated as we fix the vulnerability.
+
+## Thanks
+
+Thank you for keeping SigNoz and our users safe. 🙇
deploy/docker-swarm/clickhouse-setup/alertmanager.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
+global:
+  resolve_timeout: 1m
+  slack_api_url: 'https://hooks.slack.com/services/xxx'
+
+route:
+  receiver: 'slack-notifications'
+
+receivers:
+  - name: 'slack-notifications'
+    slack_configs:
+      - channel: '#alerts'
+        send_resolved: true
+        icon_url: https://avatars3.githubusercontent.com/u/3380462
+        title: |-
+          [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
+          {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
+            {{" "}}(
+            {{- with .CommonLabels.Remove .GroupLabels.Names }}
+              {{- range $index, $label := .SortedPairs -}}
+                {{ if $index }}, {{ end }}
+                {{- $label.Name }}="{{ $label.Value -}}"
+              {{- end }}
+            {{- end -}}
+            )
+          {{- end }}
+        text: >-
+          {{ range .Alerts -}}
+          *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
+
+          *Description:* {{ .Annotations.description }}
+
+          *Details:*
+          {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
+          {{ end }}
+          {{ end }}
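The `slack_api_url` above is a placeholder. Before wiring a real incoming-webhook URL into Alertmanager, it can be smoke-tested directly; the URL below is illustrative:

```bash
# Slack incoming webhooks accept a simple JSON payload.
curl -X POST -H 'Content-type: application/json' \
  --data '{"text": "Alertmanager webhook test"}' \
  'https://hooks.slack.com/services/T000/B000/XXXXXXXX'
```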
deploy/docker-swarm/clickhouse-setup/alerts.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
+groups:
+  - name: ExampleCPULoadGroup
+    rules:
+      - alert: HighCpuLoad
+        expr: system_cpu_load_average_1m > 0.1
+        for: 0m
+        labels:
+          severity: warning
+        annotations:
+          summary: High CPU load
+          description: "CPU load is > 0.1\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
(One file diff suppressed because it is too large.)
deploy/docker-swarm/clickhouse-setup/clickhouse-storage.xml (new file, 28 lines)
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<clickhouse>
+  <storage_configuration>
+    <disks>
+      <default>
+        <keep_free_space_bytes>10485760</keep_free_space_bytes>
+      </default>
+      <s3>
+        <type>s3</type>
+        <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
+        <access_key_id>ACCESS-KEY-ID</access_key_id>
+        <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
+      </s3>
+    </disks>
+    <policies>
+      <tiered>
+        <volumes>
+          <default>
+            <disk>default</disk>
+          </default>
+          <s3>
+            <disk>s3</disk>
+          </s3>
+        </volumes>
+      </tiered>
+    </policies>
+  </storage_configuration>
+</clickhouse>
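The `tiered` policy keeps new parts on the default disk and exposes the S3 disk as a second volume. A sketch of a table opting into it, assuming ClickHouse is running with this config; the table name, schema, and the 7-day TTL are illustrative:

```bash
# Create a table on the 'tiered' policy; parts move to the s3 volume after 7 days.
clickhouse-client --query "
  CREATE TABLE example_logs (ts DateTime, msg String)
  ENGINE = MergeTree
  ORDER BY ts
  TTL ts + INTERVAL 7 DAY TO VOLUME 's3'
  SETTINGS storage_policy = 'tiered'"
```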
deploy/docker-swarm/clickhouse-setup/clickhouse-users.xml (new file, 123 lines)
@@ -0,0 +1,123 @@
+<?xml version="1.0"?>
+<clickhouse>
+    <!-- See also the files in users.d directory where the settings can be overridden. -->
+
+    <!-- Profiles of settings. -->
+    <profiles>
+        <!-- Default settings. -->
+        <default>
+            <!-- Maximum memory usage for processing single query, in bytes. -->
+            <max_memory_usage>10000000000</max_memory_usage>
+
+            <!-- How to choose between replicas during distributed query processing.
+                 random - choose random replica from set of replicas with minimum number of errors
+                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
+                 with minimum number of different symbols between replica's hostname and local hostname
+                 (Hamming distance).
+                 in_order - first live replica is chosen in specified order.
+                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
+            -->
+            <load_balancing>random</load_balancing>
+        </default>
+
+        <!-- Profile that allows only read queries. -->
+        <readonly>
+            <readonly>1</readonly>
+        </readonly>
+    </profiles>
+
+    <!-- Users and ACL. -->
+    <users>
+        <!-- If user name was not specified, 'default' user is used. -->
+        <default>
+            <!-- See also the files in users.d directory where the password can be overridden.
+
+                 Password could be specified in plaintext or in SHA256 (in hex format).
+
+                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
+                 Example: <password>qwerty</password>.
+                 Password could be empty.
+
+                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
+                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
+
+                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
+                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
+
+                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
+                 place its name in 'server' element inside 'ldap' element.
+                 Example: <ldap><server>my_ldap_server</server></ldap>
+
+                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
+                 place 'kerberos' element instead of 'password' (and similar) elements.
+                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
+                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
+                 whose initiator's realm matches it.
+                 Example: <kerberos />
+                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
+
+                 How to generate decent password:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+                 In first line will be password and in second - corresponding SHA256.
+
+                 How to generate double SHA1:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+                 In first line will be password and in second - corresponding double SHA1.
+            -->
+            <password></password>
+
+            <!-- List of networks with open access.
+
+                 To open access from everywhere, specify:
+                 <ip>::/0</ip>
+
+                 To open access only from localhost, specify:
+                 <ip>::1</ip>
+                 <ip>127.0.0.1</ip>
+
+                 Each element of list has one of the following forms:
+                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+                 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+                 <host> Hostname. Example: server01.clickhouse.com.
+                 To check access, DNS query is performed, and all received addresses compared to peer address.
+                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
+                 To check access, DNS PTR query is performed for peer address and then regexp is applied.
+                 Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
+                 Strongly recommended that regexp is ends with $
+                 All results of DNS requests are cached till server restart.
+            -->
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+
+            <!-- Settings profile for user. -->
+            <profile>default</profile>
+
+            <!-- Quota for user. -->
+            <quota>default</quota>
+
+            <!-- User can create other users and grant rights to them. -->
+            <!-- <access_management>1</access_management> -->
+        </default>
+    </users>
+
+    <!-- Quotas. -->
+    <quotas>
+        <!-- Name of quota. -->
+        <default>
+            <!-- Limits for time interval. You could specify many intervals with different limits. -->
+            <interval>
+                <!-- Length of interval. -->
+                <duration>3600</duration>
+
+                <!-- No limits. Just calculate resource usage for time interval. -->
+                <queries>0</queries>
+                <errors>0</errors>
+                <result_rows>0</result_rows>
+                <read_rows>0</read_rows>
+                <execution_time>0</execution_time>
+            </interval>
+        </default>
+    </quotas>
+</clickhouse>
||||||
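The password comments above describe editing the users file directly; ClickHouse also merges override files from users.d, which keeps the shipped file pristine. A minimal sketch of setting a SHA256 password for the default user that way — the override file name is arbitrary, and REPLACE_WITH_SHA256_HEX stands in for the hash printed by the generator one-liner quoted above:

$ PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
$ cat > /etc/clickhouse-server/users.d/default-password.xml <<'EOF'
<clickhouse>
    <users>
        <default>
            <!-- drop the empty plaintext password, then set the hashed one -->
            <password remove='1' />
            <password_sha256_hex>REPLACE_WITH_SHA256_HEX</password_sha256_hex>
        </default>
    </users>
</clickhouse>
EOF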
@@ -1,106 +1,135 @@
-version: "3"
+version: "3.9"

 services:
   clickhouse:
-    image: yandex/clickhouse-server
-    expose:
-      - 8123
-      - 9000
-    ports:
-      - 9001:9000
-      - 8123:8123
-    volumes:
-      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
-      - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
-      - ./data/clickhouse/:/var/lib/clickhouse/
+    image: clickhouse/clickhouse-server:22.4.5-alpine
+    # ports:
+    #   - "9000:9000"
+    #   - "8123:8123"
+    tty: true
+    volumes:
+      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+      - ./data/clickhouse/:/var/lib/clickhouse/
+    deploy:
+      restart_policy:
+        condition: on-failure
+    logging:
+      options:
+        max-size: 50m
+        max-file: "3"
+    healthcheck:
+      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
+      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
+      interval: 30s
+      timeout: 5s
+      retries: 3

-    healthcheck:
-      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
-      interval: 30s
-      timeout: 5s
-      retries: 3
+  alertmanager:
+    image: signoz/alertmanager:0.23.0-0.1
+    volumes:
+      - ./data/alertmanager:/data
+    command:
+      - --queryService.url=http://query-service:8085
+      - --storage.path=/data
+    depends_on:
+      - query-service
+    deploy:
+      restart_policy:
+        condition: on-failure

   query-service:
-    image: signoz/query-service:0.4.1
-    container_name: query-service
-    restart: always
+    image: signoz/query-service:0.9.1
     command: ["-config=/root/config/prometheus.yml"]
-    ports:
-      - "8080:8080"
+    # ports:
+    #   - "6060:6060"     # pprof port
+    #   - "8080:8080"     # query-service port
     volumes:
       - ./prometheus.yml:/root/config/prometheus.yml
       - ../dashboards:/root/config/dashboards
       - ./data/signoz/:/var/lib/signoz/
     environment:
-      - ClickHouseUrl=tcp://clickhouse:9000
+      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
       - STORAGE=clickhouse
-      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
       - GODEBUG=netdns=go
-    depends_on:
-      - clickhouse
+      - TELEMETRY_ENABLED=true
+      - DEPLOYMENT_TYPE=docker-swarm

+    healthcheck:
+      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
+      interval: 30s
+      timeout: 5s
+      retries: 3
+    deploy:
+      restart_policy:
+        condition: on-failure
+    depends_on:
+      - clickhouse

   frontend:
-    image: signoz/frontend:0.4.1
-    container_name: frontend
+    image: signoz/frontend:0.9.1
+    deploy:
+      restart_policy:
+        condition: on-failure
     depends_on:
+      - alertmanager
       - query-service
-    links:
-      - "query-service"
     ports:
       - "3301:3301"
     volumes:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf


   otel-collector:
-    image: signoz/otelcontribcol:0.4.0
-    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=2000"]
+    image: signoz/otelcontribcol:0.45.1-1.0
+    command: ["--config=/etc/otel-collector-config.yaml"]
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
     ports:
-      - "1777:1777"     # pprof extension
-      - "8887:8888"     # Prometheus metrics exposed by the agent
-      - "14268:14268"   # Jaeger receiver
-      - "55678"         # OpenCensus receiver
-      - "55680:55680"   # OTLP HTTP/2.0 legacy port
-      - "55681:55681"   # OTLP HTTP/1.0 receiver
-      - "4317:4317"     # OTLP GRPC receiver
-      - "55679:55679"   # zpages extension
-      - "13133"         # health_check
+      - "4317:4317"       # OTLP gRPC receiver
+      - "4318:4318"       # OTLP HTTP receiver
+      # - "8889:8889"     # Prometheus metrics exposed by the agent
+      # - "13133:13133"   # health_check
+      # - "14268:14268"   # Jaeger receiver
+      # - "55678:55678"   # OpenCensus receiver
+      # - "55679:55679"   # zpages extension
+      # - "55680:55680"   # OTLP gRPC legacy receiver
+      # - "55681:55681"   # OTLP HTTP legacy receiver
     deploy:
       mode: replicated
       replicas: 3
+      restart_policy:
+        condition: on-failure
+      resources:
+        limits:
+          memory: 2000m
     depends_on:
       - clickhouse

-  otel-collector-hostmetrics:
-    image: signoz/otelcontribcol:0.4.0
-    command: ["--config=/etc/otel-collector-config-hostmetrics.yaml", "--mem-ballast-size-mib=683"]
+  otel-collector-metrics:
+    image: signoz/otelcontribcol:0.45.1-1.0
+    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
     volumes:
-      - ./otel-collector-config-hostmetrics.yaml:/etc/otel-collector-config-hostmetrics.yaml
+      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
+    deploy:
+      restart_policy:
+        condition: on-failure
     depends_on:
       - clickhouse


   hotrod:
-    image: jaegertracing/example-hotrod:latest
-    container_name: hotrod
-    ports:
-      - "9000:8080"
+    image: jaegertracing/example-hotrod:1.30
     command: ["all"]
     environment:
       - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
+    logging:
+      options:
+        max-size: 50m
+        max-file: "3"

   load-hotrod:
     image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
-    container_name: load-hotrod
     hostname: load-hotrod
-    ports:
-      - "8089:8089"
     environment:
       ATTACKED_HOST: http://hotrod:8080
       LOCUST_MODE: standalone
@@ -110,4 +139,4 @@ services:
       QUIET_MODE: "${QUIET_MODE:-false}"
       LOCUST_OPTS: "--headless -u 10 -r 1"
     volumes:
       - ../common/locust-scripts:/locust
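The compose diff above is the Docker Swarm flavour (deploy.restart_policy blocks, a replicated otel-collector). A hedged usage sketch — the working directory and the stack name signoz are assumptions, not taken from this diff:

$ docker swarm init    # once per host, if it is not already a swarm manager
$ docker stack deploy -c docker-compose.yaml signoz
$ docker stack services signoz    # otel-collector should converge to 3/3 replicas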
@@ -1,72 +0,0 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:

  hostmetrics:
    collection_interval: 60s
    scrapers:
      cpu:
      load:
      memory:
      disk:
      filesystem:
      network:

  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        - job_name: "otel-collector"
          dns_sd_configs:
            - names:
                - 'tasks.signoz_otel-collector'
              type: 'A'
              port: 8888
        - job_name: "otel-collector-hostmetrics"
          scrape_interval: 10s
          static_configs:
            - targets: ["otel-collector-hostmetrics:8888"]
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [batch]
      exporters: [clickhouse]
    metrics:
      receivers: [otlp, prometheus, hostmetrics]
      processors: [batch]
      exporters: [clickhousemetricswrite]
@@ -1,4 +1,8 @@
 receivers:
+  otlp/spanmetrics:
+    protocols:
+      grpc:
+        endpoint: "localhost:12345"
   otlp:
     protocols:
       grpc:
@@ -7,18 +11,40 @@ receivers:
     protocols:
       grpc:
       thrift_http:
+  hostmetrics:
+    collection_interval: 60s
+    scrapers:
+      cpu:
+      load:
+      memory:
+      disk:
+      filesystem:
+      network:
 processors:
   batch:
-    send_batch_size: 1000
+    send_batch_size: 10000
+    send_batch_max_size: 11000
     timeout: 10s
-  memory_limiter:
-    # Same as --mem-ballast-size-mib CLI argument
-    ballast_size_mib: 683
-    # 80% of maximum memory up to 2G
-    limit_mib: 1500
-    # 25% of limit up to 2G
-    spike_limit_mib: 512
-    check_interval: 5s
+  signozspanmetrics/prometheus:
+    metrics_exporter: prometheus
+    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
+    dimensions_cache_size: 10000
+    dimensions:
+      - name: service.namespace
+        default: default
+      - name: deployment.environment
+        default: default
+  # memory_limiter:
+  #   # 80% of maximum memory up to 2G
+  #   limit_mib: 1500
+  #   # 25% of limit up to 2G
+  #   spike_limit_mib: 512
+  #   check_interval: 5s
+  #
+  #   # 50% of the maximum memory
+  #   limit_percentage: 50
+  #   # 20% of max memory usage spike expected
+  #   spike_limit_percentage: 20
   # queued_retry:
   #   num_workers: 4
   #   queue_size: 100
@@ -27,21 +53,25 @@ extensions:
   health_check: {}
   zpages: {}
 exporters:
-  clickhouse:
-    datasource: tcp://clickhouse:9000
+  clickhousetraces:
+    datasource: tcp://clickhouse:9000/?database=signoz_traces
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/?database=signoz_metrics
     resource_to_telemetry_conversion:
       enabled: true
+  prometheus:
+    endpoint: "0.0.0.0:8889"
 service:
   extensions: [health_check, zpages]
   pipelines:
     traces:
       receivers: [jaeger, otlp]
-      processors: [batch]
-      exporters: [clickhouse]
+      processors: [signozspanmetrics/prometheus, batch]
+      exporters: [clickhousetraces]
     metrics:
-      receivers: [otlp]
+      receivers: [otlp, hostmetrics]
       processors: [batch]
       exporters: [clickhousemetricswrite]
+    metrics/spanmetrics:
+      receivers: [otlp/spanmetrics]
+      exporters: [prometheus]
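With the collector's OTLP receivers enabled (gRPC on 4317, HTTP on 4318 per the compose files above), a test span can be pushed over plain HTTP. A sketch, assuming 4318 is published to the host; the trace/span IDs are placeholders, and note that collector builds of this era expected instrumentationLibrarySpans where newer OTLP JSON uses scopeSpans:

$ curl -s http://localhost:4318/v1/traces -H 'Content-Type: application/json' -d '{
    "resourceSpans": [{
      "resource": {"attributes": [{"key": "service.name", "value": {"stringValue": "curl-smoke-test"}}]},
      "scopeSpans": [{"spans": [{
        "traceId": "5b8aa5a2d2c872e8321cf37308d69df2",
        "spanId": "051581bf3cb55c13",
        "name": "ping",
        "kind": 1,
        "startTimeUnixNano": "1651000000000000000",
        "endTimeUnixNano": "1651000001000000000"
      }]}]
    }]
  }'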
@@ -0,0 +1,48 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:

  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        - job_name: "otel-collector"
          scrape_interval: 60s
          static_configs:
            - targets: ["otel-collector:8889"]
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

service:
  extensions: [health_check, zpages]
  pipelines:
    metrics:
      receivers: [otlp, prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
@@ -9,12 +9,13 @@ alerting:
   alertmanagers:
     - static_configs:
         - targets:
-          # - alertmanager:9093
+          - alertmanager:9093

 # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
 rule_files:
   # - "first_rules.yml"
   # - "second_rules.yml"
+  - 'alerts.yml'

 # A scrape configuration containing exactly one endpoint to scrape:
 # Here it's Prometheus itself.
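prometheus.yml now loads alerts.yml and points at alertmanager:9093, so an alerts.yml must exist next to it. A minimal sketch of such a rule file — the rule itself is illustrative only, not part of this change:

$ cat > alerts.yml <<'EOF'
groups:
  - name: example
    rules:
      - alert: TargetDown
        expr: up == 0
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "A scrape target has been down for 5 minutes"
EOF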
@@ -1,7 +1,7 @@
 server {
     listen 3301;
     server_name _;

     gzip on;
     gzip_static on;
     gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
@@ -12,19 +12,26 @@ server {
     gzip_http_version 1.1;

     location / {
-        root /usr/share/nginx/html;
-        index index.html index.htm;
-        try_files $uri $uri/ /index.html;
+        if ( $uri = '/index.html' ) {
+            add_header Cache-Control no-store always;
+        }
+        root /usr/share/nginx/html;
+        index index.html index.htm;
+        try_files $uri $uri/ /index.html;
     }

+    location /api/alertmanager {
+        proxy_pass http://alertmanager:9093/api/v2;
+    }
+
     location /api {
         proxy_pass http://query-service:8080/api;

     }

     # redirect server error pages to the static page /50x.html
     #
     error_page 500 502 503 504 /50x.html;
     location = /50x.html {
         root /usr/share/nginx/html;
     }
 }
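The new /api/alertmanager location lets the frontend port double as a proxy to Alertmanager's v2 API. Once the stack is up it can be checked from the host; /status is Alertmanager's standard v2 status endpoint, and 3301 is the frontend port from this config:

$ curl -s http://localhost:3301/api/alertmanager/status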
File diff suppressed because it is too large
deploy/docker/clickhouse-setup/clickhouse-storage.xml (new file, 28 lines)
@@ -0,0 +1,28 @@
<?xml version="1.0"?>
<clickhouse>
    <storage_configuration>
        <disks>
            <default>
                <keep_free_space_bytes>10485760</keep_free_space_bytes>
            </default>
            <s3>
                <type>s3</type>
                <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
                <access_key_id>ACCESS-KEY-ID</access_key_id>
                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
            </s3>
        </disks>
        <policies>
            <tiered>
                <volumes>
                    <default>
                        <disk>default</disk>
                    </default>
                    <s3>
                        <disk>s3</disk>
                    </s3>
                </volumes>
            </tiered>
        </policies>
    </storage_configuration>
</clickhouse>
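The tiered policy above is inert until a table opts into it. A hedged sketch of switching an existing MergeTree table over from inside the container — the container name, database, and table are placeholders, and SigNoz's own migrations may handle this differently:

$ docker exec -it clickhouse-container clickhouse-client --query \
    "ALTER TABLE signoz_traces.signoz_index_v2 MODIFY SETTING storage_policy = 'tiered'"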
deploy/docker/clickhouse-setup/clickhouse-users.xml (new file, 123 lines)
@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
    <!-- See also the files in users.d directory where the settings can be overridden. -->

    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose random replica from set of replicas with minimum number of errors
                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
                 with minimum number of different symbols between replica's hostname and local hostname
                 (Hamming distance).
                 in_order - first live replica is chosen in specified order.
                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- See also the files in users.d directory where the password can be overridden.

                 Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
                 place its name in 'server' element inside 'ldap' element.
                 Example: <ldap><server>my_ldap_server</server></ldap>

                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
                 place 'kerberos' element instead of 'password' (and similar) elements.
                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
                 whose initiator's realm matches it.
                 Example: <kerberos />
                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>

                 How to generate decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 In first line will be password and in second - corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
                 In first line will be password and in second - corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                     <ip>::/0</ip>

                 To open access only from localhost, specify:
                     <ip>::1</ip>
                     <ip>127.0.0.1</ip>

                 Each element of list has one of the following forms:
                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.clickhouse.com.
                     To check access, DNS query is performed, and all received addresses compared to peer address.
                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
                 Strongly recommended that regexp is ends with $
                 All results of DNS requests are cached till server restart.
            -->
            <networks>
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- User can create other users and grant rights to them. -->
            <!-- <access_management>1</access_management> -->
        </default>
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>
deploy/docker/clickhouse-setup/config.xml (new file, 1304 lines)
File diff suppressed because it is too large
deploy/docker/clickhouse-setup/data/signoz/.gitkeep (new file, empty)
@@ -2,10 +2,21 @@ version: "2.4"

 services:
   clickhouse:
-    image: yandex/clickhouse-server:21.12.3.32
+    image: clickhouse/clickhouse-server:22.4.5-alpine
+    # ports:
+    #   - "9000:9000"
+    #   - "8123:8123"
+    tty: true
     volumes:
       - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
+    restart: on-failure
+    logging:
+      options:
+        max-size: 50m
+        max-file: "3"
     healthcheck:
       # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
       test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
@@ -14,38 +25,52 @@ services:
     retries: 3

   alertmanager:
-    image: signoz/alertmanager:0.5.0
+    image: signoz/alertmanager:0.23.0-0.1
     volumes:
-      - ./alertmanager.yml:/prometheus/alertmanager.yml
       - ./data/alertmanager:/data
+    depends_on:
+      query-service:
+        condition: service_healthy
+    restart: on-failure
     command:
-      - '--config.file=/prometheus/alertmanager.yml'
-      - '--storage.path=/data'
+      - --queryService.url=http://query-service:8085
+      - --storage.path=/data

 # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`


   query-service:
-    image: signoz/query-service:0.6.1
+    image: signoz/query-service:0.9.1
     container_name: query-service
     command: ["-config=/root/config/prometheus.yml"]
+    # ports:
+    #   - "6060:6060"     # pprof port
+    #   - "8080:8080"     # query-service port
     volumes:
       - ./prometheus.yml:/root/config/prometheus.yml
       - ../dashboards:/root/config/dashboards
       - ./data/signoz/:/var/lib/signoz/
     environment:
-      - ClickHouseUrl=tcp://clickhouse:9000
+      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
       - STORAGE=clickhouse
       - GODEBUG=netdns=go
       - TELEMETRY_ENABLED=true
+      - DEPLOYMENT_TYPE=docker-standalone-amd
+    restart: on-failure
+    healthcheck:
+      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
+      interval: 30s
+      timeout: 5s
+      retries: 3
     depends_on:
       clickhouse:
         condition: service_healthy

   frontend:
-    image: signoz/frontend:0.6.1
+    image: signoz/frontend:0.9.1
     container_name: frontend
+    restart: on-failure
     depends_on:
+      - alertmanager
       - query-service
     ports:
       - "3301:3301"
@@ -53,37 +78,46 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/otelcontribcol:0.5.0
-    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
+    image: signoz/otelcontribcol:0.45.1-1.0
+    command: ["--config=/etc/otel-collector-config.yaml"]
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
     ports:
-      - "4317:4317"       # OTLP GRPC receiver
+      - "4317:4317"       # OTLP gRPC receiver
+      - "4318:4318"       # OTLP HTTP receiver
+      # - "8889:8889"     # Prometheus metrics exposed by the agent
+      # - "13133:13133"   # health_check
+      # - "14268:14268"   # Jaeger receiver
+      # - "55678:55678"   # OpenCensus receiver
+      # - "55679:55679"   # zpages extension
+      # - "55680:55680"   # OTLP gRPC legacy receiver
+      # - "55681:55681"   # OTLP HTTP legacy receiver
     mem_limit: 2000m
-    restart: always
+    restart: on-failure
     depends_on:
       clickhouse:
         condition: service_healthy

   otel-collector-metrics:
-    image: signoz/otelcontribcol:0.5.0
-    command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
+    image: signoz/otelcontribcol:0.45.1-1.0
+    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
     volumes:
       - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
+    restart: on-failure
     depends_on:
       clickhouse:
         condition: service_healthy

   hotrod:
     image: jaegertracing/example-hotrod:1.30
     container_name: hotrod
     logging:
       options:
         max-size: 50m
         max-file: "3"
     command: ["all"]
     environment:
       - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

   load-hotrod:
     image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
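This second compose file is the plain docker-compose (standalone) flavour: restart: on-failure instead of Swarm deploy blocks, and depends_on with condition: service_healthy gating startup on the ClickHouse healthcheck. A usage sketch, assuming the file lives at deploy/docker/clickhouse-setup/docker-compose.yaml (path inferred from the file headers in this compare view):

$ docker compose -f deploy/docker/clickhouse-setup/docker-compose.yaml up -d
$ docker compose -f deploy/docker/clickhouse-setup/docker-compose.yaml ps    # wait for clickhouse to report healthy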
@@ -12,7 +12,7 @@ receivers:
       grpc:
       thrift_http:
   hostmetrics:
-    collection_interval: 30s
+    collection_interval: 60s
     scrapers:
       cpu:
       load:
@@ -22,19 +22,29 @@ receivers:
       network:
 processors:
   batch:
-    send_batch_size: 1000
+    send_batch_size: 10000
+    send_batch_max_size: 11000
     timeout: 10s
   signozspanmetrics/prometheus:
     metrics_exporter: prometheus
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
-  memory_limiter:
-    # Same as --mem-ballast-size-mib CLI argument
-    ballast_size_mib: 683
-    # 80% of maximum memory up to 2G
-    limit_mib: 1500
-    # 25% of limit up to 2G
-    spike_limit_mib: 512
-    check_interval: 5s
+    dimensions_cache_size: 10000
+    dimensions:
+      - name: service.namespace
+        default: default
+      - name: deployment.environment
+        default: default
+  # memory_limiter:
+  #   # 80% of maximum memory up to 2G
+  #   limit_mib: 1500
+  #   # 25% of limit up to 2G
+  #   spike_limit_mib: 512
+  #   check_interval: 5s
+  #
+  #   # 50% of the maximum memory
+  #   limit_percentage: 50
+  #   # 20% of max memory usage spike expected
+  #   spike_limit_percentage: 20
   # queued_retry:
   #   num_workers: 4
   #   queue_size: 100
@@ -43,8 +53,8 @@ extensions:
   health_check: {}
   zpages: {}
 exporters:
-  clickhouse:
-    datasource: tcp://clickhouse:9000
+  clickhousetraces:
+    datasource: tcp://clickhouse:9000/?database=signoz_traces
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/?database=signoz_metrics
     resource_to_telemetry_conversion:
@@ -57,11 +67,11 @@ service:
     traces:
       receivers: [jaeger, otlp]
       processors: [signozspanmetrics/prometheus, batch]
-      exporters: [clickhouse]
+      exporters: [clickhousetraces]
     metrics:
       receivers: [otlp, hostmetrics]
       processors: [batch]
       exporters: [clickhousemetricswrite]
     metrics/spanmetrics:
       receivers: [otlp/spanmetrics]
       exporters: [prometheus]
@@ -9,21 +9,25 @@ receivers:
     config:
       scrape_configs:
         - job_name: "otel-collector"
-          scrape_interval: 30s
+          scrape_interval: 60s
           static_configs:
             - targets: ["otel-collector:8889"]
 processors:
   batch:
-    send_batch_size: 1000
+    send_batch_size: 10000
+    send_batch_max_size: 11000
     timeout: 10s
-  memory_limiter:
-    # Same as --mem-ballast-size-mib CLI argument
-    ballast_size_mib: 683
-    # 80% of maximum memory up to 2G
-    limit_mib: 1500
-    # 25% of limit up to 2G
-    spike_limit_mib: 512
-    check_interval: 5s
+  # memory_limiter:
+  #   # 80% of maximum memory up to 2G
+  #   limit_mib: 1500
+  #   # 25% of limit up to 2G
+  #   spike_limit_mib: 512
+  #   check_interval: 5s
+  #
+  #   # 50% of the maximum memory
+  #   limit_percentage: 50
+  #   # 20% of max memory usage spike expected
+  #   spike_limit_percentage: 20
   # queued_retry:
   #   num_workers: 4
   #   queue_size: 100
@@ -41,4 +45,4 @@ service:
     metrics:
       receivers: [otlp, prometheus]
       processors: [batch]
       exporters: [clickhousemetricswrite]
deploy/docker/clickhouse-setup/users.xml (new file, 123 lines)
@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
    <!-- See also the files in users.d directory where the settings can be overridden. -->

    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose random replica from set of replicas with minimum number of errors
                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
                 with minimum number of different symbols between replica's hostname and local hostname
                 (Hamming distance).
                 in_order - first live replica is chosen in specified order.
                 first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- See also the files in users.d directory where the password can be overridden.

                 Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
                 place its name in 'server' element inside 'ldap' element.
                 Example: <ldap><server>my_ldap_server</server></ldap>

                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
                 place 'kerberos' element instead of 'password' (and similar) elements.
                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
                 whose initiator's realm matches it.
                 Example: <kerberos />
                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>

                 How to generate decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 In first line will be password and in second - corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
                 In first line will be password and in second - corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                     <ip>::/0</ip>

                 To open access only from localhost, specify:
                     <ip>::1</ip>
                     <ip>127.0.0.1</ip>

                 Each element of list has one of the following forms:
                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.clickhouse.com.
                     To check access, DNS query is performed, and all received addresses compared to peer address.
                 <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
                 Strongly recommended that regexp is ends with $
                 All results of DNS requests are cached till server restart.
            -->
            <networks>
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- User can create other users and grant rights to them. -->
            <!-- <access_management>1</access_management> -->
        </default>
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>
@@ -1,7 +1,7 @@
 server {
     listen 3301;
     server_name _;

     gzip on;
     gzip_static on;
     gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
@@ -9,25 +9,34 @@ server {
     gzip_vary on;
     gzip_comp_level 6;
     gzip_buffers 16 8k;
     gzip_http_version 1.1;

+    # to handle uri issue 414 from nginx
+    client_max_body_size 24M;
+
+    large_client_header_buffers 8 16k;
+
     location / {
-        root /usr/share/nginx/html;
-        index index.html index.htm;
-        try_files $uri $uri/ /index.html;
+        if ( $uri = '/index.html' ) {
+            add_header Cache-Control no-store always;
+        }
+        root /usr/share/nginx/html;
+        index index.html index.htm;
+        try_files $uri $uri/ /index.html;
     }
-    location /api/alertmanager{
-        proxy_pass http://alertmanager:9093/api/v2;
+
+    location /api/alertmanager {
+        proxy_pass http://alertmanager:9093/api/v2;
     }

     location /api {
         proxy_pass http://query-service:8080/api;

     }

     # redirect server error pages to the static page /50x.html
     #
     error_page 500 502 503 504 /50x.html;
     location = /50x.html {
         root /usr/share/nginx/html;
     }
 }
@@ -1,273 +0,0 @@
version: "2.4"

volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}

# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707

services:

  zookeeper:
    image: bitnami/zookeeper:3.6.2-debian-10-r100
    ports:
      - "2181:2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes


  kafka:
    # image: wurstmeister/kafka
    image: bitnami/kafka:2.7.0-debian-10-r1
    ports:
      - "9092:9092"
    hostname: kafka
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'

    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/data
      - coordinator_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_tiny/coordinator
      - environment_tiny/common

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_tiny/broker
      - environment_tiny/common

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/data
      - historical_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_tiny/historical
      - environment_tiny/common

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/data
      - middle_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_tiny/middlemanager
      - environment_tiny/common

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_tiny/router
      - environment_tiny/common
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
      interval: 30s
      timeout: 5s
      retries: 5

  flatten-processor:
    image: signoz/flattener-processor:0.4.0
    container_name: flattener-processor

    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"

    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans


  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
    container_name: query-service

    depends_on:
      router:
        condition: service_healthy
    ports:
      - "8080:8080"
    volumes:
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - STORAGE=druid
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go

  frontend:
    image: signoz/frontend:0.4.1
    container_name: frontend

    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"

    depends_on:
      - router
    restart: on-failure:6

    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json


  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"

    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"     # pprof extension
      - "8887:8888"     # Prometheus metrics exposed by the agent
      - "14268:14268"   # Jaeger receiver
      - "55678"         # OpenCensus receiver
      - "55680:55680"   # OTLP HTTP/2.0 legacy port
      - "55681:55681"   # OTLP HTTP/1.0 receiver
      - "4317:4317"     # OTLP GRPC receiver
      - "55679:55679"   # zpages extension
      - "13133"         # health_check
    depends_on:
      kafka:
        condition: service_healthy


  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces


  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
```diff
@@ -1,269 +0,0 @@
-version: "2.4"
-
-volumes:
-  metadata_data: {}
-  middle_var: {}
-  historical_var: {}
-  broker_var: {}
-  coordinator_var: {}
-  router_var: {}
-
-# If able to connect to kafka but not able to write to topic otlp_spans look into below link
-# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
-
-services:
-
-  zookeeper:
-    image: bitnami/zookeeper:3.6.2-debian-10-r100
-    ports:
-      - "2181:2181"
-    environment:
-      - ALLOW_ANONYMOUS_LOGIN=yes
-
-  kafka:
-    # image: wurstmeister/kafka
-    image: bitnami/kafka:2.7.0-debian-10-r1
-    ports:
-      - "9092:9092"
-    hostname: kafka
-    environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka
-      KAFKA_ADVERTISED_PORT: 9092
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
-      ALLOW_PLAINTEXT_LISTENER: 'yes'
-      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
-      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
-
-    healthcheck:
-      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
-      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
-      interval: 30s
-      timeout: 10s
-      retries: 10
-    depends_on:
-      - zookeeper
-
-  postgres:
-    container_name: postgres
-    image: postgres:latest
-    volumes:
-      - metadata_data:/var/lib/postgresql/data
-    environment:
-      - POSTGRES_PASSWORD=FoolishPassword
-      - POSTGRES_USER=druid
-      - POSTGRES_DB=druid
-
-  coordinator:
-    image: apache/druid:0.20.0
-    container_name: coordinator
-    volumes:
-      - ./storage:/opt/druid/deepStorage
-      - coordinator_var:/opt/druid/data
-    depends_on:
-      - zookeeper
-      - postgres
-    ports:
-      - "8081:8081"
-    command:
-      - coordinator
-    env_file:
-      - environment_small/coordinator
-
-  broker:
-    image: apache/druid:0.20.0
-    container_name: broker
-    volumes:
-      - broker_var:/opt/druid/data
-    depends_on:
-      - zookeeper
-      - postgres
-      - coordinator
-    ports:
-      - "8082:8082"
-    command:
-      - broker
-    env_file:
-      - environment_small/broker
-
-  historical:
-    image: apache/druid:0.20.0
-    container_name: historical
-    volumes:
-      - ./storage:/opt/druid/deepStorage
-      - historical_var:/opt/druid/data
-    depends_on:
-      - zookeeper
-      - postgres
-      - coordinator
-    ports:
-      - "8083:8083"
-    command:
-      - historical
-    env_file:
-      - environment_small/historical
-
-  middlemanager:
-    image: apache/druid:0.20.0
-    container_name: middlemanager
-    volumes:
-      - ./storage:/opt/druid/deepStorage
-      - middle_var:/opt/druid/data
-    depends_on:
-      - zookeeper
-      - postgres
-      - coordinator
-    ports:
-      - "8091:8091"
-    command:
-      - middleManager
-    env_file:
-      - environment_small/middlemanager
-
-  router:
-    image: apache/druid:0.20.0
-    container_name: router
-    volumes:
-      - router_var:/opt/druid/data
-    depends_on:
-      - zookeeper
-      - postgres
-      - coordinator
-    ports:
-      - "8888:8888"
-    command:
-      - router
-    env_file:
-      - environment_small/router
-    healthcheck:
-      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
-      interval: 30s
-      timeout: 5s
-      retries: 5
-
-  flatten-processor:
-    image: signoz/flattener-processor:0.4.0
-    container_name: flattener-processor
-    depends_on:
-      - kafka
-      - otel-collector
-    ports:
-      - "8000:8000"
-    environment:
-      - KAFKA_BROKER=kafka:9092
-      - KAFKA_INPUT_TOPIC=otlp_spans
-      - KAFKA_OUTPUT_TOPIC=flattened_spans
-
-  query-service:
-    image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
-    container_name: query-service
-    depends_on:
-      router:
-        condition: service_healthy
-    ports:
-      - "8080:8080"
-    volumes:
-      - ../dashboards:/root/config/dashboards
-      - ./data/signoz/:/var/lib/signoz/
-    environment:
-      - DruidClientUrl=http://router:8888
-      - DruidDatasource=flattened_spans
-      - STORAGE=druid
-      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
-      - GODEBUG=netdns=go
-
-  frontend:
-    image: signoz/frontend:0.4.1
-    container_name: frontend
-    depends_on:
-      - query-service
-    links:
-      - "query-service"
-    ports:
-      - "3301:3301"
-    volumes:
-      - ./nginx-config.conf:/etc/nginx/conf.d/default.conf
-
-  create-supervisor:
-    image: theithollow/hollowapp-blog:curl
-    container_name: create-supervisor
-    command:
-      - /bin/sh
-      - -c
-      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
-    depends_on:
-      - router
-    restart: on-failure:6
-    volumes:
-      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
-
-  set-retention:
-    image: theithollow/hollowapp-blog:curl
-    container_name: set-retention
-    command:
-      - /bin/sh
-      - -c
-      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
-    depends_on:
-      - router
-    restart: on-failure:6
-    volumes:
-      - ./druid-jobs/retention-spec.json:/app/retention-spec.json
-
-  otel-collector:
-    image: otel/opentelemetry-collector:0.18.0
-    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
-    volumes:
-      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
-    ports:
-      - "1777:1777"   # pprof extension
-      - "8887:8888"   # Prometheus metrics exposed by the agent
-      - "14268:14268" # Jaeger receiver
-      - "55678"       # OpenCensus receiver
-      - "55680:55680" # OTLP HTTP/2.0 legacy gRPC receiver
-      - "55681:55681" # OTLP HTTP/1.0 receiver
-      - "4317:4317"   # OTLP gRPC receiver
-      - "55679:55679" # zpages extension
-      - "13133"       # health_check
-    depends_on:
-      kafka:
-        condition: service_healthy
-
-  hotrod:
-    image: jaegertracing/example-hotrod:latest
-    container_name: hotrod
-    ports:
-      - "9000:8080"
-    command: ["all"]
-    environment:
-      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
-
-  load-hotrod:
-    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
-    container_name: load-hotrod
-    hostname: load-hotrod
-    ports:
-      - "8089:8089"
-    environment:
-      ATTACKED_HOST: http://hotrod:8080
-      LOCUST_MODE: standalone
-      NO_PROXY: standalone
-      TASK_DELAY_FROM: 5
-      TASK_DELAY_TO: 30
-      QUIET_MODE: "${QUIET_MODE:-false}"
-      LOCUST_OPTS: "--headless -u 10 -r 1"
-    volumes:
-      - ./locust-scripts:/locust
```
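For context, the install-script hunks later in this compare still reference compose files under `./docker/druid-kafka-setup/`; before this removal, a stack like the one above would have been brought up roughly like this (a sketch — the exact filename of the deleted compose file is not shown in this hunk, so the `-tiny` name is taken from the script paths below):

```bash
# Bring up the removed Kafka + Druid stack (filename assumed from paths
# referenced later in this diff)
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d

# Confirm the router is serving datasources (the target of its healthcheck)
curl -s http://localhost:8888/druid/coordinator/v1/datasources
```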
```diff
@@ -1 +0,0 @@
-[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
```
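This single line is a Druid retention rule set: keep the last three days (`P3D`, one replica in the default tier) and drop everything older. The `set-retention` service above posted it with curl; run by hand against the published router port, the equivalent call would be:

```bash
# Apply the retention rules to the flattened_spans datasource
# (same endpoint the set-retention container posts to)
curl -X POST -H 'Content-Type: application/json' \
     -d @retention-spec.json \
     http://localhost:8888/druid/coordinator/v1/rules/flattened_spans
```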
```diff
@@ -1,69 +0,0 @@
-{
-  "type": "kafka",
-  "dataSchema": {
-    "dataSource": "flattened_spans",
-    "parser": {
-      "type": "string",
-      "parseSpec": {
-        "format": "json",
-        "timestampSpec": {
-          "column": "StartTimeUnixNano",
-          "format": "nano"
-        },
-        "dimensionsSpec": {
-          "dimensions": [
-            "TraceId",
-            "SpanId",
-            "ParentSpanId",
-            "Name",
-            "ServiceName",
-            "References",
-            "Tags",
-            "ExternalHttpMethod",
-            "ExternalHttpUrl",
-            "Component",
-            "DBSystem",
-            "DBName",
-            "DBOperation",
-            "PeerService",
-            {
-              "type": "string",
-              "name": "TagsKeys",
-              "multiValueHandling": "ARRAY"
-            },
-            {
-              "type": "string",
-              "name": "TagsValues",
-              "multiValueHandling": "ARRAY"
-            },
-            { "name": "DurationNano", "type": "Long" },
-            { "name": "Kind", "type": "int" },
-            { "name": "StatusCode", "type": "int" }
-          ]
-        }
-      }
-    },
-    "metricsSpec" : [
-      { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
-    ],
-    "granularitySpec": {
-      "type": "uniform",
-      "segmentGranularity": "DAY",
-      "queryGranularity": "NONE",
-      "rollup": false
-    }
-  },
-  "tuningConfig": {
-    "type": "kafka",
-    "reportParseExceptions": true
-  },
-  "ioConfig": {
-    "topic": "flattened_spans",
-    "replicas": 1,
-    "taskDuration": "PT20M",
-    "completionTimeout": "PT30M",
-    "consumerProperties": {
-      "bootstrap.servers": "kafka:9092"
-    }
-  }
-}
```
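The spec above wires Druid's Kafka indexing service to the `flattened_spans` topic. The `create-supervisor` container posted it on startup; the equivalent manual submission through the router is:

```bash
# Submit the Kafka ingestion supervisor to Druid
# (same endpoint the create-supervisor container posts to)
curl -X POST -H 'Content-Type: application/json' \
     -d @supervisor-spec.json \
     http://localhost:8888/druid/indexer/v1/supervisor
```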
```diff
@@ -1,53 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=512m
-DRUID_XMS=512m
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=768m
-
-druid_emitter_logging_logLevel=debug
-
-druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-druid_processing_buffer_sizeBytes=100MiB
-
-druid_storage_type=local
-druid_storage_storageDirectory=/opt/druid/deepStorage
-druid_indexer_logs_type=file
-druid_indexer_logs_directory=/opt/druid/data/indexing-logs
-
-druid_processing_numThreads=1
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=64m
-DRUID_XMS=64m
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=400m
-
-druid_emitter_logging_logLevel=debug
-
-druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-
-druid_storage_type=local
-druid_storage_storageDirectory=/opt/druid/deepStorage
-druid_indexer_logs_type=file
-druid_indexer_logs_directory=/opt/druid/data/indexing-logs
-
-druid_processing_numThreads=1
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=512m
-DRUID_XMS=512m
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=1280m
-
-druid_emitter_logging_logLevel=debug
-
-druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-druid_processing_buffer_sizeBytes=200MiB
-
-druid_storage_type=local
-druid_storage_storageDirectory=/opt/druid/deepStorage
-druid_indexer_logs_type=file
-druid_indexer_logs_directory=/opt/druid/data/indexing-logs
-
-druid_processing_numThreads=2
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=1g
-DRUID_XMS=1g
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=2g
-
-druid_emitter_logging_logLevel=debug
-
-druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-druid_processing_buffer_sizeBytes=200MiB
-
-druid_storage_type=local
-druid_storage_storageDirectory=/opt/druid/deepStorage
-druid_indexer_logs_type=file
-druid_indexer_logs_directory=/opt/druid/data/indexing-logs
-
-druid_processing_numThreads=2
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=128m
-DRUID_XMS=128m
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=128m
-
-druid_emitter_logging_logLevel=debug
-
-druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-
-druid_storage_type=local
-druid_storage_storageDirectory=/opt/druid/deepStorage
-druid_indexer_logs_type=file
-druid_indexer_logs_directory=/opt/druid/data/indexing-logs
-
-druid_processing_numThreads=1
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=512m
-DRUID_XMS=512m
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=400m
-
-druid_emitter_logging_logLevel=debug
-
-# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-druid_processing_buffer_sizeBytes=50MiB
-
-
-druid_processing_numThreads=1
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,26 +0,0 @@
-# For S3 storage
-
-# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
-
-# druid_storage_type=s3
-# druid_storage_bucket=<s3-bucket-name>
-# druid_storage_baseKey=druid/segments
-
-# AWS_ACCESS_KEY_ID=<s3-access-id>
-# AWS_SECRET_ACCESS_KEY=<s3-access-key>
-# AWS_REGION=<s3-aws-region>
-
-# druid_indexer_logs_type=s3
-# druid_indexer_logs_s3Bucket=<s3-bucket-name>
-# druid_indexer_logs_s3Prefix=druid/indexing-logs
-
-# -----------------------------------------------------------
-# For local storage
-druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-druid_storage_type=local
-druid_storage_storageDirectory=/opt/data/segments
-druid_indexer_logs_type=file
-druid_indexer_logs_directory=/opt/data/indexing-logs
@@ -1,49 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=64m
-DRUID_XMS=64m
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=400m
-
-druid_emitter_logging_logLevel=debug
-
-# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-
-
-druid_processing_numThreads=1
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=512m
-DRUID_XMS=512m
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=400m
-
-druid_emitter_logging_logLevel=debug
-
-# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-druid_processing_buffer_sizeBytes=50MiB
-
-druid_processing_numThreads=1
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,50 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=64m
-DRUID_XMS=64m
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=400m
-
-druid_emitter_logging_logLevel=debug
-
-# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-
-
-druid_processing_numThreads=1
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Java tuning
-DRUID_XMX=64m
-DRUID_XMS=64m
-DRUID_MAXNEWSIZE=256m
-DRUID_NEWSIZE=256m
-DRUID_MAXDIRECTMEMORYSIZE=128m
-
-druid_emitter_logging_logLevel=debug
-
-# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
-
-druid_zk_service_host=zookeeper
-
-druid_metadata_storage_host=
-druid_metadata_storage_type=postgresql
-druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
-druid_metadata_storage_connector_user=druid
-druid_metadata_storage_connector_password=FoolishPassword
-
-druid_coordinator_balancer_strategy=cachingCost
-
-druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
-druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
-
-
-druid_processing_numThreads=1
-druid_processing_numMergeBuffers=2
-
-DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
```
```diff
@@ -1,51 +0,0 @@
-receivers:
-  otlp:
-    protocols:
-      grpc:
-      http:
-  jaeger:
-    protocols:
-      grpc:
-      thrift_http:
-processors:
-  batch:
-    send_batch_size: 1000
-    timeout: 10s
-  memory_limiter:
-    # Same as --mem-ballast-size-mib CLI argument
-    ballast_size_mib: 683
-    # 80% of maximum memory up to 2G
-    limit_mib: 1500
-    # 25% of limit up to 2G
-    spike_limit_mib: 512
-    check_interval: 5s
-  queued_retry:
-    num_workers: 4
-    queue_size: 100
-    retry_on_failure: true
-extensions:
-  health_check: {}
-  zpages: {}
-exporters:
-  kafka/traces:
-    brokers:
-      - kafka:9092
-    topic: 'otlp_spans'
-    protocol_version: 2.0.0
-
-  kafka/metrics:
-    brokers:
-      - kafka:9092
-    topic: 'otlp_metrics'
-    protocol_version: 2.0.0
-service:
-  extensions: [health_check, zpages]
-  pipelines:
-    traces:
-      receivers: [jaeger, otlp]
-      processors: [memory_limiter, batch, queued_retry]
-      exporters: [kafka/traces]
-    metrics:
-      receivers: [otlp]
-      processors: [batch]
-      exporters: [kafka/metrics]
```
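With the `health_check` and `zpages` extensions enabled above, and ports 13133 and 55679 published in the compose file, a running collector could be probed like this; the endpoint paths are those extensions' documented defaults, assumed here rather than taken from this diff:

```bash
# health_check extension: returns HTTP 200 while the collector is up
curl -sf http://localhost:13133/ && echo "collector healthy"

# zpages extension: live span activity, normally viewed in a browser
curl -s http://localhost:55679/debug/tracez | head
```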
```diff
@@ -36,9 +36,9 @@ is_mac() {
     [[ $OSTYPE == darwin* ]]
 }
 
-is_arm64(){
-    [[ `uname -m` == 'arm64' ]]
-}
+# is_arm64(){
+#     [[ `uname -m` == 'arm64' ]]
+# }
 
 check_os() {
     if is_mac; then
@@ -102,7 +102,7 @@ check_os() {
 # The script should error out in case they aren't available
 check_ports_occupied() {
     local port_check_output
-    local ports_pattern="80|3301|8080"
+    local ports_pattern="3301|4317"
 
     if is_mac; then
         port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -119,7 +119,7 @@ check_ports_occupied() {
         send_event "port_not_available"
 
         echo "+++++++++++ ERROR ++++++++++++++++++++++"
-        echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports."
+        echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
         echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
         echo "++++++++++++++++++++++++++++++++++++++++"
         echo ""
```
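The new pattern narrows the pre-flight check to the ports SigNoz actually binds: 3301 for the frontend and 4317 for the OTLP gRPC receiver. To check them by hand on a Linux host, something along these lines works (a sketch using `ss`; the script itself uses `netstat`):

```bash
# List any listeners already holding the ports the installer now checks
ss -ltn | grep -E ':(3301|4317)\b'
```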
```diff
@@ -133,58 +133,44 @@ install_docker() {
 
 
     if [[ $package_manager == apt-get ]]; then
-        apt_cmd="sudo apt-get --yes --quiet"
+        apt_cmd="$sudo_cmd apt-get --yes --quiet"
         $apt_cmd update
         $apt_cmd install software-properties-common gnupg-agent
-        curl -fsSL "https://download.docker.com/linux/$os/gpg" | sudo apt-key add -
-        sudo add-apt-repository \
+        curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
+        $sudo_cmd add-apt-repository \
             "deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
         $apt_cmd update
         echo "Installing docker"
         $apt_cmd install docker-ce docker-ce-cli containerd.io
     elif [[ $package_manager == zypper ]]; then
-        zypper_cmd="sudo zypper --quiet --no-gpg-checks --non-interactive"
+        zypper_cmd="$sudo_cmd zypper --quiet --no-gpg-checks --non-interactive"
         echo "Installing docker"
         if [[ $os == sles ]]; then
             os_sp="$(cat /etc/*-release | awk -F= '$1 == "VERSION_ID" { gsub(/"/, ""); print $2; exit }')"
             os_arch="$(uname -i)"
-            sudo SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
+            SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
         fi
         $zypper_cmd install docker docker-runc containerd
-        sudo systemctl enable docker.service
+        $sudo_cmd systemctl enable docker.service
     elif [[ $package_manager == yum && $os == 'amazon linux' ]]; then
         echo
         echo "Amazon Linux detected ... "
         echo
-        # sudo yum install docker
-        # sudo service docker start
-        sudo amazon-linux-extras install docker
+        # yum install docker
+        # service docker start
+        $sudo_cmd yum install -y amazon-linux-extras
+        $sudo_cmd amazon-linux-extras enable docker
+        $sudo_cmd yum install -y docker
     else
 
-        yum_cmd="sudo yum --assumeyes --quiet"
+        yum_cmd="$sudo_cmd yum --assumeyes --quiet"
         $yum_cmd install yum-utils
-        sudo yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
+        $sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
         echo "Installing docker"
         $yum_cmd install docker-ce docker-ce-cli containerd.io
 
     fi
 
-}
-
-install_docker_machine() {
-
-    echo "\nInstalling docker machine ..."
-
-    if [[ $os == "Mac" ]];then
-        curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine
-        chmod +x /usr/local/bin/docker-machine
-    else
-        curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine
-        chmod +x /tmp/docker-machine
-        sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
-
-    fi
-
-
 }
 
 install_docker_compose() {
@@ -192,9 +178,9 @@ install_docker_compose() {
     if [[ ! -f /usr/bin/docker-compose ]];then
         echo "++++++++++++++++++++++++"
         echo "Installing docker-compose"
-        sudo curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
-        sudo chmod +x /usr/local/bin/docker-compose
-        sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
+        $sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+        $sudo_cmd chmod +x /usr/local/bin/docker-compose
+        $sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
         echo "docker-compose installed!"
         echo ""
     fi
@@ -210,16 +196,23 @@ install_docker_compose() {
 }
 
 start_docker() {
-    echo "Starting Docker ..."
-    if [ $os = "Mac" ]; then
+    echo -e "🐳 Starting Docker ...\n"
+    if [[ $os == "Mac" ]]; then
         open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
     else
-        if ! sudo systemctl is-active docker.service > /dev/null; then
+        if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
             echo "Starting docker service"
-            sudo systemctl start docker.service
+            $sudo_cmd systemctl start docker.service
+        fi
+        if [[ -z $sudo_cmd ]]; then
+            docker ps > /dev/null && true
+            if [[ $? -ne 0 ]]; then
+                request_sudo
+            fi
         fi
     fi
 }
 
 wait_for_containers_start() {
     local timeout=$1
 
@@ -229,16 +222,6 @@ wait_for_containers_start() {
         if [[ status_code -eq 200 ]]; then
             break
         else
-            if [ $setup_type == 'druid' ]; then
-                SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
-                LEN_SUPERVISORS="${#SUPERVISORS}"
-
-                if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
-                    echo -e "\n🟠 Supervisors taking time to start ⏳ ... let's wait for some more time ⏱️\n\n"
-                    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d
-                fi
-            fi
-
             echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
         fi
         ((timeout--))
@@ -249,31 +232,26 @@
 }
 
 bye() { # Prints a friendly good bye message and exits the script.
-    if [ "$?" -ne 0 ]; then
+    if [[ "$?" -ne 0 ]]; then
         set +o errexit
 
         echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
         echo ""
-        if [ $setup_type == 'clickhouse' ]; then
-            if is_arm64; then
-                echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a"
-            else
-                echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
-            fi
-        else
-            echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
-        fi
+        echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
         # echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
         echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
         echo "++++++++++++++++++++++++++++++++++++++++"
 
-        echo -e "\n📨 Please share your email to receive support with the installation"
-        read -rp 'Email: ' email
-
-        while [[ $email == "" ]]
-        do
-            read -rp 'Email: ' email
-        done
+        if [[ $email == "" ]]; then
+            echo -e "\n📨 Please share your email to receive support with the installation"
+            read -rp 'Email: ' email
+
+            while [[ $email == "" ]]
+            do
+                read -rp 'Email: ' email
+            done
+        fi
 
         send_event "installation_support"
@@ -284,33 +262,73 @@ bye() { # Prints a friendly good bye message and exits the script.
     fi
 }
 
+request_sudo() {
+    if hash sudo 2>/dev/null; then
+        echo -e "\n\n🙇 We will need sudo access to complete the installation."
+        if (( $EUID != 0 )); then
+            sudo_cmd="sudo"
+            echo -e "Please enter your sudo password, if prompt."
+            $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
+            if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
+                echo "Need sudo privileges to proceed with the installation."
+                exit 1;
+            fi
+
+            echo -e "Got it! Thanks!! 🙏\n"
+            echo -e "Okay! We will bring up the SigNoz cluster from here 🚀\n"
+        fi
+    fi
+}
+
+echo ""
 echo -e "👋 Thank you for trying out SigNoz! "
 echo ""
 
+sudo_cmd=""
+
+# Check sudo permissions
+if (( $EUID != 0 )); then
+    echo "🟡 Running installer with non-sudo permissions."
+    echo " In case of any failure or prompt, please consider running the script with sudo privileges."
+    echo ""
+else
+    sudo_cmd="sudo"
+fi
+
 # Checking OS and assigning package manager
 desired_os=0
 os=""
 email=""
-echo -e "Detecting your OS ..."
+echo -e "🌏 Detecting your OS ...\n"
 check_os
 
 # Obtain unique installation id
 sysinfo="$(uname -a)"
-if [ $? -ne 0 ]; then
+if [[ $? -ne 0 ]]; then
     uuid="$(uuidgen)"
     uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
-    SIGNOZ_INSTALLATION_ID="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
+    sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
+fi
+
+digest_cmd=""
+if hash shasum 2>/dev/null; then
+    digest_cmd="shasum -a 256"
+elif hash sha256sum 2>/dev/null; then
+    digest_cmd="sha256sum"
+elif hash openssl 2>/dev/null; then
+    digest_cmd="openssl dgst -sha256"
+fi
+
+if [[ -z $digest_cmd ]]; then
+    SIGNOZ_INSTALLATION_ID="$sysinfo"
 else
-    SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | shasum | cut -d ' ' -f1)
+    SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
 fi
 
 # echo ""
 
 # echo -e "👉 ${RED}Two ways to go forward\n"
 # echo -e "${RED}1) ClickHouse as database (default)\n"
-# echo -e "${RED}2) Kafka + Druid as datastore \n"
 # read -p "⚙️ Enter your preference (1/2):" choice_setup
 
 # while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
```
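The pattern introduced in this hunk is worth noting: instead of hard-coding `sudo`, the script resolves a `$sudo_cmd` prefix once (empty for root, `"sudo"` otherwise, upgraded on demand by `request_sudo`) and reuses it everywhere. A minimal sketch of the same idea, condensed from the diff above:

```bash
#!/usr/bin/env bash
# Resolve the privilege prefix once: empty when already root, "sudo" otherwise.
sudo_cmd=""
if (( EUID != 0 )); then
    sudo_cmd="sudo"
fi

# Every privileged command is then written once, prefix included.
$sudo_cmd systemctl start docker.service
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
```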
```diff
@@ -323,8 +341,6 @@ fi
 
 # if [[ $choice_setup == "1" || $choice_setup == "" ]];then
 #    setup_type='clickhouse'
-# else
-#    setup_type='druid'
 # fi
 
 setup_type='clickhouse'
@@ -364,13 +380,7 @@ send_event() {
         'installation_error_checks')
             event="Installation Error - Checks"
             error="Containers not started"
-            if [ $setup_type == 'clickhouse' ]; then
-                others='"data": "some_checks",'
-            else
-                supervisors="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
-                datasources="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
-                others='"supervisors": "'"$supervisors"'", "datasources": "'"$datasources"'",'
-            fi
+            others='"data": "some_checks",'
             ;;
         'installation_support')
             event="Installation Support"
@@ -389,7 +399,7 @@ send_event() {
             ;;
     esac
 
-    if [ "$error" != "" ]; then
+    if [[ "$error" != "" ]]; then
         error='"error": "'"$error"'", '
     fi
 
@@ -412,15 +422,28 @@ fi
 
 # Check is Docker daemon is installed and available. If not, the install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
 if ! is_command_present docker; then
 
     if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
+        request_sudo
         install_docker
-    else
+        # enable docker without sudo from next reboot
+        sudo usermod -aG docker "${USER}"
+    elif is_mac; then
         echo ""
         echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
         echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
         echo "https://docs.docker.com/docker-for-mac/install/"
         echo "++++++++++++++++++++++++++++++++++++++++++++++++"
+        send_event "docker_not_installed"
+        exit 1
+    else
+        echo ""
+        echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
+        echo "Docker must be installed manually on your machine to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
+        echo "https://docs.docker.com/get-docker/"
+        echo "++++++++++++++++++++++++++++++++++++++++++++++++"
         send_event "docker_not_installed"
         exit 1
     fi
@@ -428,43 +451,25 @@ fi
 
 # Install docker-compose
 if ! is_command_present docker-compose; then
+    request_sudo
     install_docker_compose
 fi
 
 start_docker
 
-# sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
+# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
 
 echo ""
-echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
-if [ $setup_type == 'clickhouse' ]; then
-    if is_arm64; then
-        sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull
-    else
-        sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
-    fi
-else
-    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
-fi
+echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
+$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
 
 echo ""
 echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
 echo
 # The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
 # script doesn't exit because this command looks like it failed to do it's thing.
-if [ $setup_type == 'clickhouse' ]; then
-    if is_arm64; then
-        sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true
-    else
-        sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
-    fi
-else
-    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
-fi
+$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
 
 wait_for_containers_start 60
 echo ""
@@ -473,11 +478,9 @@ if [[ $status_code -ne 200 ]]; then
     echo "+++++++++++ ERROR ++++++++++++++++++++++"
     echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
```
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
|
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
|
||||||
echo ""
|
echo ""
|
||||||
if [ $setup_type == 'clickhouse' ]; then
|
|
||||||
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
|
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
|
||||||
else
|
|
||||||
echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
|
|
||||||
fi
|
|
||||||
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
|
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
|
||||||
echo "or reach us on SigNoz for support https://signoz.io/slack"
|
echo "or reach us on SigNoz for support https://signoz.io/slack"
|
||||||
echo "++++++++++++++++++++++++++++++++++++++++"
|
echo "++++++++++++++++++++++++++++++++++++++++"
|
||||||
@@ -495,15 +498,7 @@ else
|
|||||||
echo -e "🟢 Your frontend is running on http://localhost:3301"
|
echo -e "🟢 Your frontend is running on http://localhost:3301"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
if [ $setup_type == 'clickhouse' ]; then
|
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
||||||
if is_arm64; then
|
|
||||||
echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v"
|
|
||||||
else
|
|
||||||
echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
|
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
 node_modules
 .vscode
 build
 .env
+.git
@@ -1,2 +1,3 @@
 node_modules
 build
+*.typegen.ts
@@ -3,16 +3,23 @@ module.exports = {
 		browser: true,
 		es2021: true,
 		node: true,
+		'jest/globals': true,
 	},
 	extends: [
+		'airbnb',
+		'airbnb-typescript',
 		'eslint:recommended',
 		'plugin:react/recommended',
 		'plugin:@typescript-eslint/recommended',
 		'plugin:@typescript-eslint/eslint-recommended',
 		'plugin:prettier/recommended',
+		'plugin:sonarjs/recommended',
+		'plugin:import/errors',
+		'plugin:import/warnings',
 	],
 	parser: '@typescript-eslint/parser',
 	parserOptions: {
+		project: './tsconfig.json',
 		ecmaFeatures: {
 			jsx: true,
 		},
@@ -25,10 +32,17 @@ module.exports = {
 		'simple-import-sort',
 		'react-hooks',
 		'prettier',
+		'jest',
 	],
 	settings: {
 		react: {
-			version: 'latest',
+			version: 'detect',
 		},
+		'import/resolver': {
+			node: {
+				paths: ['src'],
+				extensions: ['.js', '.jsx', '.ts', '.tsx'],
+			},
+		},
 	},
 	rules: {
@@ -40,9 +54,13 @@ module.exports = {
 		],
 		'react/prop-types': 'off',
 		'@typescript-eslint/explicit-function-return-type': 'error',
-		'@typescript-eslint/no-var-requires': 0,
+		'@typescript-eslint/no-var-requires': 'error',
-		'react/no-array-index-key': 2,
+		'react/no-array-index-key': 'error',
-		'linebreak-style': ['error', process.platform === 'win32' ? 'windows' : 'unix'],
+		'linebreak-style': [
+			'error',
+			process.platform === 'win32' ? 'windows' : 'unix',
+		],
+		'@typescript-eslint/default-param-last': 'off',
 
 		// simple sort error
 		'simple-import-sort/imports': 'error',
@@ -50,7 +68,45 @@ module.exports = {
 
 		// hooks
 		'react-hooks/rules-of-hooks': 'error',
-		'react-hooks/exhaustive-deps': 'warn',
+		'react-hooks/exhaustive-deps': 'error',
 
+		// airbnb
+		'no-underscore-dangle': 'off',
+		'no-console': 'off',
+		'import/prefer-default-export': 'off',
+		'import/extensions': [
+			'error',
+			'ignorePackages',
+			{
+				js: 'never',
+				jsx: 'never',
+				ts: 'never',
+				tsx: 'never',
+			},
+		],
+		'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
+		'jsx-a11y/label-has-associated-control': [
+			'error',
+			{
+				required: {
+					some: ['nesting', 'id'],
+				},
+			},
+		],
+		'jsx-a11y/label-has-for': [
+			'error',
+			{
+				required: {
+					some: ['nesting', 'id'],
+				},
+			},
+		],
+		'@typescript-eslint/no-unused-vars': 'error',
+
+		// eslint rules need to remove
+		'no-shadow': 'off',
+		'@typescript-eslint/no-shadow': 'off',
+		'import/no-cycle': 'off',
 
 		'prettier/prettier': [
 			'error',
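A quick illustration of what the new `import/resolver` settings buy: eslint-plugin-import now resolves bare module paths against `src`, so the absolute-import style used throughout the frontend lint-checks cleanly. A minimal sketch, assuming the existing `constants/routes` module under `src` (the file name here is hypothetical):

```ts
// Hypothetical src/lib/isSignupRoute.ts; 'constants/routes' resolves to
// src/constants/routes via the resolver's paths: ['src'] entry.
import ROUTES from 'constants/routes';

// Explicit return type keeps '@typescript-eslint/explicit-function-return-type' satisfied.
export default function isSignupRoute(pathname: string): boolean {
	return pathname === ROUTES.SIGN_UP;
}
```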
frontend/.husky/commit-msg (new executable file, +4)
@@ -0,0 +1,4 @@
+#!/bin/sh
+. "$(dirname "$0")/_/husky.sh"
+
+cd frontend && npm run commitlint

frontend/.husky/pre-commit (new executable file, +4)
@@ -0,0 +1,4 @@
+#!/bin/sh
+. "$(dirname "$0")/_/husky.sh"
+
+cd frontend && yarn lint-staged

@@ -1 +1 @@
-12.13.0
+16.15.0
@@ -1,5 +1,5 @@
-# stage1 as builder
-FROM node:12.18.0 as builder
+# Builder stage
+FROM node:16.15.0-slim as builder
 
 # Add Maintainer Info
 LABEL maintainer="signoz"
@@ -9,24 +9,23 @@ ARG TARGETARCH
 
 WORKDIR /frontend
 
-# copy the package.json to install dependencies
+# Copy the package.json to install dependencies
 COPY package.json ./
 
 # Install the dependencies and make the folder
-RUN yarn install
+RUN CI=1 yarn install
 
 COPY . .
 
 # Build the project and copy the files
 RUN yarn build
 
-FROM nginx:1.18-alpine
 
-#!/bin/sh
+FROM nginx:1.18-alpine
 
 COPY conf/default.conf /etc/nginx/conf.d/default.conf
 
-## Remove default nginx index page
+# Remove default nginx index page
 RUN rm -rf /usr/share/nginx/html/*
 
 # Copy from the stahg 1
@@ -34,4 +33,4 @@ COPY --from=builder /frontend/build /usr/share/nginx/html
 
 EXPOSE 3301
 
 ENTRYPOINT ["nginx", "-g", "daemon off;"]
frontend/babel.config.js (new file, +6)
@@ -0,0 +1,6 @@
+module.exports = {
+	presets: [
+		['@babel/preset-env', { targets: { node: 'current' } }],
+		'@babel/preset-typescript',
+	],
+};

frontend/commitlint.config.js (new file, +1)
@@ -0,0 +1 @@
+module.exports = { extends: ['@commitlint/config-conventional'] };
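Taken together, the two hooks wire commit hygiene into git: `pre-commit` runs lint-staged (ESLint `--fix` over staged files, per the `lint-staged` block added to `package.json` further down), while `commit-msg` pipes the message through commitlint's Conventional Commits preset, so a message shaped like `feat(dashboard): import JSON flow` would pass and a bare `update stuff` would be rejected. (These example messages are illustrative, not taken from the diff.)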
@@ -1,3 +0,0 @@
-{
-	"video": false
-}

@@ -1,47 +0,0 @@
-const Login = ({ email, name }: LoginProps): void => {
-	const emailInput = cy.findByPlaceholderText('mike@netflix.com');
-
-	emailInput.then((emailInput) => {
-		const element = emailInput[0];
-		// element is present
-		expect(element).not.undefined;
-		expect(element.nodeName).to.be.equal('INPUT');
-	});
-	emailInput.type(email).then((inputElements) => {
-		const inputElement = inputElements[0];
-		const inputValue = inputElement.getAttribute('value');
-		expect(inputValue).to.be.equals(email);
-	});
-
-	const firstNameInput = cy.findByPlaceholderText('Mike');
-	firstNameInput.then((firstNameInput) => {
-		const element = firstNameInput[0];
-		// element is present
-		expect(element).not.undefined;
-		expect(element.nodeName).to.be.equal('INPUT');
-	});
-
-	firstNameInput.type(name).then((inputElements) => {
-		const inputElement = inputElements[0];
-		const inputValue = inputElement.getAttribute('value');
-		expect(inputValue).to.be.equals(name);
-	});
-
-	const gettingStartedButton = cy.findByText('Get Started');
-	gettingStartedButton.click();
-
-	cy
-		.intercept('POST', '/api/v1/user?email*', {
-			statusCode: 200,
-		})
-		.as('defaultUser');
-
-	cy.wait('@defaultUser');
-};
-
-export interface LoginProps {
-	email: string;
-	name: string;
-}
-
-export default Login;

@@ -1,49 +0,0 @@
-import {
-	getDefaultOption,
-	getOptions,
-} from 'container/Header/DateTimeSelection/config';
-// import { AppState } from 'store/reducers';
-
-const CheckRouteDefaultGlobalTimeOptions = ({
-	route,
-}: CheckRouteDefaultGlobalTimeOptionsProps): void => {
-	cy.visit(Cypress.env('baseUrl') + route);
-
-	const allOptions = getOptions(route);
-
-	const defaultValue = getDefaultOption(route);
-
-	const defaultSelectedOption = allOptions.find((e) => e.value === defaultValue);
-
-	expect(defaultSelectedOption).not.undefined;
-
-	cy
-		.findAllByTestId('dropDown')
-		.find('span')
-		.then((el) => {
-			const elements = el.get();
-
-			const item = elements[1];
-
-			expect(defaultSelectedOption?.label).to.be.equals(
-				item.innerText,
-				'Default option is not matching',
-			);
-		});
-
-	// cy
-	// 	.window()
-	// 	.its('store')
-	// 	.invoke('getState')
-	// 	.then((e: AppState) => {
-	// 		const { globalTime } = e;
-	// 		const { maxTime, minTime } = globalTime;
-	// 		// @TODO match the global min time and max time according to the selected option
-	// 	});
-};
-
-export interface CheckRouteDefaultGlobalTimeOptionsProps {
-	route: string;
-}
-
-export default CheckRouteDefaultGlobalTimeOptions;
@@ -1,21 +0,0 @@
-{
-	"data": [
-		{
-			"created_at": 1638083159246,
-			"data": "{}",
-			"id": 1,
-			"name": "First Channels",
-			"type": "slack",
-			"updated_at": 1638083159246
-		},
-		{
-			"created_at": 1638083159246,
-			"data": "{}",
-			"id": 2,
-			"name": "Second Channels",
-			"type": "Slack",
-			"updated_at": 1638083159246
-		}
-	],
-	"message": "Success"
-}

@@ -1,35 +0,0 @@
-[
-	{
-		"serviceName": "frontend",
-		"p99": 1134610000,
-		"avgDuration": 744523000,
-		"numCalls": 267,
-		"callRate": 0.89,
-		"numErrors": 0,
-		"errorRate": 0,
-		"num4XX": 0,
-		"fourXXRate": 0
-	},
-	{
-		"serviceName": "customer",
-		"p99": 734422400,
-		"avgDuration": 348678530,
-		"numCalls": 267,
-		"callRate": 0.89,
-		"numErrors": 0,
-		"errorRate": 0,
-		"num4XX": 0,
-		"fourXXRate": 0
-	},
-	{
-		"serviceName": "driver",
-		"p99": 239234080,
-		"avgDuration": 204662290,
-		"numCalls": 267,
-		"callRate": 0.89,
-		"numErrors": 0,
-		"errorRate": 0,
-		"num4XX": 0,
-		"fourXXRate": 0
-	}
-]

@@ -1,28 +0,0 @@
-{
-	"status": "success",
-	"data": {
-		"rules": [
-			{
-				"labels": { "severity": "warning" },
-				"annotations": {},
-				"state": "firing",
-				"name": "First Rule",
-				"id": 1
-			},
-			{
-				"labels": { "severity": "warning" },
-				"annotations": {},
-				"state": "firing",
-				"name": "Second Rule",
-				"id": 2
-			},
-			{
-				"labels": { "severity": "P0" },
-				"annotations": {},
-				"state": "firing",
-				"name": "Third Rule",
-				"id": 3
-			}
-		]
-	}
-}

@@ -1 +0,0 @@
-{ "status": "success", "data": { "resultType": "matrix", "result": [] } }

@@ -1,29 +0,0 @@
-{
-	"status": "success",
-	"data": {
-		"resultType": "matrix",
-		"result": [
-			{
-				"metric": {},
-				"values": [
-					[1634741764.961, "0.9"],
-					[1634741824.961, "0.9"],
-					[1634741884.961, "0.8666666666666667"],
-					[1634741944.961, "1"],
-					[1634742004.961, "0.9166666666666666"],
-					[1634742064.961, "0.95"],
-					[1634742124.961, "0.9333333333333333"],
-					[1634742184.961, "0.95"],
-					[1634742244.961, "1.0333333333333334"],
-					[1634742304.961, "0.9333333333333333"],
-					[1634742364.961, "0.9166666666666666"],
-					[1634742424.961, "0.9"],
-					[1634742484.961, "1.0166666666666666"],
-					[1634742544.961, "0.8333333333333334"],
-					[1634742604.961, "0.9166666666666666"],
-					[1634742664.961, "0.95"]
-				]
-			}
-		]
-	}
-}

@@ -1,62 +0,0 @@
-[
-	{
-		"timestamp": 1634742600000000000,
-		"p50": 720048500,
-		"p95": 924409540,
-		"p99": 974744300,
-		"numCalls": 48,
-		"callRate": 0.8,
-		"numErrors": 0,
-		"errorRate": 0
-	},
-	{
-		"timestamp": 1634742540000000000,
-		"p50": 712614000,
-		"p95": 955580700,
-		"p99": 1045595400,
-		"numCalls": 59,
-		"callRate": 0.98333335,
-		"numErrors": 0,
-		"errorRate": 0
-	},
-	{
-		"timestamp": 1634742480000000000,
-		"p50": 720842000,
-		"p95": 887187600,
-		"p99": 943676860,
-		"numCalls": 53,
-		"callRate": 0.8833333,
-		"numErrors": 0,
-		"errorRate": 0
-	},
-	{
-		"timestamp": 1634742420000000000,
-		"p50": 712287000,
-		"p95": 908505540,
-		"p99": 976507650,
-		"numCalls": 58,
-		"callRate": 0.96666664,
-		"numErrors": 0,
-		"errorRate": 0
-	},
-	{
-		"timestamp": 1634742360000000000,
-		"p50": 697125500,
-		"p95": 975581800,
-		"p99": 1190121900,
-		"numCalls": 54,
-		"callRate": 0.9,
-		"numErrors": 0,
-		"errorRate": 0
-	},
-	{
-		"timestamp": 1634742300000000000,
-		"p50": 711592500,
-		"p95": 880559900,
-		"p99": 1100105500,
-		"numCalls": 40,
-		"callRate": 0.6666667,
-		"numErrors": 0,
-		"errorRate": 0
-	}
-]

@@ -1,9 +0,0 @@
-[
-	{
-		"p50": 710824000,
-		"p95": 1003231400,
-		"p99": 1231265500,
-		"numCalls": 299,
-		"name": "HTTP GET /dispatch"
-	}
-]
@@ -1,24 +0,0 @@
-/// <reference types="cypress" />
-import ROUTES from 'constants/routes';
-
-describe('App Layout', () => {
-	beforeEach(() => {
-		cy.visit(Cypress.env('baseUrl'));
-	});
-
-	it('Check the user is in Logged Out State', async () => {
-		cy.location('pathname').then((e) => {
-			expect(e).to.be.equal(ROUTES.SIGN_UP);
-		});
-	});
-
-	it('Logged In State', () => {
-		const testEmail = 'test@test.com';
-		const firstName = 'Test';
-
-		cy.login({
-			email: testEmail,
-			name: firstName,
-		});
-	});
-});

@@ -1,52 +0,0 @@
-/// <reference types="cypress" />
-
-import ROUTES from 'constants/routes';
-
-import defaultAllChannels from '../../fixtures/defaultAllChannels.json';
-
-describe('Channels', () => {
-	beforeEach(() => {
-		window.localStorage.setItem('isLoggedIn', 'yes');
-
-		cy.visit(Cypress.env('baseUrl') + ROUTES.ALL_CHANNELS);
-	});
-
-	it('Channels', () => {
-		cy
-			.intercept('**channels**', {
-				statusCode: 200,
-				fixture: 'defaultAllChannels',
-			})
-			.as('All Channels');
-
-		cy.wait('@All Channels');
-
-		cy
-			.get('.ant-tabs-tab')
-			.children()
-			.then((e) => {
-				const child = e.get();
-
-				const secondChild = child[1];
-
-				expect(secondChild.outerText).to.be.equals('Alert Channels');
-
-				expect(secondChild.ariaSelected).to.be.equals('true');
-			});
-
-		cy
-			.get('tbody')
-			.should('be.visible')
-			.then((e) => {
-				const allChildren = e.children().get();
-				expect(allChildren.length).to.be.equals(defaultAllChannels.data.length);
-
-				allChildren.forEach((e, index) => {
-					expect(e.firstChild?.textContent).not.null;
-					expect(e.firstChild?.textContent).to.be.equals(
-						defaultAllChannels.data[index].name,
-					);
-				});
-			});
-	});
-});

@@ -1,44 +0,0 @@
-/// <reference types="cypress" />
-import ROUTES from 'constants/routes';
-
-describe('default time', () => {
-	beforeEach(() => {
-		window.localStorage.setItem('isLoggedIn', 'yes');
-	});
-
-	it('Metrics Page default time', () => {
-		cy.checkDefaultGlobalOption({
-			route: ROUTES.APPLICATION,
-		});
-	});
-
-	it('Dashboard Page default time', () => {
-		cy.checkDefaultGlobalOption({
-			route: ROUTES.ALL_DASHBOARD,
-		});
-	});
-
-	it('Trace Page default time', () => {
-		cy.checkDefaultGlobalOption({
-			route: ROUTES.TRACE,
-		});
-	});
-
-	it('Instrumentation Page default time', () => {
-		cy.checkDefaultGlobalOption({
-			route: ROUTES.INSTRUMENTATION,
-		});
-	});
-
-	it('Service Page default time', () => {
-		cy.checkDefaultGlobalOption({
-			route: ROUTES.SERVICE_MAP,
-		});
-	});
-
-	it('Settings Page default time', () => {
-		cy.checkDefaultGlobalOption({
-			route: ROUTES.SETTINGS,
-		});
-	});
-});

@@ -1,126 +0,0 @@
-/// <reference types="cypress" />
-import getGlobalDropDownFormatedDate from 'lib/getGlobalDropDownFormatedDate';
-import { AppState } from 'store/reducers';
-
-import topEndPoints from '../../fixtures/topEndPoints.json';
-
-describe('Global Time Metrics Application', () => {
-	beforeEach(() => {
-		cy.visit(Cypress.env('baseUrl'));
-
-		const testEmail = 'test@test.com';
-		const firstName = 'Test';
-
-		cy.login({
-			email: testEmail,
-			name: firstName,
-		});
-	});
-
-	it('Metrics Application', async () => {
-		cy
-			.intercept('GET', '/api/v1/services*', {
-				fixture: 'defaultApp.json',
-			})
-			.as('defaultApps');
-
-		cy.wait('@defaultApps');
-
-		//clicking on frontend
-		cy.get('tr:nth-child(1) > td:first-child').click();
-
-		cy
-			.intercept('GET', '/api/v1/service/top_endpoints*', {
-				fixture: 'topEndPoints.json',
-			})
-			.as('topEndPoints');
-
-		cy
-			.intercept('GET', '/api/v1/service/overview?*', {
-				fixture: 'serviceOverview.json',
-			})
-			.as('serviceOverview');
-
-		cy
-			.intercept(
-				'GET',
-				`/api/v1/query_range?query=sum(rate(signoz_latency_count*`,
-				{
-					fixture: 'requestPerSecond.json',
-				},
-			)
-			.as('requestPerSecond');
-
-		cy
-			.window()
-			.its('store')
-			.invoke('getState')
-			.then((e: AppState) => {
-				const { globalTime } = e;
-
-				const { maxTime, minTime } = globalTime;
-
-				// intercepting metrics application call
-
-				cy.wait('@topEndPoints');
-				cy.wait('@serviceOverview');
-				//TODO add errorPercentage also
-				// cy.wait('@errorPercentage');
-				cy.wait('@requestPerSecond');
-
-				cy
-					.get('tbody tr:first-child td:first-child')
-					.then((el) => {
-						const elements = el.get();
-
-						expect(elements.length).to.be.equals(1);
-
-						const element = elements[0];
-
-						expect(element.innerText).to.be.equals(topEndPoints[0].name);
-					})
-					.click();
-
-				cy
-					.findAllByTestId('dropDown')
-					.find('span.ant-select-selection-item')
-					.then((e) => {
-						const elements = e;
-
-						const element = elements[0];
-
-						const customSelectedTime = element.innerText;
-
-						const startTime = new Date(minTime / 1000000);
-						const endTime = new Date(maxTime / 1000000);
-
-						const startString = getGlobalDropDownFormatedDate(startTime);
-						const endString = getGlobalDropDownFormatedDate(endTime);
-
-						const result = `${startString} - ${endString}`;
-
-						expect(customSelectedTime).to.be.equals(result);
-					});
-
-				cy
-					.findByTestId('dropDown')
-					.click()
-					.then(() => {
-						cy.findByTitle('Last 30 min').click();
-					});
-
-				cy
-					.findByTestId('dropDown')
-					.find('span.ant-select-selection-item')
-					.then((e) => {
-						const elements = e;
-
-						const element = elements[0];
-
-						const selectedTime = element.innerText;
-
-						expect(selectedTime).to.be.equals('Last 30 min');
-					});
-			});
-	});
-});

@@ -1,67 +0,0 @@
-/// <reference types="cypress" />
-import ROUTES from 'constants/routes';
-import convertToNanoSecondsToSecond from 'lib/convertToNanoSecondsToSecond';
-
-import defaultApps from '../../fixtures/defaultApp.json';
-
-describe('Metrics', () => {
-	beforeEach(() => {
-		cy.visit(Cypress.env('baseUrl'));
-
-		const testEmail = 'test@test.com';
-		const firstName = 'Test';
-
-		cy.login({
-			email: testEmail,
-			name: firstName,
-		});
-	});
-
-	it('Default Apps', () => {
-		cy
-			.intercept('GET', '/api/v1/services*', {
-				fixture: 'defaultApp.json',
-			})
-			.as('defaultApps');
-
-		cy.wait('@defaultApps');
-
-		cy.location().then((e) => {
-			expect(e.pathname).to.be.equals(ROUTES.APPLICATION);
-
-			cy.get('tbody').then((elements) => {
-				const trElements = elements.children();
-				expect(trElements.length).to.be.equal(defaultApps.length);
-				const getChildren = (row: Element): Element => {
-					if (row.children.length === 0) {
-						return row;
-					}
-					return getChildren(row.children[0]);
-				};
-
-				// this is row element
-				trElements.map((index, element) => {
-					const [
-						applicationElement,
-						p99Element,
-						errorRateElement,
-						rpsElement,
-					] = element.children;
-					const applicationName = getChildren(applicationElement).innerHTML;
-					const p99Name = getChildren(p99Element).innerHTML;
-					const errorRateName = getChildren(errorRateElement).innerHTML;
-					const rpsName = getChildren(rpsElement).innerHTML;
-					const { serviceName, p99, errorRate, callRate } = defaultApps[index];
-					expect(applicationName).to.be.equal(serviceName);
-					expect(p99Name).to.be.equal(convertToNanoSecondsToSecond(p99).toString());
-					expect(errorRateName).to.be.equals(
-						parseFloat(errorRate.toString()).toFixed(2),
-					);
-					expect(rpsName).to.be.equals(callRate.toString());
-				});
-			});
-		});
-	});
-});
-
-export {};

@@ -1,128 +0,0 @@
-/// <reference types="cypress" />
-
-import ROUTES from 'constants/routes';
-
-import defaultRules from '../../fixtures/defaultRules.json';
-
-describe('Alerts', () => {
-	beforeEach(() => {
-		window.localStorage.setItem('isLoggedIn', 'yes');
-
-		cy
-			.intercept('get', '*rules*', {
-				fixture: 'defaultRules',
-			})
-			.as('defaultRules');
-
-		cy.visit(Cypress.env('baseUrl') + `${ROUTES.LIST_ALL_ALERT}`);
-
-		cy.wait('@defaultRules');
-	});
-
-	it('Edit Rules Page Failure', async () => {
-		cy
-			.intercept('**/rules/**', {
-				statusCode: 500,
-			})
-			.as('Get Rules Error');
-
-		cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
-			const firstDelete = e[0];
-			firstDelete.click();
-
-			cy.waitFor('@Get Rules Error');
-
-			cy
-				.window()
-				.location()
-				.then((e) => {
-					expect(e.pathname).to.be.equals(`/alerts/edit/1`);
-				});
-
-			cy.findByText('Something went wrong').then((e) => {
-				expect(e.length).to.be.equals(1);
-			});
-		});
-	});
-
-	it('Edit Rules Page Success', async () => {
-		const text = 'this is the sample value';
-
-		cy
-			.intercept('**/rules/**', {
-				statusCode: 200,
-				body: {
-					data: {
-						data: text,
-					},
-				},
-			})
-			.as('Get Rules Success');
-
-		cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
-			const firstDelete = e[0];
-			firstDelete.click();
-
-			cy.waitFor('@Get Rules Success');
-
-			cy.wait(1000);
-
-			cy.findByText('Save').then((e) => {
-				const [el] = e.get();
-
-				el.click();
-			});
-		});
-	});
-
-	it('All Rules are rendered correctly', async () => {
-		cy
-			.window()
-			.location()
-			.then(({ pathname }) => {
-				expect(pathname).to.be.equals(ROUTES.LIST_ALL_ALERT);
-
-				cy.get('tbody').then((e) => {
-					const tarray = e.children().get();
-
-					expect(tarray.length).to.be.equals(3);
-
-					tarray.forEach(({ children }, index) => {
-						const name = children[1]?.textContent;
-						const label = children[2]?.textContent;
-
-						expect(name).to.be.equals(defaultRules.data.rules[index].name);
-
-						const defaultLabels = defaultRules.data.rules[index].labels;
-
-						expect(label).to.be.equals(defaultLabels['severity']);
-					});
-				});
-			});
-	});
-
-	it('Rules are Deleted', async () => {
-		cy
-			.intercept('**/rules/**', {
-				body: {
-					data: 'Deleted',
-					message: 'Success',
-				},
-				statusCode: 200,
-			})
-			.as('deleteRules');
-
-		cy.get('button.ant-btn.ant-btn-link:first-child').then((e) => {
-			const firstDelete = e[0];
-
-			firstDelete.click();
-		});
-
-		cy.wait('@deleteRules');
-
-		cy.get('tbody').then((e) => {
-			const trray = e.children().get();
-			expect(trray.length).to.be.equals(2);
-		});
-	});
-});
@@ -1,26 +0,0 @@
-/// <reference types="cypress" />
-// ***********************************************************
-// This example plugins/index.js can be used to load plugins
-//
-// You can change the location of this file or turn off loading
-// the plugins file with the 'pluginsFile' configuration option.
-//
-// You can read more here:
-// https://on.cypress.io/plugins-guide
-// ***********************************************************
-
-// This function is called when a project is opened or re-opened (e.g. due to
-// the project's config changing)
-
-// cypress/plugins/index.ts
-
-/// <reference types="cypress" />
-
-/**
- * @type {Cypress.PluginConfig}
- */
-module.exports = (): void => {
-	return undefined;
-};
-
-export {};

@@ -1,24 +0,0 @@
-import '@testing-library/cypress/add-commands';
-
-import CheckRouteDefaultGlobalTimeOptions, {
-	CheckRouteDefaultGlobalTimeOptionsProps,
-} from '../CustomFunctions/checkRouteDefaultGlobalTimeOptions';
-import Login, { LoginProps } from '../CustomFunctions/Login';
-
-Cypress.Commands.add('login', Login);
-Cypress.Commands.add(
-	'checkDefaultGlobalOption',
-	CheckRouteDefaultGlobalTimeOptions,
-);
-
-declare global {
-	// eslint-disable-next-line @typescript-eslint/no-namespace
-	namespace Cypress {
-		interface Chainable {
-			login(props: LoginProps): void;
-			checkDefaultGlobalOption(
-				props: CheckRouteDefaultGlobalTimeOptionsProps,
-			): void;
-		}
-	}
-}

@@ -1,20 +0,0 @@
-// ***********************************************************
-// This example support/index.js is processed and
-// loaded automatically before your test files.
-//
-// This is a great place to put global configuration and
-// behavior that modifies Cypress.
-//
-// You can change the location of this file or turn off
-// automatically serving support files with the
-// 'supportFile' configuration option.
-//
-// You can read more here:
-// https://on.cypress.io/configuration
-// ***********************************************************
-
-// Import commands.js using ES2015 syntax:
-import './commands';
-
-// Alternatively you can use CommonJS syntax:
-// require('./commands')

@@ -1,13 +0,0 @@
-{
-	"extends": "../tsconfig.json",
-	"target": "es5",
-	"lib": ["es5", "dom"],
-	"compilerOptions": {
-		"noEmit": true,
-		// be explicit about types included
-		// to avoid clashing with Jest types
-		"types": ["cypress", "@testing-library/cypress", "node"],
-		"isolatedModules": false
-	},
-	"include": ["../node_modules/cypress", "./**/*.ts"]
-}
@@ -9,15 +9,27 @@ const config: Config.InitialOptions = {
 	moduleNameMapper: {
 		'\\.(css|less)$': '<rootDir>/__mocks__/cssMock.ts',
 	},
-	notify: true,
-	notifyMode: 'always',
-	testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
-	transform: {
-		'\\.(js|jsx|ts|tsx)?$': 'babel-jest',
+	globals: {
+		extensionsToTreatAsEsm: ['.ts'],
+		'ts-jest': {
+			useESM: true,
+		},
 	},
+	testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
+	preset: 'ts-jest/presets/js-with-ts-esm',
+	transform: {
+		'^.+\\.(ts|tsx)?$': 'ts-jest',
+		'^.+\\.(js|jsx)$': 'babel-jest',
+	},
+	transformIgnorePatterns: ['node_modules/(?!(lodash-es)/)'],
 	setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
 	testPathIgnorePatterns: ['/node_modules/', '/public/'],
 	moduleDirectories: ['node_modules', 'src'],
+	testEnvironmentOptions: {
+		'jest-playwright': {
+			browsers: ['chromium', 'firefox', 'webkit'],
+		},
+	},
 };
 
 export default config;
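For orientation, a minimal spec the reworked config would pick up: it has to live under `src` and match the `testMatch` glob, and `@jest/globals` (added to devDependencies below) supplies typed globals under the ts-jest ESM preset. A sketch with a hypothetical file name:

```ts
// Hypothetical src/lib/sum.test.ts; matched by
// '<rootDir>/src/**/?(*.)(test).(ts|js)?(x)' and compiled by ts-jest.
import { describe, expect, it } from '@jest/globals';

describe('sum', () => {
	it('adds two numbers', () => {
		expect(1 + 2).toBe(3);
	});
});
```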
@@ -2,3 +2,4 @@
  * Adds custom matchers from the react testing library to all tests
  */
 import '@testing-library/jest-dom';
+import 'jest-styled-components';
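The new `jest-styled-components` import makes matchers such as `toHaveStyleRule` available in every test. A minimal sketch (hypothetical component, using the `@testing-library/react` already in the dependency tree):

```tsx
// Hypothetical src/components/Badge.test.tsx
import { render } from '@testing-library/react';
import React from 'react';
import styled from 'styled-components';

const Badge = styled.span`
	color: red;
`;

it('applies the styled-components rule', () => {
	const { container } = render(<Badge>hi</Badge>);
	// Matcher provided by the jest-styled-components setup import.
	expect(container.firstChild).toHaveStyleRule('color', 'red');
});
```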
@@ -7,26 +7,33 @@
 		"dev": "cross-env NODE_ENV=development webpack serve --progress",
 		"build": "webpack --config=webpack.config.prod.js --progress",
 		"prettify": "prettier --write .",
-		"lint": "eslint . --debug",
+		"lint": "eslint ./src",
-		"lint:fix": "eslint . --fix --debug",
+		"lint:fix": "eslint ./src --fix",
-		"cypress:open": "cypress open",
-		"cypress:run": "cypress run",
 		"jest": "jest",
 		"jest:coverage": "jest --coverage",
 		"jest:watch": "jest --watch",
-		"bundle:size": "bundlesize"
+		"postinstall": "is-ci || yarn husky:configure",
+		"playwright": "playwright test --config=./playwright.config.ts",
+		"playwright:local:debug": "PWDEBUG=console yarn playwright --headed --browser=chromium",
+		"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
+		"commitlint": "commitlint --edit $1"
 	},
 	"engines": {
-		"node": ">=12.13.0"
+		"node": ">=16.15.0"
 	},
 	"author": "",
 	"license": "ISC",
 	"dependencies": {
+		"@ant-design/colors": "^6.0.0",
 		"@ant-design/icons": "^4.6.2",
+		"@grafana/data": "^8.4.3",
+		"@monaco-editor/react": "^4.3.1",
 		"@testing-library/jest-dom": "^5.11.4",
 		"@testing-library/react": "^11.1.0",
 		"@testing-library/user-event": "^12.1.10",
-		"antd": "^4.16.13",
+		"@welldone-software/why-did-you-render": "^6.2.1",
+		"@xstate/react": "^3.0.0",
+		"antd": "4.19.2",
 		"axios": "^0.21.0",
 		"babel-eslint": "^10.1.0",
 		"babel-jest": "^26.6.0",
@@ -36,6 +43,7 @@
 		"babel-preset-react-app": "^10.0.0",
 		"chart.js": "^3.4.0",
 		"chartjs-adapter-date-fns": "^2.0.0",
+		"color": "^4.2.1",
 		"cross-env": "^7.0.3",
 		"css-loader": "4.3.0",
 		"css-minimizer-webpack-plugin": "^3.2.0",
@@ -47,21 +55,29 @@
 		"file-loader": "6.1.1",
 		"history": "4.10.1",
 		"html-webpack-plugin": "5.1.0",
-		"jest": "26.6.0",
+		"i18next": "^21.6.12",
+		"i18next-browser-languagedetector": "^6.1.3",
+		"i18next-http-backend": "^1.3.2",
+		"jest": "^27.5.1",
+		"js-base64": "^3.7.2",
 		"less": "^4.1.2",
 		"less-loader": "^10.2.0",
+		"lodash-es": "^4.17.21",
 		"mini-css-extract-plugin": "2.4.5",
-		"monaco-editor": "^0.30.0",
 		"react": "17.0.0",
 		"react-dom": "17.0.0",
 		"react-force-graph": "^1.41.0",
 		"react-graph-vis": "^1.0.5",
-		"react-grid-layout": "^1.2.5",
+		"react-grid-layout": "^1.3.4",
+		"react-i18next": "^11.16.1",
+		"react-query": "^3.34.19",
 		"react-redux": "^7.2.2",
 		"react-router-dom": "^5.2.0",
+		"react-use": "^17.3.2",
 		"react-vis": "^1.11.7",
 		"redux": "^4.0.5",
 		"redux-thunk": "^2.3.0",
+		"stream": "^0.0.2",
 		"style-loader": "1.3.0",
 		"styled-components": "^5.2.1",
 		"terser-webpack-plugin": "^5.2.5",
@@ -71,7 +87,8 @@
 		"uuid": "^8.3.2",
 		"web-vitals": "^0.2.4",
 		"webpack": "^5.23.0",
-		"webpack-dev-server": "^4.3.1"
+		"webpack-dev-server": "^4.3.1",
+		"xstate": "^4.31.0"
 	},
 	"browserslist": {
 		"production": [
@@ -92,13 +109,19 @@
 		"@babel/preset-env": "^7.12.17",
 		"@babel/preset-react": "^7.12.13",
 		"@babel/preset-typescript": "^7.12.17",
-		"@testing-library/cypress": "^8.0.0",
+		"@commitlint/cli": "^16.2.4",
+		"@commitlint/config-conventional": "^16.2.4",
+		"@jest/globals": "^27.5.1",
+		"@playwright/test": "^1.22.0",
+		"@testing-library/react-hooks": "^7.0.2",
+		"@types/color": "^3.0.3",
 		"@types/compression-webpack-plugin": "^9.0.0",
 		"@types/copy-webpack-plugin": "^8.0.1",
 		"@types/d3": "^6.2.0",
 		"@types/d3-tip": "^3.5.5",
-		"@types/jest": "^26.0.15",
+		"@types/jest": "^27.5.1",
 		"@types/lodash-es": "^4.17.4",
+		"@types/mini-css-extract-plugin": "^2.5.1",
 		"@types/node": "^16.10.3",
 		"@types/react": "^17.0.0",
 		"@types/react-dom": "^16.9.9",
@@ -113,32 +136,48 @@
 		"@types/webpack-dev-server": "^4.3.0",
 		"@typescript-eslint/eslint-plugin": "^4.28.2",
 		"@typescript-eslint/parser": "^4.28.2",
-		"@welldone-software/why-did-you-render": "^6.2.1",
 		"autoprefixer": "^9.0.0",
 		"babel-plugin-styled-components": "^1.12.0",
-		"bundlesize": "^0.18.1",
-		"compression-webpack-plugin": "^9.0.0",
+		"compression-webpack-plugin": "9.0.0",
 		"copy-webpack-plugin": "^8.1.0",
 		"critters-webpack-plugin": "^3.0.1",
-		"cypress": "^8.3.0",
 		"eslint": "^7.30.0",
+		"eslint-config-airbnb": "^19.0.4",
+		"eslint-config-airbnb-typescript": "^16.1.4",
 		"eslint-config-prettier": "^8.3.0",
 		"eslint-config-standard": "^16.0.3",
-		"eslint-plugin-import": "^2.23.4",
+		"eslint-plugin-import": "^2.25.4",
+		"eslint-plugin-jest": "^26.1.2",
+		"eslint-plugin-jsx-a11y": "^6.5.1",
 		"eslint-plugin-node": "^11.1.0",
 		"eslint-plugin-prettier": "^4.0.0",
 		"eslint-plugin-promise": "^5.1.0",
 		"eslint-plugin-react": "^7.24.0",
+		"eslint-plugin-react-hooks": "^4.3.0",
 		"eslint-plugin-simple-import-sort": "^7.0.0",
-		"husky": "4.3.8",
+		"eslint-plugin-sonarjs": "^0.12.0",
+		"husky": "^7.0.4",
+		"is-ci": "^3.0.1",
+		"jest-playwright-preset": "^1.7.0",
+		"jest-styled-components": "^7.0.8",
 		"less-plugin-npm-import": "^2.1.0",
-		"lint-staged": "10.5.3",
+		"lint-staged": "^12.3.7",
-		"lodash-es": "^4.17.21",
 		"portfinder-sync": "^0.0.2",
 		"prettier": "2.2.1",
 		"react-hot-loader": "^4.13.0",
+		"ts-jest": "^27.1.4",
 		"ts-node": "^10.2.1",
+		"typescript-plugin-css-modules": "^3.4.0",
 		"webpack-bundle-analyzer": "^4.5.0",
-		"webpack-cli": "^4.5.0"
+		"webpack-cli": "^4.9.2"
+	},
+	"lint-staged": {
+		"*.(js|jsx|ts|tsx)": [
+			"eslint --fix"
+		]
+	},
+	"resolutions": {
+		"@types/react": "17.0.0",
+		"@types/react-dom": "17.0.0"
 	}
 }
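Two of the new runtime dependencies, `xstate` and `@xstate/react`, also explain the `*.typegen.ts` ignore entry added earlier: xstate's typegen emits those files next to machine definitions. A minimal machine sketch (illustrative only, not a machine from this PR):

```ts
// Hypothetical src/machines/toggleMachine.ts
import { createMachine } from 'xstate';

// Two-state toggle; xstate typegen would emit toggleMachine.typegen.ts alongside this file.
export const toggleMachine = createMachine({
	id: 'toggle',
	initial: 'inactive',
	states: {
		inactive: { on: { TOGGLE: 'active' } },
		active: { on: { TOGGLE: 'inactive' } },
	},
});
```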
frontend/playwright.config.ts (new file, +21)
@@ -0,0 +1,21 @@
+import { PlaywrightTestConfig } from '@playwright/test';
+import dotenv from 'dotenv';
+
+dotenv.config();
+
+const config: PlaywrightTestConfig = {
+	forbidOnly: !!process.env.CI,
+	retries: process.env.CI ? 2 : 0,
+	preserveOutput: 'always',
+	name: 'Signoz',
+	testDir: './tests',
+	use: {
+		trace: 'retain-on-failure',
+		baseURL: process.env.PLAYWRIGHT_TEST_BASE_URL || 'http://localhost:3301',
+	},
+	updateSnapshots: 'all',
+	fullyParallel: false,
+	quiet: true,
+};
+
+export default config;
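A sketch of the kind of spec this config drives: tests live in `./tests` and inherit `baseURL`, so navigation can use relative paths. The file name and the title assertion below are assumptions for illustration, not taken from the PR:

```ts
// Hypothetical tests/home.spec.ts; picked up via testDir: './tests'.
import { expect, test } from '@playwright/test';

test('frontend serves the app shell', async ({ page }) => {
	// Resolved against baseURL (PLAYWRIGHT_TEST_BASE_URL or http://localhost:3301).
	await page.goto('/');
	await expect(page).toHaveTitle(/SigNoz/);
});
```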
frontend/public/locales/en-GB/channels.json (new file, +48)
@@ -0,0 +1,48 @@
+{
+	"page_title_create": "New Notification Channels",
+	"page_title_edit": "Edit Notification Channels",
+	"button_save_channel": "Save",
+	"button_test_channel": "Test",
+	"button_return": "Back",
+	"field_channel_name": "Name",
+	"field_channel_type": "Type",
+	"field_webhook_url": "Webhook URL",
+	"field_slack_recipient": "Recipient",
+	"field_slack_title": "Title",
+	"field_slack_description": "Description",
+	"field_webhook_username": "User Name (optional)",
+	"field_webhook_password": "Password (optional)",
+	"field_pager_routing_key": "Routing Key",
+	"field_pager_description": "Description",
+	"field_pager_severity": "Severity",
+	"field_pager_details": "Additional Information",
+	"field_pager_component": "Component",
+	"field_pager_group": "Group",
+	"field_pager_class": "Class",
+	"field_pager_client": "Client",
+	"field_pager_client_url": "Client URL",
+	"placeholder_slack_description": "Description",
+	"placeholder_pager_description": "Description",
+	"help_pager_client": "Shows up as event source in Pagerduty",
+	"help_pager_client_url": "Shows up as event source link in Pagerduty",
+	"help_pager_class": "The class/type of the event",
+	"help_pager_details": "Specify a key-value format (must be a valid json)",
+	"help_pager_group": "A cluster or grouping of sources",
+	"help_pager_component": "The part or component of the affected system that is broke",
+	"help_pager_severity": "Severity of the incident, must be one of: must be one of the following: 'critical', 'warning', 'error' or 'info'",
+	"help_webhook_username": "Leave empty for bearer auth or when authentication is not necessary.",
+	"help_webhook_password": "Specify a password or bearer token",
+	"help_pager_description": "Shows up as description in pagerduty",
+	"channel_creation_done": "Successfully created the channel",
+	"channel_creation_failed": "An unexpected error occurred while creating this channel",
+	"channel_edit_done": "Channels Edited Successfully",
+	"channel_edit_failed": "An unexpected error occurred while updating this channel",
+	"selected_channel_invalid": "Channel type selected is invalid",
+	"username_no_password": "A Password must be provided with user name",
+	"test_unsupported": "Sorry, this channel type does not support test yet",
+	"channel_test_done": "An alert has been sent to this channel",
+	"channel_test_failed": "Failed to send a test message to this channel, please confirm that the parameters are set correctly",
+	"channel_test_unexpected": "An unexpected error occurred while sending a message to this channel, please try again",
+	"webhook_url_required": "Webhook URL is mandatory",
+	"slack_channel_help": "Specify channel or user, use #channel-name, @username (has to be all lowercase, no whitespace)"
+}

frontend/public/locales/en-GB/common.json (new file, +10)
@@ -0,0 +1,10 @@
+{
+	"something_went_wrong": "Something went wrong",
+	"already_logged_in": "Already Logged In",
+	"success": "Success",
+	"cancel": "Cancel",
+	"share": "Share",
+	"save": "Save",
+	"edit": "Edit",
+	"logged_in": "Logged In"
+}

frontend/public/locales/en-GB/dashboard.json (new file, +16)
@@ -0,0 +1,16 @@
+{
+	"create_dashboard": "Create Dashboard",
+	"import_json": "Import JSON",
+	"copy_to_clipboard": "Copy To ClipBoard",
+	"download_json": "Download JSON",
+	"view_json": "View JSON",
+	"export_dashboard": "Export this dashboard.",
+	"upload_json_file": "Upload JSON file",
+	"paste_json_below": "Paste JSON below",
+	"error_upload_json": "Invalid JSON",
+	"load_json": "Load JSON",
+	"import_dashboard_by_pasting": "Import dashboard by pasting JSON or importing JSON file",
+	"error_loading_json": "Error loading JSON file",
+	"empty_json_not_allowed": "Empty JSON is not allowed",
+	"new_dashboard_title": "Sample Title"
+}

frontend/public/locales/en-GB/errorDetails.json (new file, +7)
@@ -0,0 +1,7 @@
+{
+	"see_trace_graph": "See what happened before and after this error in a trace graph",
+	"see_error_in_trace_graph": "See the error in trace graph",
+	"stack_trace": "Stacktrace",
+	"older": "Older",
+	"newer": "Newer"
+}

frontend/public/locales/en-GB/generalSettings.json (new file, +21)
@@ -0,0 +1,21 @@
+{
+	"total_retention_period": "Total Retention Period",
+	"move_to_s3": "Move to S3\n(should be lower than total retention period)",
+	"status_message": {
+		"success": "Your last call to change retention period to {{total_retention}} {{s3_part}} was successful.",
+		"failed": "Your last call to change retention period to {{total_retention}} {{s3_part}} failed. Please try again.",
+		"pending": "Your last call to change retention period to {{total_retention}} {{s3_part}} is pending. This may take some time.",
+		"s3_part": "and S3 to {{s3_retention}}"
+	},
+	"retention_save_button": {
+		"pending": "Updating {{name}} retention period",
+		"success": "Save"
+	},
+	"retention_request_race_condition": "Your request to change retention period has failed, as another request is still in process.",
+	"retention_error_message": "There was an issue in changing the retention period for {{name}}. Please try again or reach out to support@signoz.io",
+	"retention_failed_message": "There was an issue in changing the retention period. Please try again or reach out to support@signoz.io",
+	"retention_comparison_error": "Total retention period for {{name}} can’t be lower or equal to the period after which data is moved to s3.",
+	"retention_null_value_error": "Retention Period for {{name}} is not set yet. Please set by choosing below",
+	"retention_confirmation": "Are you sure you want to change the retention period?",
+	"retention_confirmation_description": "This will change the amount of storage needed for saving {{name}}."
+}

frontend/public/locales/en-GB/organizationsettings.json (new file, +13)
@@ -0,0 +1,13 @@
+{
+	"display_name": "Display Name",
+	"signoz": "SigNoz",
+	"email_address": "Email address",
+	"name_optional": "Name (optional)",
+	"role": "Role",
+	"email_placeholder": "john@signoz.io",
+	"name_placeholder": "John",
+	"add_another_team_member": "Add another team member",
+	"invite_team_members": "Invite team members",
+	"invite_members": "Invite Members",
+	"pending_invites": "Pending Invites"
+}

frontend/public/locales/en-GB/routes.json (new file, +9)
@@ -0,0 +1,9 @@
+{
+	"general": "General",
+	"alert_channels": "Alert Channels",
+	"organization_settings": "Organization Settings",
+	"my_settings": "My Settings",
+	"overview_metrics": "Overview Metrics",
+	"dbcall_metrics": "Database Calls",
+	"external_metrics": "External Calls"
+}

frontend/public/locales/en-GB/settings.json (new file, +5)
@@ -0,0 +1,5 @@
+{
+	"current_password": "Current Password",
+	"new_password": "New Password",
+	"change_password": "Change Password"
+}

frontend/public/locales/en-GB/translation.json (new file, +17)
@@ -0,0 +1,17 @@
+{
+	"monitor_signup": "Monitor your applications. Find what is causing issues.",
+	"version": "Version",
+	"latest_version": "Latest version",
+	"current_version": "Current version",
+	"release_notes": "Release Notes",
+	"read_how_to_upgrade": "Read instructions on how to upgrade",
+	"latest_version_signoz": "You are running the latest version of SigNoz.",
+	"stale_version": "You are on an older version and may be loosing on the latest features we have shipped. We recommend to upgrade to the latest version",
+	"oops_something_went_wrong_version": "Oops.. facing issues with fetching updated version information",
+	"n_a": "N/A",
+	"routes": {
+		"general": "General",
+		"alert_channels": "Alert Channels",
+		"all_errors": "All Exceptions"
+	}
+}
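How these bundles get consumed, as a sketch: with `react-i18next` (added to `package.json` above) each JSON file acts as a namespace, so keys in `channels.json` are read through the `channels` namespace. A hypothetical component:

```tsx
// Hypothetical src/container/CreateAlertChannels/SaveButton.tsx
import React from 'react';
import { useTranslation } from 'react-i18next';

export default function SaveChannelButton(): JSX.Element {
	// 'channels' maps to public/locales/en-GB/channels.json.
	const { t } = useTranslation('channels');
	return <button type="button">{t('button_save_channel')}</button>;
}
```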
Some files were not shown because too many files have changed in this diff.