Compare commits
726 Commits
[Commit table (Author | SHA1 | Date) not recoverable from this extraction: only the abbreviated commit SHAs were captured; author, date, and message fields were empty. The file changes in this comparison follow.]
.editorconfig (new file, 33 lines)
@@ -0,0 +1,33 @@
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true

# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,py}]
charset = utf-8

# 4 space indentation
[*.py]
indent_style = space
indent_size = 4

# Tab indentation (no size specified)
[Makefile]
indent_style = tab

# Indentation override for all JS under lib directory
[lib/**.js]
indent_style = space
indent_size = 2

# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2
.github/CODEOWNERS (new file, vendored, 6 lines)
@@ -0,0 +1,6 @@
# CODEOWNERS info: https://help.github.com/en/articles/about-code-owners
# Owners are automatically requested for review for PRs that change code
# that they own.
* @ankitnayan
/frontend/ @palash-signoz @pranshuchittora
/deploy/ @prashant-shahi
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 1 change)
@@ -26,6 +26,7 @@ assignees: ''
* **Signoz version**:
* **Browser version**:
* **Your OS and version**:
* **Your CPU Architecture**(ARM/Intel):

## Additional context
.github/config.yml (new file, vendored, 31 lines)
@@ -0,0 +1,31 @@
# Configuration for welcome - https://github.com/behaviorbot/welcome

# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
# Comment to be posted on first-time issues
newIssueWelcomeComment: >
  Thanks for opening this issue. A team member should give feedback soon.
  In the meantime, feel free to check out the [contributing guidelines](https://github.com/signoz/signoz/blob/main/CONTRIBUTING.md).

# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
# Comment to be posted on PRs from first-time contributors in your repository
newPRWelcomeComment: >
  Welcome to the SigNoz community! Thank you for your first pull request and making this project better. 🤗

# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge
# Comment to be posted on pull requests merged by a first-time user
firstPRMergeComment: >
  Congrats on merging your first pull request!

  We here at SigNoz are proud of you! 🥳

# Configuration for request-info - https://github.com/behaviorbot/request-info
# Comment to be posted in issues or pull requests, when no description is provided.
requestInfoReplyComment: >
  We would appreciate it if you could provide us with more info about this issue/pr!

requestInfoLabelToAdd: request-more-info
.github/workflows/README.md (vendored, 21 changes)
@@ -1,6 +1,18 @@
To run GitHub workflow, a few environment variables need to be added to GitHub secrets
# Github actions

#### Environment Variables
## Testing the UI manually on each PR

First we need to make sure the UI is ready
* Check the `Start tunnel` step in the `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull-<number>-signoz.loca.lt`
* This job will run until the PR is merged or closed to keep the local tunneling alive
  - GitHub will cancel this job if the PR hasn't been merged after 6h
  - if the job was cancelled, go to the action and press `Re-run all jobs`

Now you can open your browser at https://pull-<number>-signoz.loca.lt and check the UI.

## Environment Variables

To run GitHub workflow, a few environment variables need to be added to GitHub secrets

<table>
<tr>
@@ -23,3 +35,8 @@ To run GitHub workflow, a few environment variables needs to add in GitHub secre
<td> Docker hub password/token with push permission </td>
<td> **** </td>
</tr>
<tr>
<td> SONAR_TOKEN </td>
<td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
<td> **** </td>
</tr>
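The table above lists the secrets these workflows read (Docker Hub credentials and SONAR_TOKEN). As a sketch — assuming the GitHub CLI (`gh`) is installed and authenticated against the repository — they can be registered from a terminal:

```bash
# Sketch (not part of the diff): register the workflow secrets with the GitHub CLI.
# The values are placeholders; the SONAR_TOKEN comes from sonarcloud.io.
gh secret set DOCKERHUB_USERNAME --body "your-dockerhub-username"
gh secret set DOCKERHUB_TOKEN --body "your-dockerhub-push-token"
gh secret set SONAR_TOKEN --body "your-sonarcloud-token"
```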
.github/workflows/build.yaml (vendored, 53 changes)
@@ -1,42 +1,25 @@
name: build-pipeline

on:
  pull_request:
    branches:
      - develop
      - main
      - v*
    paths:
      - 'pkg/**'
      - 'frontend/**'
      - release/v*

jobs:
  get_filters:
    runs-on: ubuntu-latest
    # Set job outputs to values from filter step
    outputs:
      frontend: ${{ steps.filter.outputs.frontend }}
      query-service: ${{ steps.filter.outputs.query-service }}
      flattener: ${{ steps.filter.outputs.flattener }}
    steps:
      # For pull requests it's not necessary to checkout the code
      - uses: dorny/paths-filter@v2
        id: filter
        with:
          filters: |
            frontend:
              - 'frontend/**'
            query-service:
              - 'pkg/query-service/**'
            flattener:
              - 'pkg/processors/flattener/**'

  build-frontend:
    runs-on: ubuntu-latest
    needs:
      - get_filters
    if: ${{ needs.get_filters.outputs.frontend == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install dependencies
        run: cd frontend && yarn install
      - name: Run ESLint
        run: cd frontend && npm run lint
      - name: TSC
        run: yarn tsc
        working-directory: ./frontend
      - name: Build frontend docker image
        shell: bash
        run: |
@@ -44,26 +27,10 @@ jobs:

  build-query-service:
    runs-on: ubuntu-latest
    needs:
      - get_filters
    if: ${{ needs.get_filters.outputs.query-service == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Build query-service image
        shell: bash
        run: |
          make build-flattener-amd64

  build-flattener:
    runs-on: ubuntu-latest
    needs:
      - get_filters
    if: ${{ needs.get_filters.outputs.flattener == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Build flattener docker image
        shell: bash
        run: |
          make build-query-service-amd64
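The frontend job above runs install, lint, and type-check before building the image; the same checks can be reproduced locally before opening a PR (a sketch using the exact commands from the workflow):

```bash
# Sketch: run the build-frontend job's checks locally.
cd frontend
yarn install   # "Install dependencies" step
npm run lint   # "Run ESLint" step
yarn tsc       # "TSC" type-check step
```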
.github/workflows/create-issue-on-pr-merge.yml (new file, vendored, 27 lines)
@@ -0,0 +1,27 @@
on:
  pull_request_target:
    types:
      - closed

env:
  GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
  PR_NUMBER: ${{ github.event.number }}
jobs:
  create_issue_on_merge:
    if: github.event.pull_request.merged == true
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Codebase
        uses: actions/checkout@v2
        with:
          repository: signoz/gh-bot
      - name: Use Node v16
        uses: actions/setup-node@v2
        with:
          node-version: 16
      - name: Setup Cache & Install Dependencies
        uses: bahmutov/npm-install@v1
        with:
          install-command: yarn --frozen-lockfile
      - name: Comment on PR
        run: node create-issue.js
.github/workflows/e2e-k3s.yaml (new file, vendored, 84 lines)
@@ -0,0 +1,84 @@
name: e2e-k3s

on:
  pull_request:
    types: [labeled]

jobs:

  e2e-k3s:
    runs-on: ubuntu-latest
    if: ${{ github.event.label.name == 'ok-to-test' }}
    env:
      DOCKER_TAG: pull-${{ github.event.number }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Build query-service image
        run: make build-query-service-amd64

      - name: Build frontend image
        run: make build-frontend-amd64

      - name: Create a k3s cluster
        uses: AbsaOSS/k3d-action@v2
        with:
          cluster-name: "signoz"

      - name: Inject the images to the cluster
        run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz

      - name: Set up HotROD sample-app
        run: |
          # create sample-application namespace
          kubectl create ns sample-application

          # apply hotrod k8s manifest file
          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml

          # wait for all deployments in sample-application namespace to be READY
          kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s

      - name: Deploy the app
        run: |
          # add signoz helm repository
          helm repo add signoz https://charts.signoz.io

          # create platform namespace
          kubectl create ns platform

          # installing signoz using helm
          helm install my-release signoz/signoz -n platform \
            --wait \
            --timeout 10m0s \
            --set frontend.service.type=LoadBalancer \
            --set queryService.image.tag=$DOCKER_TAG \
            --set frontend.image.tag=$DOCKER_TAG

          # get pods, services and the container images
          kubectl get pods -n platform
          kubectl get svc -n platform

      - name: Kick off a sample-app workload
        run: |
          # start the locust swarm
          kubectl -n sample-application run strzal --image=djbingham/curl \
            --restart='OnFailure' -i --rm --command -- curl -X POST -F \
            'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm

      - name: Get short commit SHA and display tunnel URL
        id: get-subdomain
        run: |
          subdomain="pr-$(git rev-parse --short HEAD)"
          echo "URL for tunnelling: https://$subdomain.loca.lt"
          echo "::set-output name=subdomain::$subdomain"

      - name: Start tunnel
        env:
          SUBDOMAIN: ${{ steps.get-subdomain.outputs.subdomain }}
        run: |
          npm install -g localtunnel
          host=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f4)
          port=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1)
          lt -p $port -l $host -s $SUBDOMAIN
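The `Start tunnel` step recovers the frontend service's external IP and port by grepping and cutting `kubectl get svc` table output, which breaks if column layout changes. A sketch of the same lookup with jsonpath — assuming the Helm release exposes the LoadBalancer service as `my-release-signoz-frontend`, the name also used in the CONTRIBUTING.md port-forward instructions below:

```bash
# Sketch (not part of the diff): read host/port via jsonpath instead of
# parsing the kubectl table. The service name is an assumption based on
# the "my-release" Helm release installed earlier in this workflow.
host=$(kubectl -n platform get svc my-release-signoz-frontend \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
port=$(kubectl -n platform get svc my-release-signoz-frontend \
  -o jsonpath='{.spec.ports[0].port}')
lt -p "$port" -l "$host" -s "$SUBDOMAIN"
```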
.github/workflows/playwright.yaml (new file, vendored, 22 lines)
@@ -0,0 +1,22 @@
name: Playwright Tests
on:
  deployment_status:
jobs:
  test:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    if: github.event.deployment_status.state == 'success'
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v2
        with:
          node-version: "14.x"
      - name: Install dependencies
        run: npm ci
      - name: Install Playwright
        run: npx playwright install --with-deps
      - name: Run Playwright tests
        run: npm run test:e2e
        env:
          # This might depend on your test-runner/language binding
          PLAYWRIGHT_TEST_BASE_URL: ${{ github.event.deployment_status.target_url }}
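The same suite can be pointed at any running deployment by hand; a sketch with a placeholder base URL:

```bash
# Sketch: run the Playwright suite locally against a deployed instance.
# The base URL is a placeholder for whichever environment you target.
npm ci
npx playwright install --with-deps
PLAYWRIGHT_TEST_BASE_URL="https://your-deployment.example.com" npm run test:e2e
```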
.github/workflows/pr_verify_linked_issue.yml (new file, vendored, 20 lines)
@@ -0,0 +1,20 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue

on:
  pull_request:
    types: [edited, synchronize, opened, reopened]
  check_run:

jobs:
  verify_linked_issue:
    runs-on: ubuntu-latest
    name: Ensure Pull Request has a linked issue.
    steps:
      - name: Verify Linked Issue
        uses: hattan/verify-linked-issue-action@v1.1.0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/push.yaml (vendored, 206 changes)
@@ -1,172 +1,90 @@
name: push-pipeline
name: push

on:
  push:
    branches:
      - main
      - ^v[0-9]*.[0-9]*.x$
      - develop
    tags:
      - "*"
  # pull_request:
  #   branches:
  #     - main
  #     - v*
  #   paths:
  #     - 'pkg/**'
  #     - 'frontend/**'
      - v*

jobs:
  get-envs:

  image-build-and-push-query-service:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
      - name: Checkout code
        uses: actions/checkout@v2
      - shell: bash
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - uses: benjlevesque/short-sha@v1.2
        id: short-sha
      - name: Get branch name
        id: branch-name
        uses: tj-actions/branch-names@v5.1
      - name: Set docker tag environment
        run: |
          img_tag=""
          array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
          if [ ${array[1]} == "tags" ]
          then
            echo "tag build"
            img_tag=${GITHUB_REF#refs/*/v}
          elif [ ${array[1]} == "pull" ]
          then
            img_tag="pull-${{ github.event.number }}"
          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
            tag="${{ steps.branch-name.outputs.tag }}"
            tag="${tag:1}"
            echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
          else
            echo "non tag build"
            img_tag="latest"
            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
          fi
          # This is a condition where image tag looks like "pull/<pullrequest-name>" during pull request build
          NEW_IMG_TAG=`echo $img_tag | sed "s/\//-/g"`
          echo $NEW_IMG_TAG
          echo export IMG_TAG=$NEW_IMG_TAG >> env-vars
          echo export FRONTEND_IMAGE="frontend" >> env-vars
          echo export QUERY_SERVICE="query-service" >> env-vars
          echo export FLATTENER_PROCESSOR="flattener-processor" >> env-vars
      - name: Build and push docker image
        run: make build-push-query-service

      - name: Uploading envs
        uses: actions/upload-artifact@v2
        with:
          name: env_artifact
          path: env-vars

  build-and-push-frontend:
  image-build-and-push-frontend:
    runs-on: ubuntu-latest
    needs:
      - get-envs
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Downloading image artifact
        uses: actions/download-artifact@v2
        with:
          name: env_artifact

      - name: Install dependencies
        working-directory: frontend
        run: yarn install
      - name: Run Prettier
        working-directory: frontend
        run: npm run prettify
        continue-on-error: true
      - name: Run ESLint
        working-directory: frontend
        run: npm run lint
        continue-on-error: true
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest

      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build & Push Frontend Docker Image
        shell: bash
        env:
          FRONTEND_DIRECTORY: "frontend"
          REPONAME: ${{ secrets.REPONAME }}
          FRONTEND_DOCKER_IMAGE: ${FRONTEND_IMAGE}
          DOCKER_TAG: ${IMG_TAG}
      - uses: benjlevesque/short-sha@v1.2
        id: short-sha
      - name: Get branch name
        id: branch-name
        uses: tj-actions/branch-names@v5.1
      - name: Set docker tag environment
        run: |
          branch=${GITHUB_REF#refs/*/}
          array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
          if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
          then
            source env-vars
            make build-push-frontend
          fi

  build-and-push-query-service:
    runs-on: ubuntu-latest
    needs:
      - get-envs
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Downloading image artifact
        uses: actions/download-artifact@v2
        with:
          name: env_artifact

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest

      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build & Push Query Service Docker Image
        shell: bash
        env:
          QUERY_SERVICE_DIRECTORY: "pkg/query-service"
          REPONAME: ${{ secrets.REPONAME }}
          QUERY_SERVICE_DOCKER_IMAGE: ${QUERY_SERVICE}
          DOCKER_TAG: ${IMG_TAG}
        run: |
          branch=${GITHUB_REF#refs/*/}
          array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
          if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
          then
            source env-vars
            make build-push-query-service
          fi

  build-and-push-flattener:
    runs-on: ubuntu-latest
    needs:
      - get-envs
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Downloading image artifact
        uses: actions/download-artifact@v2
        with:
          name: env_artifact

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest

      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build & Push Flattener Processor Docker Image
        shell: bash
        env:
          FLATTENER_DIRECTORY: "pkg/processors/flattener"
          REPONAME: ${{ secrets.REPONAME }}
          FLATTERNER_DOCKER_IMAGE: ${FLATTENER_PROCESSOR}
          DOCKER_TAG: ${IMG_TAG}
        run: |
          branch=${GITHUB_REF#refs/*/}
          array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
          if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
          then
            source env-vars
            make build-push-flattener
          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
            tag="${{ steps.branch-name.outputs.tag }}"
            tag="${tag:1}"
            echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
          else
            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
          fi
      - name: Build and push docker image
        run: make build-push-frontend
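The rewritten tag logic above maps the pushed ref to a Docker tag: a `v`-prefixed git tag is stripped to its bare version, `main` becomes `latest`, and any other branch keeps its name. A standalone sketch of that mapping, runnable outside Actions with `GITHUB_REF` as a placeholder input:

```bash
#!/usr/bin/env bash
# Sketch: reproduce the DOCKER_TAG mapping from the push workflow locally.
GITHUB_REF="${GITHUB_REF:-refs/heads/develop}"  # placeholder input

if [[ "$GITHUB_REF" == refs/tags/v* ]]; then
  DOCKER_TAG="${GITHUB_REF#refs/tags/v}"        # v0.5.4 -> 0.5.4
elif [[ "$GITHUB_REF" == refs/heads/main ]]; then
  DOCKER_TAG="latest"                           # main -> latest
else
  DOCKER_TAG="${GITHUB_REF#refs/heads/}"        # branch name as-is
fi
echo "DOCKER_TAG=$DOCKER_TAG"
```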
.github/workflows/remove-label.yaml (new file, vendored, 16 lines)
@@ -0,0 +1,16 @@
name: remove-label

on:
  pull_request_target:
    types: [synchronize]

jobs:
  remove:
    runs-on: ubuntu-latest
    steps:
      - name: Remove label
        uses: buildsville/add-remove-label@v1
        with:
          label: ok-to-test
          type: remove
          token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/repo-stats.yml (new file, vendored, 25 lines)
@@ -0,0 +1,25 @@
on:
  schedule:
    # Run this once per day, towards the end of the day for keeping the most
    # recent data point most meaningful (hours are interpreted in UTC).
    - cron: "0 8 * * *"
  workflow_dispatch: # Allow for running this manually.

jobs:
  j1:
    name: repostats
    runs-on: ubuntu-latest
    steps:
      - name: run-ghrs
        uses: jgehrcke/github-repo-stats@v1.1.0
        with:
          # Define the stats repository (the repo to fetch
          # stats for and to generate the report for).
          # Remove the parameter when the stats repository
          # and the data repository are the same.
          repository: signoz/signoz
          # Set a GitHub API token that can read the stats
          # repository, and that can push to the data
          # repository (which this workflow file lives in),
          # to store data and the report files.
          ghtoken: ${{ github.token }}
.github/workflows/sonar.yml (new file, vendored, 27 lines)
@@ -0,0 +1,27 @@
name: sonar
on:
  pull_request:
    branches:
      - main
      - v*
    paths:
      - 'frontend/**'
defaults:
  run:
    working-directory: frontend
jobs:
  sonar-analysis:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Sonar analysis
        uses: sonarsource/sonarcloud-github-action@master
        with:
          projectBaseDir: frontend
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
.gitignore (vendored, 16 changes)
@@ -14,6 +14,9 @@ frontend/coverage
frontend/build
frontend/.vscode
frontend/.yarnclean
frontend/.temp_cache
frontend/test-results

# misc
.DS_Store
.env.local
@@ -25,15 +28,10 @@ frontend/npm-debug.log*
frontend/yarn-debug.log*
frontend/yarn-error.log*
frontend/src/constants/env.ts
frontend/cypress/**/*.mp4

# env file for cypress
frontend/cypress.env.json

.idea

**/.vscode
*.tgz
**/build
**/storage
**/locust-scripts/__pycache__/
@@ -41,3 +39,11 @@ frontend/cypress.env.json

frontend/*.env
pkg/query-service/signoz.db

pkg/query-service/tests/test-deploy/data/

# local data

/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
.gitpod.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file)
# and commit this file to your remote git repository to share the goodness with others.

tasks:
  - name: Run Script to Comment out required lines
    init: |
      cd ./.scripts
      sh commentLinesForSetup.sh

  - name: Run Docker Images
    init: |
      cd ./deploy
      sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
    # command:

  - name: Run Frontend
    init: |
      cd ./frontend
      yarn install
    command:
      yarn dev

ports:
  - port: 3301
    onOpen: open-browser
  - port: 8080
    onOpen: ignore
  - port: 9000
    onOpen: ignore
  - port: 8123
    onOpen: ignore
  - port: 8089
    onOpen: ignore
  - port: 9093
    onOpen: ignore
.scripts/commentLinesForSetup.sh (new file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/sh

# It comments out the query-service & frontend sections of deploy/docker/clickhouse-setup/docker-compose.yaml.
# Update the line numbers when deploy/docker/clickhouse-setup/docker-compose.yaml changes.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz

sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
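The `sed` line applies the substitution `s/.*/# &/` (prefix the whole line with `# `) only to lines 38-62 of the compose file. Its effect is easy to see on a throwaway file (GNU sed syntax; on macOS/BSD use `sed -i ''`):

```bash
# Sketch: the same range-limited sed on a 3-line demo file.
printf 'one\ntwo\nthree\n' > /tmp/demo.txt
sed -i 2,3's/.*/# &/' /tmp/demo.txt   # comment out lines 2-3 only
cat /tmp/demo.txt                     # prints: one, "# two", "# three"
```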
CONTRIBUTING.md (146 changes)
@@ -1,13 +1,16 @@
# How to Contribute

There are primarily 3 areas in which you can contribute in SigNoz
There are primarily 2 areas in which you can contribute in SigNoz

- Frontend (written in Typescript, React)
- Query Service (written in Go)
- Flattener Processor (written in Go)
- Backend - (Query Service - written in Go)

Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.

> Please note: If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻

> If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.

# Develop Frontend

Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)
@@ -15,22 +18,33 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git
### Contribute to Frontend with Docker installation of SigNoz

- `git clone https://github.com/SigNoz/signoz.git && cd signoz`
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L38`
- run `cd deploy && docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this will install signoz locally without the frontend service)
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L62`
- run `cd deploy` to move to the deploy directory
- Install signoz locally without the frontend
  - Add the below configuration to the query-service section at `docker/clickhouse-setup/docker-compose.yaml#L38`

    ```docker
    ports:
      - "8080:8080"
    ```
  - If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d`
  - If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d`
- `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
- `yarn install`
- `yarn dev`

> Note for maintainers/contributors who change the line numbers of the frontend & query-service sections: please update the line numbers in `./scripts/commentLinesForSetup.sh`

### Contribute to Frontend without installing SigNoz backend

If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in the #contributing channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) and we will DM you with `<test environment URL>`
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in the #contributing channel in our [slack community](https://signoz.io/slack) and we will DM you with `<test environment URL>`

- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
- Create a file `.env` with `FRONTEND_API_ENDPOINT=<test environment URL>`
- `yarn install`
- `yarn dev`

**_Frontend should now be accessible at `http://localhost:3000/application`_**
**_Frontend should now be accessible at `http://localhost:3301/application`_**

# Contribute to Query-Service

@@ -38,25 +52,125 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht

### To run ClickHouse setup (recommended for local development)

- `git clone https://github.com/SigNoz/signoz.git && cd signoz/deploy`
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L22`
- Run `docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this will install signoz locally without the frontend and query-service)
- `STORAGE=clickhouse ClickHouseUrl=tcp://localhost:9001 go run main.go`
- git clone https://github.com/SigNoz/signoz.git
- run `cd signoz` to move to the signoz directory
- run `sudo make dev-setup` to configure the local setup to run query-service
- comment out the frontend service section in `docker/clickhouse-setup/docker-compose.yaml`
- comment out the query-service section in `docker/clickhouse-setup/docker-compose.yaml`
- add the below configuration to the clickhouse section in `docker/clickhouse-setup/docker-compose.yaml`

  ```docker
  expose:
    - 9000
  ports:
    - 9001:9000
  ```

- run `cd pkg/query-service/` to move to the query-service directory
- Open ./constants/constants.go
- Replace `const RELATIONAL_DATASOURCE_PATH = "/var/lib/signoz/signoz.db"` \
  with `const RELATIONAL_DATASOURCE_PATH = "./signoz.db"`.

- Install signoz locally without the frontend and query-service
  - If you are using x86_64 processors (All Intel/AMD processors) run `sudo make run-x86`
  - If you are on arm64 processors (Apple M1 Macbooks) run `sudo make run-arm`

#### Run locally
```console
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```

> Note for maintainers/contributors who change the line numbers of the frontend & query-service sections: please update the line numbers in `./scripts/commentLinesForSetup.sh`

**_Query Service should now be available at `http://localhost:8080`_**

> If you want to see how the frontend plays with the query service, you can also run the frontend in your local env with the baseURL changed to `http://localhost:8080` in the file `src/constants/env.ts`, as the query-service is now running at port `8080`.

# Contribute to Flattener Processor
---
<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.

Not needed to run for the ClickHouse setup
Click the button below. A workspace with all required environments will be created.

more info at [https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener](https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener)
[](https://gitpod.io/#https://github.com/SigNoz/signoz)

> To use it on your forked repo, edit the 'Open in Gitpod' button url to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->

# Contribute to SigNoz Helm Chart

Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).

### To run helm chart for local development

- run `git clone https://github.com/SigNoz/charts.git` followed by `cd charts`
- it is recommended to use a lightweight kubernetes (k8s) cluster for local development:
  - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
  - [k3d](https://k3d.io/#installation)
  - [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
- run `make dev-install` to install the SigNoz chart with the `my-release` release name in the `platform` namespace
- run `kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301` to make the SigNoz UI available at [localhost:3301](http://localhost:3301)

**To install HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
  | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```

**To load data with HotROD sample app:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
  'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```

**To stop the load generation:**

```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl \
  http://locust-master:8089/stop
```

**To delete HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
  | HOTROD_NAMESPACE=sample-application bash
```

---

## General Instructions

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
**Before making any significant changes, please open an issue**. Each issue
should describe the following:

* Requirement - what kind of use case are you trying to solve?
* Proposal - what do you suggest to solve the problem or improve the existing
  situation?
* Any open questions to address

Discussing your proposed changes ahead of time will make the contribution
process smooth for everyone. Once the approach is agreed upon, make your changes
and open a pull request(s). Unless your change is small, please consider submitting different PRs:

* First PR should include the overall structure of the new component:
  * Readme, configuration, interfaces or base classes, etc.
  * This PR is usually trivial to review, so the size limit does not apply to
    it.
* Second PR should include the concrete implementation of the component. If the
  size of this PR is larger than the recommended size, consider splitting it into
  multiple PRs.
* If there are multiple sub-components, then ideally each one should be implemented as
  a separate pull request.
* Last PR should include changes to any user-facing documentation. And should include
  end-to-end tests if applicable. The component must be enabled
  only after sufficient testing, and there is enough confidence in the
  stability and quality of the component.

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).

- If you find any bugs, please create an issue
- If you find anything missing in documentation, you can create an issue with label **documentation**
77
Makefile
77
Makefile
@@ -1,19 +1,35 @@
|
||||
#
|
||||
# Reference Guide - https://www.gnu.org/software/make/manual/make.html
|
||||
#
|
||||
|
||||
# Build variables
|
||||
BUILD_VERSION ?= $(shell git describe --always --tags)
|
||||
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
|
||||
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
|
||||
# Internal variables or constants.
|
||||
#
|
||||
FRONTEND_DIRECTORY ?= frontend
|
||||
FLATTENER_DIRECTORY ?= pkg/processors/flattener
|
||||
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
|
||||
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
|
||||
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
|
||||
|
||||
REPONAME ?= signoz
|
||||
DOCKER_TAG ?= latest
|
||||
|
||||
FRONTEND_DOCKER_IMAGE ?= frontend
|
||||
FLATTERNER_DOCKER_IMAGE ?= query-service
|
||||
QUERY_SERVICE_DOCKER_IMAGE ?= flattener-processor
|
||||
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
|
||||
|
||||
all: build-push-frontend build-push-query-service build-push-flattener
|
||||
# Build-time Go variables
|
||||
PACKAGE?=go.signoz.io/query-service
|
||||
buildVersion=${PACKAGE}/version.buildVersion
|
||||
buildHash=${PACKAGE}/version.buildHash
|
||||
buildTime=${PACKAGE}/version.buildTime
|
||||
gitBranch=${PACKAGE}/version.gitBranch
|
||||
|
||||
LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"
|
||||
|
||||
all: build-push-frontend build-push-query-service
|
||||
# Steps to build and push docker image of frontend
|
||||
.PHONY: build-frontend-amd64 build-push-frontend
|
||||
# Step to build docker image of frontend in amd64 (used in build pipeline)
|
||||
@@ -22,7 +38,8 @@ build-frontend-amd64:
|
||||
@echo "--> Building frontend docker image for amd64"
|
||||
@echo "------------------"
|
||||
@cd $(FRONTEND_DIRECTORY) && \
|
||||
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
|
||||
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
|
||||
--build-arg TARGETPLATFORM="linux/amd64" .
|
||||
|
||||
# Step to build and push docker image of frontend(used in push pipeline)
|
||||
build-push-frontend:
|
||||
@@ -30,7 +47,8 @@ build-push-frontend:
|
||||
@echo "--> Building and pushing frontend docker image"
|
||||
@echo "------------------"
|
||||
@cd $(FRONTEND_DIRECTORY) && \
|
||||
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
|
||||
docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/amd64 \
	--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .

# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
@@ -40,7 +58,8 @@ build-query-service-amd64:
	@echo "--> Building query-service docker image for amd64"
	@echo "------------------"
	@cd $(QUERY_SERVICE_DIRECTORY) && \
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
	--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS) .

# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service:
@@ -48,22 +67,34 @@ build-push-query-service:
	@echo "--> Building and pushing query-service docker image"
	@echo "------------------"
	@cd $(QUERY_SERVICE_DIRECTORY) && \
	docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
	docker buildx build --file Dockerfile --progress plain --no-cache \
	--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) \
	--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .

# Steps to build and push docker image of flattener
.PHONY: build-flattener-amd64 build-push-flattener
# Step to build docker image of flattener in amd64 (used in build pipeline)
build-flattener-amd64:
dev-setup:
	mkdir -p /var/lib/signoz
	sqlite3 /var/lib/signoz/signoz.db "VACUUM";
	mkdir -p pkg/query-service/config/dashboards
	@echo "------------------"
	@echo "--> Building flattener docker image for amd64"
	@echo "--> Local Setup completed"
	@echo "------------------"
	@cd $(FLATTENER_DIRECTORY) && \
	docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"

# Step to build and push docker image of flattener in amd64 (used in push pipeline)
build-push-flattener:
	@echo "------------------"
	@echo "--> Building and pushing flattener docker image"
	@echo "------------------"
	@cd $(FLATTENER_DIRECTORY) && \
	docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .

run-x86:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up -d

run-arm:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.arm.yaml up -d

down-x86:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v

down-arm:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.arm.yaml down -v

clear-standalone-data:
	@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"

clear-swarm-data:
	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
	sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
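The standalone and swarm helper targets above make local lifecycle management a one-liner. As a quick usage sketch (assuming `make` is invoked from the repository root where this Makefile lives):

```sh
# Bring up the standalone stack on an x86 machine, tear it down,
# then wipe its persisted data when finished.
make run-x86
make down-x86
make clear-standalone-data
```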
160
README.de-de.md
Normal file
@@ -0,0 +1,160 @@
<p align="center">
  <img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />

  <p align="center">Monitor your applications and troubleshoot problems in your deployed applications. SigNoz is an open-source alternative to DataDog, New Relic, etc.</p>
</p>

<p align="center">
  <img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
  <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>

<h3 align="center">
  <a href="https://signoz.io/docs"><b>Documentation</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
  <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>

##

SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.

👉 You can see metrics like the p99 latency and the error rate of your services, external API calls, and individual endpoints.

👉 You can find the root cause of a problem by drilling down into the span that causes it and viewing detailed flamegraphs of individual request traces.

👉 Run aggregates on trace data to get business-relevant metrics.



<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Join our Slack community

Come say hi to us on [Slack](https://signoz.io/slack) 👋

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />

## Features:

- Application overview metrics such as RPS, 50th/90th/99th percentile latencies, and error rates
- Overview of the slowest endpoints of your application
- See the exact trace of a request to find issues in downstream services, slow database queries, and calls to third-party services such as payment gateways
- Filter traces by service name, latency, error, tags/annotations
- Run aggregates on trace data (events/spans) to get business-relevant metrics. For example, you can get the error rate and 99th percentile latency of `customer_type: gold`, `deployment_version: v2`, or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch back and forth between Prometheus and Jaeger to debug issues.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />

## Why SigNoz?

As developers, we found it tiresome to have to rely on closed-source SaaS vendors for every small feature we wanted. Closed-source vendors often surprise their customers at the end of the month with large bills that offer no transparency on the cost breakdown.

We wanted to offer a self-hosted, open-source variant of solutions like DataDog and NewRelic for companies that have privacy and security concerns about sharing customer data with third-party vendors.

Being open source also gives you full control over your configuration, sampling, and uptime. You can also build new modules on top of SigNoz that provide advanced, business-specific features.

### Supported languages:

We support [OpenTelemetry](https://opentelemetry.io) as the library you can use to instrument your applications. Any framework and language supported by OpenTelemetry is also supported by SigNoz. Some of the major supported languages are:

- Java
- Python
- NodeJS
- Go

You can find the full list of supported languages here - https://opentelemetry.io/docs/

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />

## Getting started with SigNoz


### Deploying with Docker

Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to deploy your application with Docker.

The [troubleshooting guides](https://signoz.io/docs/deployment/troubleshooting) may be helpful if you run into any issues.

<p>  </p>


### Deploying with Kubernetes and Helm

Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to deploy your application with Helm charts.


<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />

## Comparisons with other solutions

### SigNoz vs. Prometheus

Prometheus is good if you are only interested in metrics. But if you want a seamless experience between metrics and traces, the combination of Prometheus and Jaeger is not the right fit for you.

Our goal is to provide an integrated UI for metrics and traces, similar to what SaaS vendors like Datadog offer, with the ability to do advanced filtering and aggregation over traces, something that is currently missing in Jaeger.

<p>  </p>

### SigNoz vs. Jaeger

Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management on our roadmap.

Moreover, SigNoz has a few more advanced features compared to Jaeger:

- The Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't run aggregates on filtered traces, e.g. the p99 latency of requests with the tag - customer_type='premium', which can easily be done with SigNoz.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

## Contributing


We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) before you get started with making contributions to SigNoz.

Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community](https://signoz.io/slack).

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

## Documentation

You can find our documentation at https://signoz.io/docs/. If anything is unclear or missing, feel free to open a GitHub issue with the label `documentation` or reach out to us on the community Slack channel.

<br /><br />

<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />

## Community

Join the [Slack community](https://signoz.io/slack) to learn more about distributed tracing, observability, or SigNoz, and to connect with other users and contributors.

If you have any ideas, questions, or feedback, please feel free to share them in our [Github Discussions](https://github.com/SigNoz/signoz/discussions).

As always, thanks to our amazing contributors!

<a href="https://github.com/signoz/signoz/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>

17
README.md
@@ -6,7 +6,7 @@

<p align="center">
  <img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
  <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -16,8 +16,9 @@
<h3 align="center">
  <a href="https://signoz.io/docs"><b>Documentation</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
  <a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> •
  <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>

@@ -32,7 +33,11 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
👉 Run aggregates on trace data to get business relevant metrics



<br />

<br />


<br /><br />

@@ -40,7 +45,7 @@ SigNoz helps developers monitor applications and troubleshoot problems in their

## Join our Slack community

Come say Hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
Come say Hi to us on [Slack](https://signoz.io/slack) 👋

<br /><br />

@@ -131,7 +136,7 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:

We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.

Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)

<br /><br />

@@ -147,7 +152,7 @@ You can find docs at https://signoz.io/docs/. If you need any clarification or f

## Community

Join the [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.
Join the [slack community](https://signoz.io/slack) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.

If you have any ideas, questions, or any feedback, please share on our [Github Discussions](https://github.com/SigNoz/signoz/discussions)

@@ -15,7 +15,7 @@

<h3 align="center">
  <a href="https://signoz.io/docs"><b>Documentation</b></a> •
  <a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> •
  <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>

@@ -38,7 +38,7 @@ SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications

## Join our Slack community

Come say hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
Come say hi to us on [Slack](https://signoz.io/slack) 👋

<br /><br />

@@ -129,7 +129,7 @@ Moreover, SigNoz has some more advanced features than Jaeger:

We ❤️ contributions big or small. Read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.

Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community.](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)
Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community.](https://signoz.io/slack)

<br /><br />

@@ -145,7 +145,7 @@ You can find the documentation at https://signoz.io/docs/. If you have

## Community

Join the [Slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to learn more about distributed tracing, observability, or SigNoz, and to connect with other users and contributors.
Join the [Slack community](https://signoz.io/slack) to learn more about distributed tracing, observability, or SigNoz, and to connect with other users and contributors.

If you have any ideas, questions, or feedback, please share them in our [Github Discussions](https://github.com/SigNoz/signoz/discussions)


@@ -29,7 +29,7 @@ SigNoz helps developers monitor applications and troubleshoot problems in deployed applications

## Join our Slack community

Come say hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
Come say hi to us on [Slack](https://signoz.io/slack) 👋

<br /><br />

@@ -120,7 +120,7 @@ Jaeger only does distributed tracing, while SigNoz does both metrics and traces

We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.

Not sure how to get started? Just ping us on the `#contributing` channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
Not sure how to get started? Just ping us on the `#contributing` channel in our [slack community](https://signoz.io/slack).

<br /><br />

@@ -136,7 +136,7 @@ Jaeger only does distributed tracing, while SigNoz does both metrics and traces

## Community

Join the [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to learn more about distributed tracing, observability, and SigNoz, and to connect with other users and contributors.
Join the [slack community](https://signoz.io/slack) to learn more about distributed tracing, observability, and SigNoz, and to connect with other users and contributors.

If you have any ideas, questions, or feedback, please share them with us on [Github Discussions](https://github.com/SigNoz/signoz/discussions).

18
SECURITY.md
Normal file
@@ -0,0 +1,18 @@
# Security Policy

SigNoz looks forward to working with security researchers across the world to keep SigNoz and our users safe. If you have found an issue in our systems/applications, please reach out to us.

## Supported Versions
We always recommend using the latest version of SigNoz to ensure you get all security updates.

## Reporting a Vulnerability

If you believe you have found a security vulnerability within SigNoz, please let us know right away. We'll try to fix the problem as soon as possible.

**Do not report vulnerabilities using public GitHub issues**. Instead, email <security@signoz.io> with a detailed account of the issue. Please submit one issue per email; this helps us triage vulnerabilities.

Once we've received your email, we'll keep you updated as we fix the vulnerability.

## Thanks

Thank you for keeping SigNoz and our users safe. 🙇
88
deploy/README.md
Normal file
@@ -0,0 +1,88 @@
# Deploy

Check that you have cloned [signoz/signoz](https://github.com/signoz/signoz)
and are currently in the `signoz/deploy` folder.

## Docker

If you don't have docker set up, please follow [this guide](https://docs.docker.com/engine/install/)
to set up docker before proceeding with the next steps.

### Using Install Script

Now run the following command to install:

```sh
./install.sh
```

### Using Docker Compose

If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
to set up docker compose before proceeding with the next steps.

For x86 chip (amd):

```sh
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```

For Mac with Apple chip (arm):

```sh
docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
```

Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see
the data generated from hotrod in the SigNoz UI.

## Kubernetes

### Using Helm

#### Bring up SigNoz cluster

```sh
helm repo add signoz https://charts.signoz.io

kubectl create ns platform

helm -n platform install my-release signoz/signoz
```

To access the UI, you can `port-forward` the frontend service:

```sh
kubectl -n platform port-forward svc/my-release-frontend 3301:3301
```

Open http://localhost:3301 in your favourite browser. A few minutes after you generate load
from the HotROD application, you should see the data generated from hotrod in the SigNoz UI.

#### Test HotROD application with SigNoz

```sh
kubectl create ns sample-application

kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
```

To generate load:

```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
 --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```

To stop load:

```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
 --restart='OnFailure' -i --tty --rm --command -- curl \
 http://locust-master:8089/stop
```

## Uninstall/Troubleshoot?

Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more.
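If you installed via Docker Compose and just want to stop everything locally, the same compose file can tear the stack down. A minimal sketch; the `-v` flag additionally removes volumes, so omit it if you want to keep the collected data:

```sh
# Stop and remove the SigNoz containers (and, with -v, their volumes)
docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v
```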
35
deploy/docker-swarm/clickhouse-setup/alertmanager.yml
Normal file
@@ -0,0 +1,35 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
- name: 'slack-notifications'
  slack_configs:
  - channel: '#alerts'
    send_resolved: true
    icon_url: https://avatars3.githubusercontent.com/u/3380462
    title: |-
      [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
      {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
        {{" "}}(
        {{- with .CommonLabels.Remove .GroupLabels.Names }}
          {{- range $index, $label := .SortedPairs -}}
            {{ if $index }}, {{ end }}
            {{- $label.Name }}="{{ $label.Value -}}"
          {{- end }}
        {{- end -}}
        )
      {{- end }}
    text: >-
      {{ range .Alerts -}}
      *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

      *Description:* {{ .Annotations.description }}

      *Details:*
      {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
      {{ end }}
      {{ end }}
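Edits to this file can be validated before redeploying with Alertmanager's bundled `amtool`. A quick sketch, assuming `amtool` is available on your PATH:

```sh
# Parse the config and report syntax or schema errors
amtool check-config deploy/docker-swarm/clickhouse-setup/alertmanager.yml
```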
11
deploy/docker-swarm/clickhouse-setup/alerts.yml
Normal file
@@ -0,0 +1,11 @@
groups:
- name: ExampleCPULoadGroup
  rules:
  - alert: HighCpuLoad
    expr: system_cpu_load_average_1m > 0.1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: High CPU load
      description: "CPU load is > 0.1\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
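Since this follows the standard Prometheus alerting-rules format, the file can be linted with `promtool`. A sketch, assuming `promtool` is installed locally:

```sh
# Validate rule syntax and report how many rules were found
promtool check rules deploy/docker-swarm/clickhouse-setup/alerts.yml
```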
542
deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml
Normal file
@@ -0,0 +1,542 @@
<?xml version="1.0"?>
<yandex>
    <logger>
        <level>information</level>
        <console>1</console>
    </logger>

    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>

    <!-- For HTTPS and SSL over native protocol. -->
    <!--
    <https_port>8443</https_port>
    <tcp_ssl_port>9440</tcp_ssl_port>
    -->

    <!-- Used with https_port and tcp_ssl_port. Full ssl options list: https://github.com/yandex/ClickHouse/blob/master/contrib/libpoco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
    <openSSL>
        <server> <!-- Used for https server AND secure tcp port -->
            <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
            <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
            <!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
            <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
            <verificationMode>none</verificationMode>
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
        </server>

        <client> <!-- Used for connecting to https dictionary source -->
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
            <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
            <invalidCertificateHandler>
                <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
                <name>RejectCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>

    <!-- Example config for tiered storage -->
    <!-- <storage_configuration>
        <disks>
            <default>
                <keep_free_space_bytes>10485760</keep_free_space_bytes>
            </default>
            <s3>
                <type>s3</type>
                <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
                <access_key_id>ACCESS-KEY-ID</access_key_id>
                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
            </s3>
        </disks>
        <policies>
            <tiered>
                <volumes>
                    <default>
                        <disk>default</disk>
                    </default>
                    <s3>
                        <disk>s3</disk>
                    </s3>
                </volumes>
            </tiered>
        </policies>
    </storage_configuration> -->

    <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
    <!--
    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
    -->

    <!-- Port for communication between replicas. Used for data exchange. -->
    <interserver_http_port>9009</interserver_http_port>

    <!-- Hostname that is used by other replicas to request this server.
         If not specified, then it is determined analogously to the 'hostname -f' command.
         This setting could be used to switch replication to another network interface.
    -->
    <!--
    <interserver_http_host>example.yandex.ru</interserver_http_host>
    -->

    <!-- Listen specified host. Use :: (wildcard IPv6 address) if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
    <listen_host>::</listen_host>
    <!-- Same for hosts with disabled ipv6: -->
    <!-- <listen_host>0.0.0.0</listen_host> -->

    <!-- Default values - try listen localhost on ipv4 and ipv6: -->
    <!-- <listen_host>0.0.0.0</listen_host> -->

    <max_connections>4096</max_connections>
    <keep_alive_timeout>3</keep_alive_timeout>

    <!-- Maximum number of concurrent queries. -->
    <max_concurrent_queries>100</max_concurrent_queries>

    <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve the
         correct maximum value. -->
    <!-- <max_open_files>262144</max_open_files> -->

    <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         Cache is used when the 'use_uncompressed_cache' user setting is turned on (off by default).
         Uncompressed cache is advantageous only for very short queries and in rare cases.
    -->
    <uncompressed_cache_size>8589934592</uncompressed_cache_size>

    <!-- Approximate size of mark cache, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         You should not lower this value.
    -->
    <mark_cache_size>5368709120</mark_cache_size>


    <!-- Path to data directory, with trailing slash. -->
    <path>/var/lib/clickhouse/</path>

    <!-- Path to temporary data for processing hard queries. -->
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>

    <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
    <users_config>users.xml</users_config>

    <!-- Default profile of settings. -->
    <default_profile>default</default_profile>

    <!-- Default database. -->
    <default_database>default</default_database>

    <!-- Server time zone could be set here.

         Time zone is used when converting between String and DateTime types,
         when printing DateTime in text formats and parsing DateTime from text,
         it is used in date and time related functions, if specific time zone was not passed as an argument.

         Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
         If not specified, system time zone at server startup is used.

         Please note that the server could display a time zone alias instead of the specified name.
         Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
    -->
    <!-- <timezone>Europe/Moscow</timezone> -->

    <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
         Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
    -->
    <!-- <umask>022</umask> -->

    <!-- Configuration of clusters that could be used in Distributed tables.
         https://clickhouse.yandex/reference_en.html#Distributed
    -->
    <remote_servers incl="clickhouse_remote_servers" >
        <!-- Test only shard config for testing distributed storage -->
        <test_shard_localhost>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_shard_localhost>
    </remote_servers>


    <!-- If an element has an 'incl' attribute, then the corresponding substitution from another file will be used as its value.
         By default, the path to the file with substitutions is /etc/metrika.xml. It could be changed in the config in the 'include_from' element.
         Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
    -->

    <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
         Optional. If you don't use replicated tables, you could omit that.

         See https://clickhouse.yandex/reference_en.html#Data%20replication
    -->
    <zookeeper incl="zookeeper-servers" optional="true" />

    <!-- Substitutions for parameters of replicated tables.
         Optional. If you don't use replicated tables, you could omit that.

         See https://clickhouse.yandex/reference_en.html#Creating%20replicated%20tables
    -->
    <macros incl="macros" optional="true" />


    <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
    <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>


    <!-- Maximum session timeout, in seconds. Default: 3600. -->
    <max_session_timeout>3600</max_session_timeout>

    <!-- Default session timeout, in seconds. Default: 60. -->
    <default_session_timeout>60</default_session_timeout>

    <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
    <!--
        interval - send every X second
        root_path - prefix for keys
        hostname_in_path - append hostname to root_path (default = true)
        metrics - send data from table system.metrics
        events - send data from table system.events
        asynchronous_metrics - send data from table system.asynchronous_metrics
    -->
    <!--
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>60</interval>
        <root_path>one_min</root_path>
        <hostname_in_path>true</hostname_in_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
    </graphite>
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>1</interval>
        <root_path>one_sec</root_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>false</asynchronous_metrics>
    </graphite>
    -->


    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
        <!-- What table to insert data into. If the table does not exist, it will be created.
             When the query log structure is changed after a system update,
             the old table will be renamed and a new table will be created automatically.
        -->
        <database>system</database>
        <table>query_log</table>

        <!-- Interval of flushing data. -->
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_log>


    <!-- Uncomment if use part_log
    <part_log>
        <database>system</database>
        <table>part_log</table>

        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </part_log>
    -->


    <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
         See https://clickhouse.yandex/reference_en.html#Internal%20dictionaries
    -->

    <!-- Path to file with region hierarchy. -->
    <!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->

    <!-- Path to directory with files containing names of regions -->
    <!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->


    <!-- Configuration of external dictionaries. See:
         https://clickhouse.yandex/reference_en.html#External%20Dictionaries
    -->
    <dictionaries_config>*_dictionary.xml</dictionaries_config>

    <!-- Uncomment if you want data to be compressed 30-100% better.
         Don't do that if you just started using ClickHouse.
    -->
    <compression incl="clickhouse_compression">
        <!--
        <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
        <case>

            <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
            <min_part_size>10000000000</min_part_size>        <!- - Min part size in bytes. - ->
            <min_part_size_ratio>0.01</min_part_size_ratio>   <!- - Min size of part relative to whole table size. - ->

            <!- - What compression method to use. - ->
            <method>zstd</method>
        </case>
        -->
    </compression>

    <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
         Works only if ZooKeeper is enabled. Comment it out if such functionality isn't required. -->
    <distributed_ddl>
        <!-- Path in ZooKeeper to queue with DDL queries -->
        <path>/clickhouse/task_queue/ddl</path>
    </distributed_ddl>

    <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
    <!--
    <merge_tree>
        <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
    </merge_tree>
    -->

    <!-- Protection from accidental DROP.
         If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), then the table cannot be dropped with any DROP query.
         If you want to delete one table and don't want to restart clickhouse-server, you could create the special file <clickhouse-path>/flags/force_drop_table and run DROP once.
         By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any tables.
         Uncomment to disable protection.
    -->
    <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->

    <!-- Example of parameters for GraphiteMergeTree table engine -->
    <graphite_rollup>
        <!-- carbon -->
        <pattern>
            <regexp>^carbon\.</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>7776000</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>10368000</age>
                <precision>21600</precision>
            </retention>
            <retention>
                <age>34560000</age>
                <precision>43200</precision>
            </retention>
            <retention>
                <age>63072000</age>
                <precision>86400</precision>
            </retention>
            <retention>
                <age>94608000</age>
                <precision>604800</precision>
            </retention>
        </pattern>
        <!-- collectd -->
        <pattern>
            <regexp>^collectd\.</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>10</precision>
            </retention>
            <retention>
                <age>43200</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>864000</age>
                <precision>900</precision>
            </retention>
            <retention>
                <age>1728000</age>
                <precision>1800</precision>
            </retention>
            <retention>
                <age>3456000</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>10368000</age>
                <precision>21600</precision>
            </retention>
            <retention>
                <age>34560000</age>
                <precision>43200</precision>
            </retention>
            <retention>
                <age>63072000</age>
                <precision>86400</precision>
            </retention>
            <retention>
                <age>94608000</age>
                <precision>604800</precision>
            </retention>
        </pattern>
        <!-- high -->
        <pattern>
            <regexp>^high\.</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>10</precision>
            </retention>
            <retention>
                <age>172800</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>864000</age>
                <precision>900</precision>
            </retention>
            <retention>
                <age>1728000</age>
                <precision>1800</precision>
            </retention>
            <retention>
                <age>3456000</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>10368000</age>
                <precision>21600</precision>
            </retention>
            <retention>
                <age>34560000</age>
                <precision>43200</precision>
            </retention>
            <retention>
                <age>63072000</age>
                <precision>86400</precision>
            </retention>
            <retention>
                <age>94608000</age>
                <precision>604800</precision>
            </retention>
        </pattern>
        <!-- medium -->
        <pattern>
            <regexp>^medium\.</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>864000</age>
                <precision>900</precision>
            </retention>
            <retention>
                <age>1728000</age>
                <precision>1800</precision>
            </retention>
            <retention>
                <age>3456000</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>10368000</age>
                <precision>21600</precision>
            </retention>
            <retention>
                <age>34560000</age>
                <precision>43200</precision>
            </retention>
            <retention>
                <age>63072000</age>
                <precision>86400</precision>
            </retention>
            <retention>
                <age>94608000</age>
                <precision>604800</precision>
            </retention>
        </pattern>
        <!-- low -->
        <pattern>
            <regexp>^low\.</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>600</precision>
            </retention>
            <retention>
                <age>15552000</age>
                <precision>1800</precision>
            </retention>
            <retention>
                <age>31536000</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>63072000</age>
                <precision>21600</precision>
            </retention>
            <retention>
                <age>126144000</age>
                <precision>43200</precision>
            </retention>
            <retention>
                <age>252288000</age>
                <precision>86400</precision>
            </retention>
            <retention>
                <age>315360000</age>
                <precision>604800</precision>
            </retention>
        </pattern>
        <!-- default -->
        <default>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>864000</age>
                <precision>900</precision>
            </retention>
            <retention>
                <age>1728000</age>
                <precision>1800</precision>
            </retention>
            <retention>
                <age>3456000</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>10368000</age>
                <precision>21600</precision>
            </retention>
            <retention>
                <age>34560000</age>
                <precision>43200</precision>
            </retention>
            <retention>
                <age>63072000</age>
                <precision>86400</precision>
            </retention>
            <retention>
                <age>94608000</age>
                <precision>604800</precision>
            </retention>
        </default>
    </graphite_rollup>

    <!-- Directory in <clickhouse-path> containing schema files for various input formats.
         The directory will be created if it doesn't exist.
    -->
    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
</yandex>
139
deploy/docker-swarm/clickhouse-setup/docker-compose.yaml
Normal file
@@ -0,0 +1,139 @@
version: "3.9"

services:
  clickhouse:
    image: yandex/clickhouse-server:21.12.3.32
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
    deploy:
      restart_policy:
        condition: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3

  alertmanager:
    image: signoz/alertmanager:0.23.0-0.1
    volumes:
      - ./data/alertmanager:/data
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    depends_on:
      - query-service
    deploy:
      restart_policy:
        condition: on-failure

  query-service:
    image: signoz/query-service:0.8.2
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
    #   - "6060:6060"     # pprof port
    #   - "8080:8080"     # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse

  frontend:
    image: signoz/frontend:0.8.2
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/otelcontribcol:0.45.1-0.3
    command: ["--config=/etc/otel-collector-config.yaml"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "4317:4317"       # OTLP gRPC receiver
      - "4318:4318"       # OTLP HTTP receiver
      # - "8889:8889"     # Prometheus metrics exposed by the agent
      # - "13133:13133"   # health_check
      # - "14268:14268"   # Jaeger receiver
      # - "55678:55678"   # OpenCensus receiver
      # - "55679:55679"   # zpages extension
      # - "55680:55680"   # OTLP gRPC legacy receiver
      # - "55681:55681"   # OTLP HTTP legacy receiver
    deploy:
      mode: replicated
      replicas: 3
      restart_policy:
        condition: on-failure
      resources:
        limits:
          memory: 2000m
    depends_on:
      - clickhouse

  otel-collector-metrics:
    image: signoz/otelcontribcol:0.45.1-0.3
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
    logging:
      options:
        max-size: 50m
        max-file: "3"

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
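Because this compose file carries swarm-only `deploy:` keys (restart policies, replicas, resource limits), it is meant for `docker stack deploy` rather than plain `docker-compose up`. A sketch, where the stack name `signoz` is an arbitrary choice:

```sh
docker swarm init   # once, if this node is not already a swarm manager
docker stack deploy -c deploy/docker-swarm/clickhouse-setup/docker-compose.yaml signoz
docker stack services signoz   # verify that all replicas come up
```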
@@ -19,6 +19,10 @@ CREATE TABLE IF NOT EXISTS signoz_index (
  dbName Nullable(String) CODEC(ZSTD(1)),
  dbOperation Nullable(String) CODEC(ZSTD(1)),
  peerService Nullable(String) CODEC(ZSTD(1)),
  INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
  INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
  INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
  INDEX idx_kind kind TYPE minmax GRANULARITY 4,
  INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
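To check that ClickHouse actually uses the new skip indexes for a query, `EXPLAIN indexes = 1` can be run through `clickhouse-client`. A sketch only: the container name, the example predicate, and the assumption that the table lives in the `signoz_traces` database are all illustrative, not taken from the diff:

```sh
# Show which skip indexes (e.g. idx_service) prune granules for this query
docker exec -it clickhouse-setup_clickhouse_1 clickhouse-client --query \
  "EXPLAIN indexes = 1 SELECT count() FROM signoz_traces.signoz_index WHERE serviceName = 'frontend'"
```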
@@ -0,0 +1,76 @@
receivers:
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: "localhost:12345"
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
  hostmetrics:
    collection_interval: 30s
    scrapers:
      cpu:
      load:
      memory:
      disk:
      filesystem:
      network:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
    dimensions_cache_size: 10000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  prometheus:
    endpoint: "0.0.0.0:8889"
service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp, hostmetrics]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
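A minimal liveness check for the OTLP HTTP receiver configured above is to POST an empty export request to port 4318; an empty JSON object is a valid no-op payload and should return HTTP 200. A sketch, assuming the port is published on localhost as in the compose files:

```sh
# Expect "HTTP/1.1 200 OK" if the collector's OTLP HTTP receiver is up
curl -i -X POST http://localhost:4318/v1/traces \
  -H 'Content-Type: application/json' \
  -d '{}'
```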
@@ -0,0 +1,47 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:

  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        - job_name: "otel-collector"
          scrape_interval: 30s
          static_configs:
            - targets: ["otel-collector:8889"]
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

service:
  extensions: [health_check, zpages]
  pipelines:
    metrics:
      receivers: [otlp, prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
26
deploy/docker-swarm/clickhouse-setup/prometheus.yml
Normal file
@@ -0,0 +1,26 @@
# my global config
global:
  scrape_interval: 5s      # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:


remote_read:
  - url: tcp://clickhouse:9000/?database=signoz_metrics
16
deploy/docker-swarm/common/locust-scripts/locustfile.py
Normal file
@@ -0,0 +1,16 @@
from locust import HttpUser, task, between


class UserTasks(HttpUser):
    # Each simulated user waits 5-15 seconds between consecutive tasks.
    wait_time = between(5, 15)

    # Each task hits HotROD's /dispatch endpoint for a fixed customer,
    # generating traces through the instrumented sample application.
    @task
    def rachel(self):
        self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")

    @task
    def trom(self):
        self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")

    @task
    def japanese(self):
        self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")

    @task
    def coffee(self):
        self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")
37
deploy/docker-swarm/common/nginx-config.conf
Normal file
@@ -0,0 +1,37 @@
server {
    listen 3301;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    location / {
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }

    location /api/alertmanager {
        proxy_pass http://alertmanager:9093/api/v2;
    }

    location /api {
        proxy_pass http://query-service:8080/api;
    }

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
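Once the frontend container is running, both proxy locations can be exercised directly from the host. A sketch; `/api/v1/version` is the same endpoint the query-service healthchecks in the compose files poll, and `/status` is Alertmanager's standard v2 status endpoint:

```sh
# Proxied by nginx to query-service:8080
curl http://localhost:3301/api/v1/version

# Proxied by nginx to alertmanager:9093/api/v2
curl http://localhost:3301/api/alertmanager/status
```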
0
deploy/docker-swarm/dashboards/.gitkeep
Normal file
35
deploy/docker/clickhouse-setup/alertmanager.yml
Normal file
@@ -0,0 +1,35 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
- name: 'slack-notifications'
  slack_configs:
  - channel: '#alerts'
    send_resolved: true
    icon_url: https://avatars3.githubusercontent.com/u/3380462
    title: |-
      [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
      {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
        {{" "}}(
        {{- with .CommonLabels.Remove .GroupLabels.Names }}
          {{- range $index, $label := .SortedPairs -}}
            {{ if $index }}, {{ end }}
            {{- $label.Name }}="{{ $label.Value -}}"
          {{- end }}
        {{- end -}}
        )
      {{- end }}
    text: >-
      {{ range .Alerts -}}
      *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

      *Description:* {{ .Annotations.description }}

      *Details:*
      {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
      {{ end }}
      {{ end }}
11
deploy/docker/clickhouse-setup/alerts.yml
Normal file
@@ -0,0 +1,11 @@
groups:
- name: ExampleCPULoadGroup
  rules:
  - alert: HighCpuLoad
    expr: system_cpu_load_average_1m > 0.1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: High CPU load
      description: "CPU load is > 0.1\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
@@ -1,11 +1,8 @@
<?xml version="1.0"?>
<yandex>
    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
        <level>information</level>
        <console>1</console>
    </logger>

    <http_port>8123</http_port>
@@ -45,6 +42,34 @@
        </client>
    </openSSL>

    <!-- Example config for tiered storage -->
    <!-- <storage_configuration>
        <disks>
            <default>
                <keep_free_space_bytes>10485760</keep_free_space_bytes>
            </default>
            <s3>
                <type>s3</type>
                <endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
                <access_key_id>ACCESS-KEY-ID</access_key_id>
                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
            </s3>
        </disks>
        <policies>
            <tiered>
                <volumes>
                    <default>
                        <disk>default</disk>
                    </default>
                    <s3>
                        <disk>s3</disk>
                    </s3>
                </volumes>
            </tiered>
        </policies>
    </storage_configuration> -->


    <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
    <!--
    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
0
deploy/docker/clickhouse-setup/data/signoz/.gitkeep
Normal file
133
deploy/docker/clickhouse-setup/docker-compose.arm.yaml
Normal file
@@ -0,0 +1,133 @@
|
||||
version: "2.4"
|
||||
|
||||
services:
|
||||
clickhouse:
|
||||
image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm
|
||||
# ports:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
restart: on-failure
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
alertmanager:
|
||||
image: signoz/alertmanager:0.23.0-0.1
|
||||
volumes:
|
||||
- ./data/alertmanager:/data
|
||||
depends_on:
|
||||
query-service:
|
||||
condition: service_healthy
|
||||
restart: on-failure
|
||||
command:
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
|
||||
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
|
||||
|
||||
|
||||
query-service:
|
||||
image: signoz/query-service:0.8.2
|
||||
container_name: query-service
|
||||
command: ["-config=/root/config/prometheus.yml"]
|
||||
# ports:
|
||||
# - "6060:6060" # pprof port
|
||||
# - "8080:8080" # query-service port
|
||||
volumes:
|
||||
- ./prometheus.yml:/root/config/prometheus.yml
|
||||
- ../dashboards:/root/config/dashboards
|
||||
- ./data/signoz/:/var/lib/signoz/
|
||||
environment:
|
||||
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-standalone-arm
|
||||
restart: on-failure
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
frontend:
|
||||
image: signoz/frontend:0.8.2
|
||||
container_name: frontend
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
- alertmanager
|
||||
- query-service
|
||||
ports:
|
||||
- "3301:3301"
|
||||
volumes:
|
||||
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
|
||||
|
||||
otel-collector:
|
||||
image: signoz/otelcontribcol:0.45.1-0.3
|
||||
command: ["--config=/etc/otel-collector-config.yaml"]
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
ports:
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
# - "8889:8889" # Prometheus metrics exposed by the agent
|
||||
# - "13133:13133" # health_check
|
||||
# - "14268:14268" # Jaeger receiver
|
||||
# - "55678:55678" # OpenCensus receiver
|
||||
# - "55679:55679" # zpages extension
|
||||
# - "55680:55680" # OTLP gRPC legacy receiver
|
||||
# - "55681:55681" # OTLP HTTP legacy receiver
|
||||
mem_limit: 2000m
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
otel-collector-metrics:
|
||||
image: signoz/otelcontribcol:0.45.1-0.3
|
||||
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
|
||||
volumes:
|
||||
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
|
||||
hotrod:
|
||||
image: jaegertracing/example-hotrod:1.30
|
||||
container_name: hotrod
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
command: ["all"]
|
||||
environment:
|
||||
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
|
||||
|
||||
load-hotrod:
|
||||
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
|
||||
container_name: load-hotrod
|
||||
hostname: load-hotrod
|
||||
environment:
|
||||
ATTACKED_HOST: http://hotrod:8080
|
||||
LOCUST_MODE: standalone
|
||||
NO_PROXY: standalone
|
||||
TASK_DELAY_FROM: 5
|
||||
TASK_DELAY_TO: 30
|
||||
QUIET_MODE: "${QUIET_MODE:-false}"
|
||||
LOCUST_OPTS: "--headless -u 10 -r 1"
|
||||
volumes:
|
||||
- ../common/locust-scripts:/locust
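With this stack up, the only ingest ports published to the host are 4317/4318 on the collector, so an end-to-end smoke test is to push one span over OTLP and look for it in the UI on port 3301. A sketch, assuming the opentelemetry-sdk and opentelemetry-exporter-otlp packages are installed; the service name is illustrative.

# otlp_smoke_test.py -- sketch; endpoint and service name are assumptions
from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter

provider = TracerProvider(resource=Resource.create({"service.name": "smoke-test"}))
provider.add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter(endpoint="localhost:4317", insecure=True))
)
trace.set_tracer_provider(provider)

with trace.get_tracer(__name__).start_as_current_span("smoke-span"):
    pass  # the span itself is the payload

provider.shutdown()  # flush the batch processor before exit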
@@ -2,95 +2,124 @@ version: "2.4"

services:
  clickhouse:
    image: yandex/clickhouse-server
    expose:
      - 8123
      - 9000
    ports:
      - 9001:9000
      - 8123:8123
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
      - ./data/clickhouse/:/var/lib/clickhouse/
    image: yandex/clickhouse-server:21.12.3.32
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
    restart: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3

    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3
  alertmanager:
    image: signoz/alertmanager:0.23.0-0.1
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:0.4.0
    image: signoz/query-service:0.8.2
    container_name: query-service
    command: ["-config=/root/config/prometheus.yml"]
    ports:
      - "8080:8080"
    # ports:
    #   - "6060:6060"     # pprof port
    #   - "8080:8080"     # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards

      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
      - STORAGE=clickhouse
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
      - GODEBUG=netdns=go

      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      clickhouse:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.4.0
    container_name: frontend

  frontend:
    image: signoz/frontend:0.8.2
    container_name: frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    links:
      - "query-service"
    ports:
      - "3000:3000"
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf


  otel-collector:
    image: signoz/otelcontribcol:0.4.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    image: signoz/otelcontribcol:0.45.1-0.3
    command: ["--config=/etc/otel-collector-config.yaml"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"     # pprof extension
      - "8887:8888"     # Prometheus metrics exposed by the agent
      - "14268:14268"   # Jaeger receiver
      - "55678"         # OpenCensus receiver
      - "55680:55680"   # OTLP HTTP/2.0 legacy port
      - "55681:55681"   # OTLP HTTP/1.0 receiver
      - "4317:4317"     # OTLP GRPC receiver
      - "55679:55679"   # zpages extension
      - "13133"         # health_check
      - "4317:4317"     # OTLP gRPC receiver
      - "4318:4318"     # OTLP HTTP receiver
      # - "8889:8889"   # Prometheus metrics exposed by the agent
      # - "13133:13133" # health_check
      # - "14268:14268" # Jaeger receiver
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zpages extension
      # - "55680:55680" # OTLP gRPC legacy receiver
      # - "55681:55681" # OTLP HTTP legacy receiver
    mem_limit: 2000m
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy

  otel-collector-metrics:
    image: signoz/otelcontribcol:0.45.1-0.3
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
@@ -100,4 +129,4 @@ services:
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
      - ../common/locust-scripts:/locust
@@ -1,4 +1,8 @@
receivers:
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: "localhost:12345"
  otlp:
    protocols:
      grpc:
@@ -8,22 +12,38 @@ receivers:
      grpc:
      thrift_http:
  hostmetrics:
    collection_interval: 10s
    collection_interval: 30s
    scrapers:
      cpu:
      load:
      memory:
      disk:
      filesystem:
      network:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
    dimensions_cache_size: 10000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
@@ -32,21 +52,25 @@ extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true

  prometheus:
    endpoint: "0.0.0.0:8889"
service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [batch]
      exporters: [clickhouse]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp, hostmetrics]
      processors: [batch]
      exporters: [clickhousemetricswrite]
      exporters: [clickhousemetricswrite]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
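The spanmetrics pipeline ends at a Prometheus exporter on 0.0.0.0:8889, which the companion metrics collector scrapes. To eyeball that output yourself, fetch the endpoint from inside the compose network (8889 is not published to the host by default). A sketch; the metric-name filter is an assumption:

# peek_spanmetrics.py -- run inside the network, e.g. via docker-compose exec
from urllib.request import urlopen

body = urlopen("http://otel-collector:8889/metrics", timeout=5).read().decode()
# Show a few of the span-derived latency series, if any have been produced yet
print([line for line in body.splitlines() if "latency" in line][:10])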
@@ -0,0 +1,47 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:

  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        - job_name: "otel-collector"
          scrape_interval: 30s
          static_configs:
            - targets: ["otel-collector:8889"]
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics

service:
  extensions: [health_check, zpages]
  pipelines:
    metrics:
      receivers: [otlp, prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
@@ -9,12 +9,13 @@ alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093
          - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
@@ -1,7 +1,7 @@
server {
    listen 3000;
    listen 3301;
    server_name _;


    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
@@ -9,22 +9,29 @@ server {
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    gzip_http_version 1.1;

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }

    location /api/alertmanager {
        proxy_pass http://alertmanager:9093/api/v2;
    }

    location /api {
        proxy_pass http://query-service:8080/api;

        proxy_pass http://query-service:8080/api;
    }

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
        root /usr/share/nginx/html;
    }
}
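The two proxy locations give the frontend a single origin: UI assets at /, query-service under /api, and Alertmanager under /api/alertmanager. A sketch checking both routes from the host, assuming port 3301 is published as in the compose files above and the requests package is installed:

# check_proxy_routes.py -- sketch; paths follow the proxy_pass rules above
import requests

# /api/... is forwarded to query-service:8080/api/...
print(requests.get("http://localhost:3301/api/v1/version", timeout=5).status_code)
# /api/alertmanager/... maps onto Alertmanager's /api/v2/...
print(requests.get("http://localhost:3301/api/alertmanager/status", timeout=5).status_code)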
@@ -1,274 +0,0 @@
version: "2.4"

volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}

# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707

services:

  zookeeper:
    image: bitnami/zookeeper:3.6.2-debian-10-r100
    ports:
      - "2181:2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes


  kafka:
    # image: wurstmeister/kafka
    image: bitnami/kafka:2.7.0-debian-10-r1
    ports:
      - "9092:9092"
    hostname: kafka
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'

    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/data
      - coordinator_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_tiny/coordinator
      - environment_tiny/common

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_tiny/broker
      - environment_tiny/common

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/data
      - historical_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_tiny/historical
      - environment_tiny/common

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/data
      - middle_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_tiny/middlemanager
      - environment_tiny/common

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/var
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_tiny/router
      - environment_tiny/common
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
      interval: 30s
      timeout: 5s
      retries: 5

  flatten-processor:
    image: signoz/flattener-processor:0.4.0
    container_name: flattener-processor

    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"

    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans


  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.4.0
    container_name: query-service

    depends_on:
      - router
    ports:
      - "8080:8080"
    volumes:
      - ../dashboards:/root/config/dashboards
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - STORAGE=druid
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w

    depends_on:
      router:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.4.0
    container_name: frontend

    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3000:3000"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"

    depends_on:
      - router
    restart: on-failure:6

    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json


  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"

    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"     # pprof extension
      - "8887:8888"     # Prometheus metrics exposed by the agent
      - "14268:14268"   # Jaeger receiver
      - "55678"         # OpenCensus receiver
      - "55680:55680"   # OTLP HTTP/2.0 legacy port
      - "55681:55681"   # OTLP HTTP/1.0 receiver
      - "4317:4317"     # OTLP GRPC receiver
      - "55679:55679"   # zpages extension
      - "13133"         # health_check
    depends_on:
      kafka:
        condition: service_healthy


  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces


  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
@@ -1,270 +0,0 @@
version: "2.4"

volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}

# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707

services:

  zookeeper:
    image: bitnami/zookeeper:3.6.2-debian-10-r100
    ports:
      - "2181:2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes


  kafka:
    # image: wurstmeister/kafka
    image: bitnami/kafka:2.7.0-debian-10-r1
    ports:
      - "9092:9092"
    hostname: kafka
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'

    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/druid/deepStorage
      - coordinator_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_small/coordinator

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_small/broker

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/druid/deepStorage
      - historical_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_small/historical

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/druid/deepStorage
      - middle_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_small/middlemanager

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_small/router
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
      interval: 30s
      timeout: 5s
      retries: 5

  flatten-processor:
    image: signoz/flattener-processor:0.4.0
    container_name: flattener-processor

    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"

    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans


  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.4.0
    container_name: query-service

    depends_on:
      - router
    ports:
      - "8080:8080"

    volumes:
      - ../dashboards:/root/config/dashboards
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - STORAGE=druid
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w

    depends_on:
      router:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.4.0
    container_name: frontend

    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3000:3000"
    volumes:
      - ./nginx-config.conf:/etc/nginx/conf.d/default.conf

  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"

    depends_on:
      - router
    restart: on-failure:6

    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json


  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"

    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"     # pprof extension
      - "8887:8888"     # Prometheus metrics exposed by the agent
      - "14268:14268"   # Jaeger receiver
      - "55678"         # OpenCensus receiver
      - "55680:55680"   # OTLP HTTP/2.0 legacy grpc receiver
      - "55681:55681"   # OTLP HTTP/1.0 receiver
      - "4317:4317"     # OTLP GRPC receiver
      - "55679:55679"   # zpages extension
      - "13133"         # health_check
    depends_on:
      kafka:
        condition: service_healthy


  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces


  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ./locust-scripts:/locust
@@ -1 +0,0 @@
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
@@ -1,69 +0,0 @@
{
  "type": "kafka",
  "dataSchema": {
    "dataSource": "flattened_spans",
    "parser": {
      "type": "string",
      "parseSpec": {
        "format": "json",
        "timestampSpec": {
          "column": "StartTimeUnixNano",
          "format": "nano"
        },
        "dimensionsSpec": {
          "dimensions": [
            "TraceId",
            "SpanId",
            "ParentSpanId",
            "Name",
            "ServiceName",
            "References",
            "Tags",
            "ExternalHttpMethod",
            "ExternalHttpUrl",
            "Component",
            "DBSystem",
            "DBName",
            "DBOperation",
            "PeerService",
            {
              "type": "string",
              "name": "TagsKeys",
              "multiValueHandling": "ARRAY"
            },
            {
              "type": "string",
              "name": "TagsValues",
              "multiValueHandling": "ARRAY"
            },
            { "name": "DurationNano", "type": "Long" },
            { "name": "Kind", "type": "int" },
            { "name": "StatusCode", "type": "int" }
          ]
        }
      }
    },
    "metricsSpec" : [
      { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
    ],
    "granularitySpec": {
      "type": "uniform",
      "segmentGranularity": "DAY",
      "queryGranularity": "NONE",
      "rollup": false
    }
  },
  "tuningConfig": {
    "type": "kafka",
    "reportParseExceptions": true
  },
  "ioConfig": {
    "topic": "flattened_spans",
    "replicas": 1,
    "taskDuration": "PT20M",
    "completionTimeout": "PT30M",
    "consumerProperties": {
      "bootstrap.servers": "kafka:9092"
    }
  }
}
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=768m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=100MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=1280m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=2
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=1g
DRUID_XMS=1g
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=2g

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=2
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=128m
DRUID_XMS=128m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m

druid_emitter_logging_logLevel=debug

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000

druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]




druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB


druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,26 +0,0 @@
# For S3 storage

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]


# druid_storage_type=s3
# druid_storage_bucket=<s3-bucket-name>
# druid_storage_baseKey=druid/segments

# AWS_ACCESS_KEY_ID=<s3-access-id>
# AWS_SECRET_ACCESS_KEY=<s3-access-key>
# AWS_REGION=<s3-aws-region>

# druid_indexer_logs_type=s3
# druid_indexer_logs_s3Bucket=<s3-bucket-name>
# druid_indexer_logs_s3Prefix=druid/indexing-logs

# -----------------------------------------------------------
# For local storage
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

druid_storage_type=local
druid_storage_storageDirectory=/opt/data/segments
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/data/indexing-logs
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000


druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB

druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,50 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]



druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000


druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m

druid_emitter_logging_logLevel=debug

# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]


druid_zk_service_host=zookeeper

druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword

druid_coordinator_balancer_strategy=cachingCost

druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000


druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
@@ -1,51 +0,0 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  queued_retry:
    num_workers: 4
    queue_size: 100
    retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  kafka/traces:
    brokers:
      - kafka:9092
    topic: 'otlp_spans'
    protocol_version: 2.0.0

  kafka/metrics:
    brokers:
      - kafka:9092
    topic: 'otlp_metrics'
    protocol_version: 2.0.0
service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [memory_limiter, batch, queued_retry]
      exporters: [kafka/traces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [kafka/metrics]
@@ -36,6 +36,10 @@ is_mac() {
    [[ $OSTYPE == darwin* ]]
}

is_arm64(){
    [[ `uname -m` == 'arm64' ]]
}

check_os() {
    if is_mac; then
        package_manager="brew"
@@ -98,7 +102,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
    local port_check_output
    local ports_pattern="80|3000|8080"
    local ports_pattern="3301|4317"

    if is_mac; then
        port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -112,18 +116,10 @@ check_ports_occupied() {
    fi

    if [[ -n $port_check_output ]]; then
        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "port not available" } }'
        URL="https://app.posthog.com/capture"
        HEADER="Content-Type: application/json"

        if has_curl; then
            curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
        elif has_wget; then
            wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
        fi
        send_event "port_not_available"

        echo "+++++++++++ ERROR ++++++++++++++++++++++"
        echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports."
        echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
        echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
        echo "++++++++++++++++++++++++++++++++++++++++"
        echo ""
@@ -137,57 +133,44 @@ install_docker() {


    if [[ $package_manager == apt-get ]]; then
        apt_cmd="sudo apt-get --yes --quiet"
        apt_cmd="$sudo_cmd apt-get --yes --quiet"
        $apt_cmd update
        $apt_cmd install software-properties-common gnupg-agent
        curl -fsSL "https://download.docker.com/linux/$os/gpg" | sudo apt-key add -
        sudo add-apt-repository \
        curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
        $sudo_cmd add-apt-repository \
            "deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
        $apt_cmd update
        echo "Installing docker"
        $apt_cmd install docker-ce docker-ce-cli containerd.io
    elif [[ $package_manager == zypper ]]; then
        zypper_cmd="sudo zypper --quiet --no-gpg-checks --non-interactive"
        zypper_cmd="$sudo_cmd zypper --quiet --no-gpg-checks --non-interactive"
        echo "Installing docker"
        if [[ $os == sles ]]; then
            os_sp="$(cat /etc/*-release | awk -F= '$1 == "VERSION_ID" { gsub(/"/, ""); print $2; exit }')"
            os_arch="$(uname -i)"
            sudo SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
            SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
        fi
        $zypper_cmd install docker docker-runc containerd
        sudo systemctl enable docker.service
        $sudo_cmd systemctl enable docker.service
    elif [[ $package_manager == yum && $os == 'amazon linux' ]]; then
        echo
        echo "Amazon Linux detected ... "
        echo
        sudo yum install docker
        sudo service docker start
        # yum install docker
        # service docker start
        $sudo_cmd yum install -y amazon-linux-extras
        $sudo_cmd amazon-linux-extras enable docker
        $sudo_cmd yum install -y docker
    else

        yum_cmd="sudo yum --assumeyes --quiet"
        yum_cmd="$sudo_cmd yum --assumeyes --quiet"
        $yum_cmd install yum-utils
        sudo yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
        $sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
        echo "Installing docker"
        $yum_cmd install docker-ce docker-ce-cli containerd.io

    fi

}
install_docker_machine() {

    echo "\nInstalling docker machine ..."

    if [[ $os == "Mac" ]];then
        curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine
        chmod +x /usr/local/bin/docker-machine
    else
        curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine
        chmod +x /tmp/docker-machine
        sudo cp /tmp/docker-machine /usr/local/bin/docker-machine

    fi


}

install_docker_compose() {
@@ -195,22 +178,14 @@ install_docker_compose() {
        if [[ ! -f /usr/bin/docker-compose ]];then
            echo "++++++++++++++++++++++++"
            echo "Installing docker-compose"
            sudo curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
            sudo chmod +x /usr/local/bin/docker-compose
            sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
            $sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
            $sudo_cmd chmod +x /usr/local/bin/docker-compose
            $sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
            echo "docker-compose installed!"
            echo ""
        fi
    else
        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found", "setup_type": "'"$setup_type"'" } }'
        URL="https://app.posthog.com/capture"
        HEADER="Content-Type: application/json"

        if has_curl; then
            curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
        elif has_wget; then
            wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
        fi
        send_event "docker_compose_not_found"

        echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
        echo "docker-compose not found! Please install docker-compose first and then continue with this installation."
@@ -221,35 +196,32 @@ install_docker_compose() {
}

start_docker() {
    echo "Starting Docker ..."
    if [ $os = "Mac" ]; then
    echo -e "🐳 Starting Docker ...\n"
    if [[ $os == "Mac" ]]; then
        open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
    else
        if ! sudo systemctl is-active docker.service > /dev/null; then
        if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
            echo "Starting docker service"
            sudo systemctl start docker.service
            $sudo_cmd systemctl start docker.service
        fi
        if [[ -z $sudo_cmd ]]; then
            docker ps > /dev/null && true
            if [[ $? -ne 0 ]]; then
                request_sudo
            fi
        fi
    fi
}

wait_for_containers_start() {
    local timeout=$1

    # The while loop is important because for-loops don't work for dynamic values
    while [[ $timeout -gt 0 ]]; do
        status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/v1/services/list || true)"
        status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
        if [[ status_code -eq 200 ]]; then
            break
        else
            if [ $setup_type == 'druid' ]; then
                SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
                LEN_SUPERVISORS="${#SUPERVISORS}"

                if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
                    echo -e "\n🟠 Supervisors taking time to start ⏳ ... let's wait for some more time ⏱️\n\n"
                    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d
                fi
            fi

            echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
        fi
        ((timeout--))
@@ -260,39 +232,33 @@ wait_for_containers_start() {
}

bye() { # Prints a friendly good bye message and exits the script.
    if [ "$?" -ne 0 ]; then
    if [[ "$?" -ne 0 ]]; then
        set +o errexit

        echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
        echo ""
        if [ $setup_type == 'clickhouse' ]; then
            echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
        else
            echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
        if is_arm64; then
            echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a"
        else
            echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
        fi

        # echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
        echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
        echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
        echo "++++++++++++++++++++++++++++++++++++++++"

        echo -e "\n📨 Please share your email to receive support with the installation"
        read -rp 'Email: ' email

        while [[ $email == "" ]]
        do
        if [[ $email == "" ]]; then
            echo -e "\n📨 Please share your email to receive support with the installation"
            read -rp 'Email: ' email
        done

        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
        URL="https://app.posthog.com/capture"
        HEADER="Content-Type: application/json"


        if has_curl; then
            curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
        elif has_wget; then
            wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
        while [[ $email == "" ]]
        do
            read -rp 'Email: ' email
        done
        fi

        send_event "installation_support"


        echo ""
        echo -e "\nWe will reach out to you at the email provided shortly, Exiting for now. Bye! 👋 \n"
@@ -300,126 +266,221 @@ bye() { # Prints a friendly good bye message and exits the script.
    fi
}

request_sudo() {
    if hash sudo 2>/dev/null; then
        echo -e "\n\n🙇 We will need sudo access to complete the installation."
        if (( $EUID != 0 )); then
            sudo_cmd="sudo"
            echo -e "Please enter your sudo password, if prompt."
            $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
            if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
                echo "Need sudo privileges to proceed with the installation."
                exit 1;
            fi

            echo -e "Got it! Thanks!! 🙏\n"
            echo -e "Okay! We will bring up the SigNoz cluster from here 🚀\n"
        fi
    fi
}

echo ""
echo -e "👋 Thank you for trying out SigNoz! "
echo ""

sudo_cmd=""

# Check sudo permissions
if (( $EUID != 0 )); then
    echo "🟡 Running installer with non-sudo permissions."
    echo " In case of any failure or prompt, please consider running the script with sudo privileges."
    echo ""
else
    sudo_cmd="sudo"
fi

# Checking OS and assigning package manager
desired_os=0
os=""
echo -e "Detecting your OS ..."
email=""
echo -e "🌏 Detecting your OS ...\n"
check_os

SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')

echo ""

echo -e "👉 ${RED}Two ways to go forward\n"
echo -e "${RED}1) ClickHouse as database (default)\n"
echo -e "${RED}2) Kafka + Druid as datastore \n"
read -p "⚙️ Enter your preference (1/2):" choice_setup

while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
do
    # echo $choice_setup
    echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
    read -p "⚙️ Enter your preference (1/2): " choice_setup
    # echo $choice_setup
done

if [[ $choice_setup == "1" || $choice_setup == "" ]];then
    setup_type='clickhouse'
else
    setup_type='druid'
# Obtain unique installation id
sysinfo="$(uname -a)"
if [[ $? -ne 0 ]]; then
    uuid="$(uuidgen)"
    uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
    sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
fi

echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
digest_cmd=""
if hash shasum 2>/dev/null; then
    digest_cmd="shasum -a 256"
elif hash sha256sum 2>/dev/null; then
    digest_cmd="sha256sum"
elif hash openssl 2>/dev/null; then
    digest_cmd="openssl dgst -sha256"
fi

if [[ -z $digest_cmd ]]; then
    SIGNOZ_INSTALLATION_ID="$sysinfo"
else
    SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
fi

# echo ""

# echo -e "👉 ${RED}Two ways to go forward\n"
# echo -e "${RED}1) ClickHouse as database (default)\n"
# read -p "⚙️ Enter your preference (1/2):" choice_setup

# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
# do
#     # echo $choice_setup
#     echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
#     read -p "⚙️ Enter your preference (1/2): " choice_setup
#     # echo $choice_setup
# done

# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
#     setup_type='clickhouse'
# fi

setup_type='clickhouse'

# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"

# Run bye if failure happens
trap bye EXIT

URL="https://api.segment.io/v1/track"
HEADER_1="Content-Type: application/json"
HEADER_2="Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6"

DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
send_event() {
    error=""

if has_curl; then
    curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
    wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
    case "$1" in
        'install_started')
            event="Installation Started"
            ;;
        'os_not_supported')
            event="Installation Error"
            error="OS Not Supported"
            ;;
        'docker_not_installed')
            event="Installation Error"
            error="Docker not installed"
            ;;
        'docker_compose_not_found')
            event="Installation Error"
            event="Docker Compose not found"
            ;;
        'port_not_available')
            event="Installation Error"
            error="port not available"
            ;;
        'installation_error_checks')
            event="Installation Error - Checks"
            error="Containers not started"
            others='"data": "some_checks",'
            ;;
        'installation_support')
            event="Installation Support"
            others='"email": "'"$email"'",'
            ;;
        'installation_success')
            event="Installation Success"
            ;;
        'identify_successful_installation')
            event="Identify Successful Installation"
            others='"email": "'"$email"'",'
            ;;
        *)
            print_error "unknown event type: $1"
            exit 1
            ;;
    esac


if [[ $desired_os -eq 0 ]];then
    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported", "setup_type": "'"$setup_type"'" } }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    if [[ "$error" != "" ]]; then
        error='"error": "'"$error"'", '
    fi

    DATA='{ "anonymousId": "'"$SIGNOZ_INSTALLATION_ID"'", "event": "'"$event"'", "properties": { "os": "'"$os"'", '"$error $others"' "setup_type": "'"$setup_type"'" } }'

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1
    fi
}

send_event "install_started"

if [[ $desired_os -eq 0 ]]; then
    send_event "os_not_supported"
fi

# check_ports_occupied

# Check is Docker daemon is installed and available. If not, the install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
if ! is_command_present docker; then

    if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
        request_sudo
        install_docker
    else
        # enable docker without sudo from next reboot
        sudo usermod -aG docker "${USER}"
    elif is_mac; then
        echo ""
        echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
        echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
        echo "https://docs.docker.com/docker-for-mac/install/"
        echo "++++++++++++++++++++++++++++++++++++++++++++++++"
        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed", "setup_type": "'"$setup_type"'" } }'
        URL="https://app.posthog.com/capture"
        HEADER="Content-Type: application/json"

        if has_curl; then
            curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
        elif has_wget; then
            wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
        fi
        send_event "docker_not_installed"
        exit 1
    else
        echo ""
        echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
        echo "Docker must be installed manually on your machine to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
        echo "https://docs.docker.com/get-docker/"
        echo "++++++++++++++++++++++++++++++++++++++++++++++++"

        send_event "docker_not_installed"
        exit 1
    fi
fi

# Install docker-compose
if ! is_command_present docker-compose; then
    request_sudo
    install_docker_compose
fi



start_docker


# sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true


echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
    sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
if is_arm64; then
    $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull
else
    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
    $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
fi


echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do it's thing.
if [ $setup_type == 'clickhouse' ]; then
    sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
if is_arm64; then
    $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true
else
    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
    $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
fi

wait_for_containers_start 60
@@ -429,64 +490,37 @@ if [[ $status_code -ne 200 ]]; then
    echo "+++++++++++ ERROR ++++++++++++++++++++++"
    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""
    if [ $setup_type == 'clickhouse' ]; then
        echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
    else
        echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
    fi

    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"

    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
    echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
    echo "or reach us on SigNoz for support https://signoz.io/slack"
    echo "++++++++++++++++++++++++++++++++++++++++"

    if [ $setup_type == 'clickhouse' ]; then
        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "data": "some_checks", "setup_type": "'"$setup_type"'" } }'
    else
        SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"

        DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"

        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"', "setup_type": "'"$setup_type"'" } }'
    fi

    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi

    send_event "installation_error_checks"
    exit 1

else
    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"}, "setup_type": "'"$setup_type"'" }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"
    send_event "installation_success"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi
    echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
    echo ""
    echo "🟢 Your installation is complete!"
    echo ""
    echo -e "🟢 Your frontend is running on http://localhost:3000"
    echo -e "🟢 Your frontend is running on http://localhost:3301"
    echo ""

    if [ $setup_type == 'clickhouse' ]; then
        echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v"
    if is_arm64; then
        echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v"
    else
        echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
        echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
    fi

    echo ""
    echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
    echo ""
    echo "👉 Need help Getting Started?"
    echo -e "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
    echo -e "Join us on Slack https://signoz.io/slack"
    echo ""
    echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
    read -rp 'Email: ' email
@@ -496,16 +530,7 @@ else
        read -rp 'Email: ' email
    done

    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi

    send_event "identify_successful_installation"
fi

echo -e "\n🙏 Thank you!\n"

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: retention-config
data:
  retention-spec.json: |
    [{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]
@@ -1,29 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: set-retention
  annotations:
    "helm.sh/hook": post-install,post-upgrade
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
      - name: set-retention
        image: theithollow/hollowapp-blog:curl
        volumeMounts:
        - name: retention-config-volume
          mountPath: /app/retention-spec.json
          subPath: retention-spec.json
        args:
        - /bin/sh
        - -c
        - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://signoz-druid-router:8888/druid/coordinator/v1/rules/flattened_spans"

      volumes:
      - name: retention-config-volume
        configMap:
          name: retention-config

      restartPolicy: Never
  backoffLimit: 8
@@ -1,76 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: supervisor-config
data:
  supervisor-spec.json: |
    {
      "type": "kafka",
      "dataSchema": {
        "dataSource": "flattened_spans",
        "parser": {
          "type": "string",
          "parseSpec": {
            "format": "json",
            "timestampSpec": {
              "column": "StartTimeUnixNano",
              "format": "nano"
            },
            "dimensionsSpec": {
              "dimensions": [
                "TraceId",
                "SpanId",
                "ParentSpanId",
                "Name",
                "ServiceName",
                "References",
                "Tags",
                "ExternalHttpMethod",
                "ExternalHttpUrl",
                "Component",
                "DBSystem",
                "DBName",
                "DBOperation",
                "PeerService",
                {
                  "type": "string",
                  "name": "TagsKeys",
                  "multiValueHandling": "ARRAY"
                },
                {
                  "type": "string",
                  "name": "TagsValues",
                  "multiValueHandling": "ARRAY"
                },
                { "name": "DurationNano", "type": "Long" },
                { "name": "Kind", "type": "int" },
                { "name": "StatusCode", "type": "int" }
              ]
            }
          }
        },
        "metricsSpec" : [
          { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
        ],
        "granularitySpec": {
          "type": "uniform",
          "segmentGranularity": "DAY",
          "queryGranularity": "NONE",
          "rollup": false
        }
      },
      "tuningConfig": {
        "type": "kafka",
        "reportParseExceptions": true
      },
      "ioConfig": {
        "topic": "flattened_spans",
        "replicas": 1,
        "taskDuration": "PT20M",
        "completionTimeout": "PT30M",
        "consumerProperties": {
          "bootstrap.servers": "signoz-kafka:9092"
        }
      }
    }

@@ -1,27 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: create-supervisor
  annotations:
    "helm.sh/hook": post-install,post-upgrade
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
      - name: create-supervisor
        image: theithollow/hollowapp-blog:curl
        volumeMounts:
        - name: supervisor-config-volume
          mountPath: /app/supervisor-spec.json
          subPath: supervisor-spec.json
        args:
        - /bin/sh
        - -c
        - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://signoz-druid-router:8888/druid/indexer/v1/supervisor"
      volumes:
      - name: supervisor-config-volume
        configMap:
          name: supervisor-config
      restartPolicy: Never
  backoffLimit: 8
@@ -1,60 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-collector-conf
  labels:
    app: opentelemetry
    component: otel-collector-conf
data:
  otel-collector-config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:
      jaeger:
        protocols:
          grpc:
          thrift_http:
    processors:
      batch:
        send_batch_size: 1000
        timeout: 10s
      memory_limiter:
        # Same as --mem-ballast-size-mib CLI argument
        ballast_size_mib: 683
        # 80% of maximum memory up to 2G
        limit_mib: 1500
        # 25% of limit up to 2G
        spike_limit_mib: 512
        check_interval: 5s
      queued_retry:
        num_workers: 4
        queue_size: 100
        retry_on_failure: true
    extensions:
      health_check: {}
      zpages: {}
    exporters:
      kafka/traces:
        brokers:
          - signoz-kafka:9092
        topic: 'otlp_spans'
        protocol_version: 2.0.0

      kafka/metrics:
        brokers:
          - signoz-kafka:9092
        topic: 'otlp_metrics'
        protocol_version: 2.0.0
    service:
      extensions: [health_check, zpages]
      pipelines:
        traces:
          receivers: [jaeger, otlp]
          processors: [memory_limiter, batch, queued_retry]
          exporters: [kafka/traces]
        metrics:
          receivers: [otlp]
          processors: [batch]
          exporters: [kafka/metrics]
@@ -1,72 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: otel-collector
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  selector:
    matchLabels:
      app: opentelemetry
      component: otel-collector
  minReadySeconds: 5
  progressDeadlineSeconds: 120
  replicas: 1 #TODO - adjust this to your own requirements
  template:
    metadata:
      labels:
        app: opentelemetry
        component: otel-collector
    spec:
      containers:
      - command:
          - "/otelcol"
          - "--config=/conf/otel-collector-config.yaml"
          # Memory Ballast size should be max 1/3 to 1/2 of memory.
          - "--mem-ballast-size-mib=683"
        image: otel/opentelemetry-collector:0.18.0
        name: otel-collector
        resources:
          limits:
            cpu: 1
            memory: 2Gi
          requests:
            cpu: 200m
            memory: 400Mi
        ports:
        - containerPort: 55679 # Default endpoint for ZPages.
        - containerPort: 55680 # Default endpoint for OpenTelemetry receiver.
        - containerPort: 55681 # Default endpoint for OpenTelemetry HTTP/1.0 receiver.
        - containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver.
        - containerPort: 14250 # Default endpoint for Jaeger GRPC receiver.
        - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
        - containerPort: 9411 # Default endpoint for Zipkin receiver.
        - containerPort: 8888 # Default endpoint for querying metrics.
        volumeMounts:
        - name: otel-collector-config-vol
          mountPath: /conf
#        - name: otel-collector-secrets
#          mountPath: /secrets
        livenessProbe:
          httpGet:
            path: /
            port: 13133 # Health Check extension default port.
        readinessProbe:
          httpGet:
            path: /
            port: 13133 # Health Check extension default port.
      volumes:
        - configMap:
            name: otel-collector-conf
            items:
              - key: otel-collector-config
                path: otel-collector-config.yaml
          name: otel-collector-config-vol
#        - secret:
#            name: otel-collector-secrets
#            items:
#              - key: cert.pem
#                path: cert.pem
#              - key: key.pem
#                path: key.pem
@@ -1,31 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: otel-collector
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  ports:
  - name: otlp # Default endpoint for OpenTelemetry receiver.
    port: 55680
    protocol: TCP
    targetPort: 55680
  - name: otlp-http-legacy # Default endpoint for OpenTelemetry receiver.
    port: 55681
    protocol: TCP
    targetPort: 55681
  - name: otlp-grpc # Default endpoint for OpenTelemetry receiver.
    port: 4317
    protocol: TCP
    targetPort: 4317
  - name: jaeger-grpc # Default endpoing for Jaeger gRPC receiver
    port: 14250
  - name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver.
    port: 14268
  - name: zipkin # Default endpoint for Zipkin receiver.
    port: 9411
  - name: metrics # Default endpoint for querying metrics.
    port: 8888
  selector:
    component: otel-collector
@@ -1,21 +0,0 @@
dependencies:
- name: zookeeper
  repository: https://charts.bitnami.com/bitnami
  version: 6.0.0
- name: kafka
  repository: https://charts.bitnami.com/bitnami
  version: 12.0.0
- name: druid
  repository: https://charts.helm.sh/incubator
  version: 0.2.18
- name: flattener-processor
  repository: file://./signoz-charts/flattener-processor
  version: 0.3.6
- name: query-service
  repository: file://./signoz-charts/query-service
  version: 0.3.6
- name: frontend
  repository: file://./signoz-charts/frontend
  version: 0.3.6
digest: sha256:b160e903c630a90644683c512eb8ba018e18d2c08051e255edd3749cb9cc7228
generated: "2021-08-23T12:06:37.231066+05:30"
@@ -1,43 +0,0 @@
apiVersion: v2
name: signoz-platform
description: SigNoz Observability Platform Helm Chart

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.3.2

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.3.2

dependencies:
  - name: zookeeper
    repository: "https://charts.bitnami.com/bitnami"
    version: 6.0.0
  - name: kafka
    repository: "https://charts.bitnami.com/bitnami"
    version: 12.0.0
  - name: druid
    repository: "https://charts.helm.sh/incubator"
    version: 0.2.18
  - name: flattener-processor
    repository: "file://./signoz-charts/flattener-processor"
    version: 0.3.6
  - name: query-service
    repository: "file://./signoz-charts/query-service"
    version: 0.3.6
  - name: frontend
    repository: "file://./signoz-charts/frontend"
    version: 0.3.6
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,21 +0,0 @@
apiVersion: v2
name: flattener-processor
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.6

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.6
@@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "flattener-processor.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "flattener-processor.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "flattener-processor.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "flattener-processor.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "flattener-processor.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flattener-processor.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flattener-processor.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "flattener-processor.labels" -}}
helm.sh/chart: {{ include "flattener-processor.chart" . }}
{{ include "flattener-processor.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "flattener-processor.selectorLabels" -}}
app.kubernetes.io/name: {{ include "flattener-processor.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "flattener-processor.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "flattener-processor.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
@@ -1,65 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "flattener-processor.fullname" . }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "flattener-processor.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "flattener-processor.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "flattener-processor.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - "/root/flattener"
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
          env:
            - name: KAFKA_BROKER
              value: {{ .Values.configVars.KAFKA_BROKER }}
            - name: KAFKA_INPUT_TOPIC
              value: {{ .Values.configVars.KAFKA_INPUT_TOPIC }}
            - name: KAFKA_OUTPUT_TOPIC
              value: {{ .Values.configVars.KAFKA_OUTPUT_TOPIC }}

          # livenessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          # readinessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "flattener-processor.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
        {{- range .paths }}
          - path: {{ . }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
        {{- end }}
  {{- end }}
{{- end }}
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "flattener-processor.fullname" . }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "flattener-processor.selectorLabels" . | nindent 4 }}
@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "flattener-processor.serviceAccountName" . }}
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "flattener-processor.fullname" . }}-test-connection"
  labels:
    {{- include "flattener-processor.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "flattener-processor.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
@@ -1,74 +0,0 @@
# Default values for flattener-processor.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: signoz/flattener-processor
  pullPolicy: IfNotPresent

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""


configVars:
  KAFKA_BROKER: signoz-kafka:9092
  KAFKA_INPUT_TOPIC: otlp_spans
  KAFKA_OUTPUT_TOPIC: flattened_spans

serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 8080

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths: []
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,21 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: frontend
|
||||
description: A Helm chart for SigNoz Frontend Service
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
version: 0.3.6
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application.
|
||||
appVersion: 0.3.6
@@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "frontend.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "frontend.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "frontend.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "frontend.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:{{ .Values.service.port }}
{{- end }}
@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "frontend.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "frontend.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "frontend.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "frontend.labels" -}}
helm.sh/chart: {{ include "frontend.chart" . }}
{{ include "frontend.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "frontend.selectorLabels" -}}
app.kubernetes.io/name: {{ include "frontend.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "frontend.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "frontend.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
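
For orientation, a sketch of what "frontend.labels" would render, assuming a hypothetical release named signoz and the chart/app versions from the Chart.yaml above:

    helm.sh/chart: frontend-0.3.6
    app.kubernetes.io/name: frontend
    app.kubernetes.io/instance: signoz
    app.kubernetes.io/version: "0.3.6"
    app.kubernetes.io/managed-by: Helm
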
@@ -1,37 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.config.name }}
  labels:
    release: {{ .Release.Name }}
data:
  default.conf: |-
    server {
        listen       {{ .Values.service.port }};
        server_name  _;

        gzip on;
        gzip_static on;
        gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
        gzip_proxied any;
        gzip_vary on;
        gzip_comp_level 6;
        gzip_buffers 16 8k;
        gzip_http_version 1.1;

        location / {
            root   /usr/share/nginx/html;
            index  index.html index.htm;
            try_files $uri $uri/ /index.html;
        }
        location /api {
            proxy_pass http://{{ .Values.config.queryServiceUrl }}/api;
        }

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   /usr/share/nginx/html;
        }
    }
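
With the chart defaults shown later in this diff (service.port 3000, config.queryServiceUrl signoz-query-service:8080), the proxy stanza would render roughly as:

        location /api {
            proxy_pass http://signoz-query-service:8080/api;
        }
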
@@ -1,64 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "frontend.fullname" . }}
  labels:
    {{- include "frontend.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "frontend.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "frontend.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      volumes:
        - name: nginx-config
          configMap:
            name: {{ .Values.config.name }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          env:
            - name: REACT_APP_QUERY_SERVICE_URL
              value: {{ .Values.configVars.REACT_APP_QUERY_SERVICE_URL }}
          volumeMounts:
            - name: nginx-config
              mountPath: /etc/nginx/conf.d
          # livenessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          # readinessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
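
The probes above are scaffolded but left commented out. A minimal sketch of enabling them, assuming the nginx location / block from the ConfigMap answers on the named http port:

          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
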
@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "frontend.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "frontend.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ . }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
          {{- end }}
    {{- end }}
{{- end }}
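
A sketch of the values that would activate this template; chart-example.local is the chart's own placeholder host, and since the template ranges over bare path strings, a plain "/" entry suffices:

    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: nginx
      hosts:
        - host: chart-example.local
          paths: ["/"]
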
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "frontend.fullname" . }}
  labels:
    {{- include "frontend.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "frontend.selectorLabels" . | nindent 4 }}
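
To take the NodePort branch of the NOTES.txt above instead of the default ClusterIP, a small override is enough; a sketch assuming the default port:

    service:
      type: NodePort
      port: 3000
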
@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "frontend.serviceAccountName" . }}
  labels:
    {{- include "frontend.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}
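
Nothing renders while serviceAccount.create is false. A sketch of turning it on; the role-arn annotation is a hypothetical example, not something the chart requires:

    serviceAccount:
      create: true
      annotations:
        eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/signoz-frontend   # hypothetical
      name:
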
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "frontend.fullname" . }}-test-connection"
  labels:
    {{- include "frontend.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "frontend.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
@@ -1,75 +0,0 @@
# Default values for frontend.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: signoz/frontend
  pullPolicy: IfNotPresent

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

configVars: {}

config:
  name: signoz-nginx-config
  queryServiceUrl: signoz-query-service:8080

serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 3000


ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths: []
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
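
Per the resources comment's own instruction, uncommenting and removing the braces would yield something like this (the figures are the scaffold placeholders, not tuned recommendations):

    resources:
      limits:
        cpu: 100m
        memory: 128Mi
      requests:
        cpu: 100m
        memory: 128Mi
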
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,21 +0,0 @@
apiVersion: v2
name: query-service
description: A Helm chart for running SigNoz Query Service in Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.6

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.6
Some files were not shown because too many files have changed in this diff.