Compare commits: v0.76.1...fix/extrac

662 commits
[Commit list omitted: the compare view exported only bare SHA1 hashes (603d6d6fce ... bf385a9f95) with no authors, dates, or commit messages.]
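If you have the repository locally, the same range can be inspected with plain git (the head ref is truncated to `fix/extrac` in this capture, so substitute the full branch name):

    # count the commits in the compare range -- should print 662
    git rev-list --count v0.76.1..fix/extrac
    # list them with subjects, which the export above dropped
    git log --oneline v0.76.1..fix/extrac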
.devenv/docker/clickhouse/compose.yaml

@@ -1,5 +1,4 @@
 services:
-
   clickhouse:
     image: clickhouse/clickhouse-server:24.1.2-alpine
     container_name: clickhouse
@@ -24,9 +23,8 @@ services:
      retries: 3
    depends_on:
      - zookeeper
-
  zookeeper:
-    image: bitnami/zookeeper:3.7.1
+    image: signoz/zookeeper:3.7.1
    container_name: zookeeper
    volumes:
      - ${PWD}/fs/tmp/zookeeper:/bitnami/zookeeper
@@ -41,9 +39,8 @@ services:
      interval: 30s
      timeout: 5s
      retries: 3
-
  schema-migrator-sync:
-    image: signoz/signoz-schema-migrator:0.111.29
+    image: signoz/signoz-schema-migrator:v0.129.0
    container_name: schema-migrator-sync
    command:
      - sync
@@ -55,9 +52,8 @@ services:
      clickhouse:
        condition: service_healthy
    restart: on-failure
-
  schema-migrator-async:
-    image: signoz/signoz-schema-migrator:0.111.29
+    image: signoz/signoz-schema-migrator:v0.129.0
    container_name: schema-migrator-async
    command:
      - async
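A minimal sketch of exercising this file locally, assuming the path inferred above and a Docker Compose v2 CLI:

    docker compose -f .devenv/docker/clickhouse/compose.yaml up -d
    # the schema migrators run to completion; clickhouse should settle on "healthy"
    docker compose -f .devenv/docker/clickhouse/compose.yaml ps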
.devenv/docker/postgres/compose.yaml (new file, +27)

@@ -0,0 +1,27 @@
+services:
+
+  postgres:
+    image: postgres:15
+    container_name: postgres
+    environment:
+      POSTGRES_DB: signoz
+      POSTGRES_USER: postgres
+      POSTGRES_PASSWORD: password
+    healthcheck:
+      test:
+        [
+          "CMD",
+          "pg_isready",
+          "-d",
+          "signoz",
+          "-U",
+          "postgres"
+        ]
+      interval: 30s
+      timeout: 30s
+      retries: 3
+    restart: on-failure
+    ports:
+      - "127.0.0.1:5432:5432/tcp"
+    volumes:
+      - ${PWD}/fs/tmp/var/lib/postgresql/data/:/var/lib/postgresql/data/
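With the container healthy, the credentials and port mapping above can be verified from the host (assuming the standard postgres client tools are installed):

    pg_isready -h 127.0.0.1 -p 5432 -d signoz -U postgres
    psql "postgresql://postgres:password@127.0.0.1:5432/signoz" -c 'SELECT 1;'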
.devenv/docker/signoz-otel-collector/compose.yaml (new file, +29)

@@ -0,0 +1,29 @@
+services:
+  signoz-otel-collector:
+    image: signoz/signoz-otel-collector:v0.128.2
+    container_name: signoz-otel-collector-dev
+    command:
+      - --config=/etc/otel-collector-config.yaml
+      - --feature-gates=-pkg.translator.prometheus.NormalizeName
+    volumes:
+      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+    environment:
+      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
+      - LOW_CARDINAL_EXCEPTION_GROUPING=false
+    ports:
+      - "4317:4317" # OTLP gRPC receiver
+      - "4318:4318" # OTLP HTTP receiver
+      - "13133:13133" # health check extension
+    healthcheck:
+      test:
+        - CMD
+        - wget
+        - --spider
+        - -q
+        - localhost:13133
+      interval: 30s
+      timeout: 5s
+      retries: 3
+    restart: unless-stopped
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
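Because port 13133 is published, the collector's health-check extension can be probed from the host exactly as the container's own healthcheck does:

    wget --spider -q http://localhost:13133 && echo "collector healthy"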
.devenv/docker/signoz-otel-collector/otel-collector-config.yaml (new file, +96)

@@ -0,0 +1,96 @@
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+  prometheus:
+    config:
+      global:
+        scrape_interval: 60s
+      scrape_configs:
+        - job_name: otel-collector
+          static_configs:
+            - targets:
+                - localhost:8888
+              labels:
+                job_name: otel-collector
+
+processors:
+  batch:
+    send_batch_size: 10000
+    send_batch_max_size: 11000
+    timeout: 10s
+  resourcedetection:
+    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
+    detectors: [env, system]
+    timeout: 2s
+  signozspanmetrics/delta:
+    metrics_exporter: signozclickhousemetrics
+    metrics_flush_interval: 60s
+    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
+    dimensions_cache_size: 100000
+    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
+    enable_exp_histogram: true
+    dimensions:
+      - name: service.namespace
+        default: default
+      - name: deployment.environment
+        default: default
+      # This is added to ensure the uniqueness of the timeseries
+      # Otherwise, identical timeseries produced by multiple replicas of
+      # collectors result in incorrect APM metrics
+      - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name
+
+extensions:
+  health_check:
+    endpoint: 0.0.0.0:13133
+  pprof:
+    endpoint: 0.0.0.0:1777
+
+exporters:
+  clickhousetraces:
+    datasource: tcp://host.docker.internal:9000/signoz_traces
+    low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+    use_new_schema: true
+  signozclickhousemetrics:
+    dsn: tcp://host.docker.internal:9000/signoz_metrics
+  clickhouselogsexporter:
+    dsn: tcp://host.docker.internal:9000/signoz_logs
+    timeout: 10s
+    use_new_schema: true
+
+service:
+  telemetry:
+    logs:
+      encoding: json
+  extensions:
+    - health_check
+    - pprof
+  pipelines:
+    traces:
+      receivers: [otlp]
+      processors: [signozspanmetrics/delta, batch]
+      exporters: [clickhousetraces]
+    metrics:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [signozclickhousemetrics]
+    metrics/prometheus:
+      receivers: [prometheus]
+      processors: [batch]
+      exporters: [signozclickhousemetrics]
+    logs:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [clickhouselogsexporter]
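A quick smoke test for the traces pipeline over OTLP/HTTP, assuming the stack above is running; an empty resourceSpans array is a minimal valid payload, so any 2xx response confirms the receiver and pipeline wiring:

    curl -sS -X POST http://localhost:4318/v1/traces \
      -H 'Content-Type: application/json' \
      -d '{"resourceSpans":[]}'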
.dockerignore

@@ -1,6 +1,7 @@
 .git
 .github
 .vscode
+.devenv
 README.md
 deploy
 sample-apps
.github/CODEOWNERS (33 lines changed)

@@ -2,12 +2,43 @@
 # Owners are automatically requested for review for PRs that changes code
 # that they own.
 
-/frontend/ @YounixM
+/frontend/ @SigNoz/frontend @YounixM
 /frontend/src/container/MetricsApplication @srikanthccv
 /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
 /deploy/ @SigNoz/devops
 .github @SigNoz/devops
 
+# Scaffold Owners
+/pkg/config/ @grandwizard28
+/pkg/errors/ @grandwizard28
+/pkg/factory/ @grandwizard28
+/pkg/types/ @grandwizard28
+/pkg/valuer/ @grandwizard28
+/cmd/ @grandwizard28
+.golangci.yml @grandwizard28
+
+# Zeus Owners
+/pkg/zeus/ @vikrantgupta25
+/ee/zeus/ @vikrantgupta25
+/pkg/licensing/ @vikrantgupta25
+/ee/licensing/ @vikrantgupta25
+
+# SQL Owners
+/pkg/sqlmigration/ @vikrantgupta25
+/ee/sqlmigration/ @vikrantgupta25
+/pkg/sqlschema/ @vikrantgupta25
+/ee/sqlschema/ @vikrantgupta25
+
+# Analytics Owners
+/pkg/analytics/ @vikrantgupta25
+/pkg/statsreporter/ @vikrantgupta25
+
+# Querier Owners
+/pkg/querier/ @srikanthccv
+/pkg/variables/ @srikanthccv
+/pkg/types/querybuildertypes/ @srikanthccv
+/pkg/querybuilder/ @srikanthccv
+/pkg/telemetrylogs/ @srikanthccv
+/pkg/telemetrymetadata/ @srikanthccv
+/pkg/telemetrymetrics/ @srikanthccv
+/pkg/telemetrytraces/ @srikanthccv
.github/pull_request_template.md (73 lines changed)

@@ -1,17 +1,72 @@
-### Summary
+## 📄 Summary
 
-<!-- ✍️ A clear and concise description...-->
+<!-- Describe the purpose of the PR in a few sentences. What does it fix/add/update? -->
 
-#### Related Issues / PR's
+---
 
-<!-- ✍️ Add the issues being resolved here and related PR's where applicable -->
+## ✅ Changes
 
-#### Screenshots
+- [ ] Feature: Brief description
+- [ ] Bug fix: Brief description
 
-NA
+---
 
-<!-- ✍️ Add screenshots of before and after changes where applicable-->
+## 🏷️ Required: Add Relevant Labels
 
-#### Affected Areas and Manually Tested Areas
+> ⚠️ **Manually add appropriate labels in the PR sidebar**
+Please select one or more labels (as applicable):
 
-<!-- ✍️ Add details of blast radius and dev testing areas where applicable-->
+ex:
+
+- `frontend`
+- `backend`
+- `devops`
+- `bug`
+- `enhancement`
+- `ui`
+- `test`
+
+---
+
+## 👥 Reviewers
+
+> Tag the relevant teams for review:
+
+- frontend / backend / devops
+
+---
+
+## 🧪 How to Test
+
+<!-- Describe how reviewers can test this PR -->
+1. ...
+2. ...
+3. ...
+
+---
+
+## 🔍 Related Issues
+
+<!-- Reference any related issues (e.g. Fixes #123, Closes #456) -->
+Closes #
+
+---
+
+## 📸 Screenshots / Screen Recording (if applicable / mandatory for UI related changes)
+
+<!-- Add screenshots or GIFs to help visualize changes -->
+
+---
+
+## 📋 Checklist
+
+- [ ] Dev Review
+- [ ] Test cases added (Unit/ Integration / E2E)
+- [ ] Manually tested the changes
+
+
+---
+
+## 👀 Notes for Reviewers
+
+<!-- Anything reviewers should keep in mind while reviewing -->
.github/workflows/README.md (deleted, -42)

@@ -1,42 +0,0 @@
-# Github actions
-
-## Testing the UI manually on each PR
-
-First we need to make sure the UI is ready
-* Check the `Start tunnel` step in `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull-<number>-signoz.loca.lt`
-* This job will run until the PR is merged or closed to keep the local tunneling alive
-  - github will cancel this job if the PR wasn't merged after 6h
-  - if the job was cancel, go to the action and press `Re-run all jobs`
-
-Now you can open your browser at https://pull-<number>-signoz.loca.lt and check the UI.
-
-## Environment Variables
-
-To run GitHub workflow, a few environment variables needs to add in GitHub secrets
-
-<table>
-  <tr>
-    <th> Variables </th>
-    <th> Description </th>
-    <th> Example </th>
-  </tr>
-  <tr>
-    <td> REPONAME </td>
-    <td> Provide the DockerHub user/organisation name of the image. </td>
-    <td> signoz</td>
-  </tr>
-  <tr>
-    <td> DOCKERHUB_USERNAME </td>
-    <td> Docker hub username </td>
-    <td> signoz</td>
-  </tr>
-  <tr>
-    <td> DOCKERHUB_TOKEN </td>
-    <td> Docker hub password/token with push permission </td>
-    <td> **** </td>
-  </tr>
-  <tr>
-    <td> SONAR_TOKEN </td>
-    <td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
-    <td> **** </td>
-  </tr>
.github/workflows/build-community.yaml (new file, +83)

@@ -0,0 +1,83 @@
+name: build-community
+
+on:
+  push:
+    tags:
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
+
+defaults:
+  run:
+    shell: bash
+
+env:
+  PRIMUS_HOME: .primus
+  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    outputs:
+      version: ${{ steps.build-info.outputs.version }}
+      hash: ${{ steps.build-info.outputs.hash }}
+      time: ${{ steps.build-info.outputs.time }}
+      branch: ${{ steps.build-info.outputs.branch }}
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - id: token
+        name: github-token-gen
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ secrets.PRIMUS_APP_ID }}
+          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
+          owner: ${{ github.repository_owner }}
+      - name: primus-checkout
+        uses: actions/checkout@v4
+        with:
+          repository: signoz/primus
+          ref: main
+          path: .primus
+          token: ${{ steps.token.outputs.token }}
+      - name: build-info
+        run: |
+          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
+          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
+          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
+          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
+  js-build:
+    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
+    needs: prepare
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      JS_SRC: frontend
+      JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
+      JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      DOCKER_BUILD: false
+      DOCKER_MANIFEST: false
+  go-build:
+    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
+    needs: [prepare, js-build]
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      GO_VERSION: 1.23
+      GO_NAME: signoz-community
+      GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
+      GO_INPUT_ARTIFACT_PATH: frontend/build
+      GO_BUILD_CONTEXT: ./cmd/community
+      GO_BUILD_FLAGS: >-
+        -tags timetzdata
+        -ldflags='-linkmode external -extldflags \"-static\" -s -w
+        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
+        -X github.com/SigNoz/signoz/pkg/version.variant=community
+        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
+        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
+        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
+        -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
+      GO_CGO_ENABLED: 1
+      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
+      DOCKER_DOCKERFILE_PATH: ./cmd/community/Dockerfile.multi-arch
+      DOCKER_MANIFEST: true
+      DOCKER_PROVIDERS: dockerhub
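The repeated -X flags stamp build metadata into package-level string variables at link time. The mechanism can be tried standalone (a sketch with hypothetical local values; the workflow derives the real ones from the primus make targets, and the full build also needs CGO and the frontend assets):

    go build -tags timetzdata \
      -ldflags "-X github.com/SigNoz/signoz/pkg/version.version=v0.0.0-dev \
                -X github.com/SigNoz/signoz/pkg/version.variant=community" \
      ./cmd/community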
.github/workflows/build-enterprise.yaml (new file, +117)

@@ -0,0 +1,117 @@
+name: build-enterprise
+
+on:
+  push:
+    tags:
+      - v*
+
+defaults:
+  run:
+    shell: bash
+
+env:
+  PRIMUS_HOME: .primus
+  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    outputs:
+      docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
+      version: ${{ steps.build-info.outputs.version }}
+      hash: ${{ steps.build-info.outputs.hash }}
+      time: ${{ steps.build-info.outputs.time }}
+      branch: ${{ steps.build-info.outputs.branch }}
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - id: token
+        name: github-token-gen
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ secrets.PRIMUS_APP_ID }}
+          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
+          owner: ${{ github.repository_owner }}
+      - name: primus-checkout
+        uses: actions/checkout@v4
+        with:
+          repository: signoz/primus
+          ref: main
+          path: .primus
+          token: ${{ steps.token.outputs.token }}
+      - name: build-info
+        id: build-info
+        run: |
+          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
+          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
+          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
+          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
+      - name: set-docker-providers
+        id: set-docker-providers
+        run: |
+          if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
+            echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
+          else
+            echo "providers=gcp" >> $GITHUB_OUTPUT
+          fi
+      - name: create-dotenv
+        run: |
+          mkdir -p frontend
+          echo 'CI=1' > frontend/.env
+          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
+          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
+          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
+          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
+          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
+          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
+          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
+          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
+          echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> frontend/.env
+          echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> frontend/.env
+      - name: cache-dotenv
+        uses: actions/cache@v4
+        with:
+          path: frontend/.env
+          key: enterprise-dotenv-${{ github.sha }}
+  js-build:
+    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
+    needs: prepare
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      JS_SRC: frontend
+      JS_INPUT_ARTIFACT_CACHE_KEY: enterprise-dotenv-${{ github.sha }}
+      JS_INPUT_ARTIFACT_PATH: frontend/.env
+      JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
+      JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      DOCKER_BUILD: false
+      DOCKER_MANIFEST: false
+  go-build:
+    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
+    needs: [prepare, js-build]
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      GO_VERSION: 1.23
+      GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
+      GO_INPUT_ARTIFACT_PATH: frontend/build
+      GO_BUILD_CONTEXT: ./cmd/enterprise
+      GO_BUILD_FLAGS: >-
+        -tags timetzdata
+        -ldflags='-linkmode external -extldflags \"-static\" -s -w
+        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
+        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
+        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
+        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
+        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
+        -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
+        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
+        -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
+      GO_CGO_ENABLED: 1
+      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
+      DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
+      DOCKER_MANIFEST: true
+      DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}
.github/workflows/build-staging.yaml (new file, +128)

@@ -0,0 +1,128 @@
+name: build-staging
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    types: [labeled]
+
+defaults:
+  run:
+    shell: bash
+
+env:
+  PRIMUS_HOME: .primus
+  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
+    outputs:
+      version: ${{ steps.build-info.outputs.version }}
+      hash: ${{ steps.build-info.outputs.hash }}
+      time: ${{ steps.build-info.outputs.time }}
+      branch: ${{ steps.build-info.outputs.branch }}
+      deployment: ${{ steps.build-info.outputs.deployment }}
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - id: token
+        name: github-token-gen
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ secrets.PRIMUS_APP_ID }}
+          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
+          owner: ${{ github.repository_owner }}
+      - name: primus-checkout
+        uses: actions/checkout@v4
+        with:
+          repository: signoz/primus
+          ref: main
+          path: .primus
+          token: ${{ steps.token.outputs.token }}
+      - name: build-info
+        id: build-info
+        run: |
+          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
+          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
+          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
+          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
+
+          staging_label="${{ github.event.label.name }}"
+          if [[ "${staging_label}" == "staging:"* ]]; then
+            deployment=${staging_label#"staging:"}
+          elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
+            deployment="staging"
+          else
+            echo "error: not able to determine deployment - please verify the PR label or the branch"
+            exit 1
+          fi
+          echo "deployment=${deployment}" >> $GITHUB_OUTPUT
+      - name: create-dotenv
+        run: |
+          mkdir -p frontend
+          echo 'CI=1' > frontend/.env
+          echo 'TUNNEL_URL="${{ secrets.NP_TUNNEL_URL }}"' >> frontend/.env
+          echo 'TUNNEL_DOMAIN="${{ secrets.NP_TUNNEL_DOMAIN }}"' >> frontend/.env
+          echo 'PYLON_APP_ID="${{ secrets.NP_PYLON_APP_ID }}"' >> frontend/.env
+          echo 'APPCUES_APP_ID="${{ secrets.NP_APPCUES_APP_ID }}"' >> frontend/.env
+      - name: cache-dotenv
+        uses: actions/cache@v4
+        with:
+          path: frontend/.env
+          key: staging-dotenv-${{ github.sha }}
+  js-build:
+    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
+    needs: prepare
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      JS_SRC: frontend
+      JS_INPUT_ARTIFACT_CACHE_KEY: staging-dotenv-${{ github.sha }}
+      JS_INPUT_ARTIFACT_PATH: frontend/.env
+      JS_OUTPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
+      JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      DOCKER_BUILD: false
+      DOCKER_MANIFEST: false
+  go-build:
+    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
+    needs: [prepare, js-build]
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      GO_VERSION: 1.23
+      GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
+      GO_INPUT_ARTIFACT_PATH: frontend/build
+      GO_BUILD_CONTEXT: ./cmd/enterprise
+      GO_BUILD_FLAGS: >-
+        -tags timetzdata
+        -ldflags='-linkmode external -extldflags \"-static\" -s -w
+        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
+        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
+        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
+        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
+        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
+        -X github.com/SigNoz/signoz/ee/zeus.url=https://api.staging.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.staging.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1
+        -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
+      GO_CGO_ENABLED: 1
+      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
+      DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
+      DOCKER_MANIFEST: true
+      DOCKER_PROVIDERS: gcp
+  staging:
+    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
+    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
+    secrets: inherit
+    needs: [prepare, go-build]
+    with:
+      PRIMUS_REF: main
+      GITHUB_ENVIRONMENT: staging
+      GITHUB_SILENT: true
+      GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
+      GITHUB_EVENT_NAME: releaser
+      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"
.github/workflows/build.yaml (deleted, -48)

@@ -1,48 +0,0 @@
-name: build-pipeline
-
-on:
-  pull_request:
-    branches:
-      - main
-      - release/v*
-
-jobs:
-  build-frontend:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Install dependencies
-        run: cd frontend && yarn install
-      - name: Build frontend static files
-        shell: bash
-        run: |
-          make build-frontend-static
-
-  build-signoz:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.22"
-      - name: Build signoz image
-        shell: bash
-        run: |
-          make build-signoz-amd64
-
-  build-signoz-community:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Setup golang
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.22"
-      - name: Build signoz community image
-        shell: bash
-        run: |
-          make build-signoz-community-amd64
.github/workflows/commitci.yaml (12 lines changed)

@@ -25,15 +25,3 @@ jobs:
           else
             echo "No references to 'ee' packages found in 'pkg' directory"
           fi
-  lint:
-    if: |
-      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
-      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: lint
-        uses: wagoid/commitlint-github-action@v5
.github/workflows/goci.yaml (39 lines changed)

@@ -18,6 +18,7 @@ jobs:
     with:
       PRIMUS_REF: main
       GO_TEST_CONTEXT: ./...
+      GO_VERSION: 1.23
   fmt:
     if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -26,6 +27,7 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
+      GO_VERSION: 1.23
   lint:
     if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -34,3 +36,40 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
+      GO_VERSION: 1.23
+  deps:
+    if: |
+      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
+      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
+    uses: signoz/primus.workflows/.github/workflows/go-deps.yaml@main
+    secrets: inherit
+    with:
+      PRIMUS_REF: main
+      GO_VERSION: 1.23
+  build:
+    if: |
+      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
+      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
+    runs-on: ubuntu-latest
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - name: go-install
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.23"
+      - name: qemu-install
+        uses: docker/setup-qemu-action@v3
+      - name: aarch64-install
+        run: |
+          set -ex
+          sudo apt-get update
+          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
+      - name: docker-community
+        shell: bash
+        run: |
+          make docker-build-community
+      - name: docker-enterprise
+        shell: bash
+        run: |
+          make docker-build-enterprise
.github/workflows/gor-signoz-community.yaml (10 lines changed)

@@ -22,7 +22,7 @@ jobs:
         run: |
           echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
       - name: build-frontend
-        run: make build-frontend-static
+        run: make js-build
       - name: upload-frontend-artifact
         uses: actions/upload-artifact@v4
         with:
@@ -36,7 +36,7 @@ jobs:
          - ubuntu-latest
          - macos-latest
     env:
-      CONFIG_PATH: pkg/query-service/.goreleaser.yaml
+      CONFIG_PATH: cmd/community/.goreleaser.yaml
     runs-on: ${{ matrix.os }}
     steps:
       - name: checkout
@@ -58,7 +58,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.22"
+          go-version: "1.23"
       - name: cross-compilation-tools
         if: matrix.os == 'ubuntu-latest'
         run: |
@@ -100,7 +100,7 @@ jobs:
     needs: build
     env:
       DOCKER_CLI_EXPERIMENTAL: "enabled"
-      WORKDIR: pkg/query-service
+      WORKDIR: cmd/community
     steps:
       - name: checkout
         uses: actions/checkout@v4
@@ -122,7 +122,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.22"
+          go-version: "1.23"
 
       # copy the caches from build
       - name: get-sha
.github/workflows/gor-signoz.yaml (12 lines changed)

@@ -33,10 +33,10 @@ jobs:
           echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> .env
           echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> .env
           echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
-          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> .env
-          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> .env
+          echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> .env
+          echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> .env
       - name: build-frontend
-        run: make build-frontend-static
+        run: make js-build
       - name: upload-frontend-artifact
         uses: actions/upload-artifact@v4
         with:
@@ -50,7 +50,7 @@ jobs:
          - ubuntu-latest
          - macos-latest
     env:
-      CONFIG_PATH: ee/query-service/.goreleaser.yaml
+      CONFIG_PATH: cmd/enterprise/.goreleaser.yaml
     runs-on: ${{ matrix.os }}
     steps:
       - name: checkout
@@ -72,7 +72,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.22"
+          go-version: "1.23"
       - name: cross-compilation-tools
         if: matrix.os == 'ubuntu-latest'
         run: |
@@ -135,7 +135,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.22"
+          go-version: "1.23"
 
       # copy the caches from build
       - name: get-sha
.github/workflows/integrationci.yaml (new file, +55)

@@ -0,0 +1,55 @@
+name: integrationci
+
+on:
+  pull_request:
+    types:
+      - labeled
+  pull_request_target:
+    types:
+      - labeled
+
+jobs:
+  test:
+    strategy:
+      fail-fast: false
+      matrix:
+        src:
+          - bootstrap
+          - auth
+          - querier
+        sqlstore-provider:
+          - postgres
+          - sqlite
+        clickhouse-version:
+          - 24.1.2-alpine
+          - 25.5.6
+        schema-migrator-version:
+          - v0.128.1
+        postgres-version:
+          - 15
+    if: |
+      ((github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
+      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))) && contains(github.event.pull_request.labels.*.name, 'safe-to-integrate')
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout
+        uses: actions/checkout@v4
+      - name: python
+        uses: actions/setup-python@v5
+        with:
+          python-version: 3.13
+      - name: poetry
+        run: |
+          python -m pip install poetry==2.1.2
+          python -m poetry config virtualenvs.in-project true
+          cd tests/integration && poetry install --no-root
+      - name: run
+        run: |
+          cd tests/integration && \
+          poetry run pytest \
+            --basetemp=./tmp/ \
+            src/${{matrix.src}} \
+            --sqlstore-provider ${{matrix.sqlstore-provider}} \
+            --postgres-version ${{matrix.postgres-version}} \
+            --clickhouse-version ${{matrix.clickhouse-version}} \
+            --schema-migrator-version ${{matrix.schema-migrator-version}}
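One cell of this matrix can be reproduced locally with the same commands the workflow runs (assuming Python 3.13, Docker, and the repo checked out; flags are taken verbatim from the step above):

    python -m pip install poetry==2.1.2
    cd tests/integration && poetry install --no-root
    poetry run pytest --basetemp=./tmp/ src/auth \
      --sqlstore-provider sqlite \
      --postgres-version 15 \
      --clickhouse-version 24.1.2-alpine \
      --schema-migrator-version v0.128.1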
.github/workflows/prereleaser.yaml (4 lines changed)

@@ -1,10 +1,6 @@
 name: prereleaser
 
 on:
-  # schedule every wednesday 9:30 AM UTC (3pm IST)
-  schedule:
-    - cron: '30 9 * * 3'
-
   # allow manual triggering of the workflow by a maintainer
   workflow_dispatch:
     inputs:
.github/workflows/push.yaml (deleted, -134)

@@ -1,134 +0,0 @@
-name: push
-
-on:
-  push:
-    branches:
-      - main
-    tags:
-      - v*
-
-jobs:
-  image-build-and-push-signoz:
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout
-        uses: actions/checkout@v4
-      - name: setup
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.22"
-      - name: setup-qemu
-        uses: docker/setup-qemu-action@v3
-      - name: setup-buildx
-        uses: docker/setup-buildx-action@v3
-        with:
-          version: latest
-      - name: docker-login
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: create-env-file
-        run: |
-          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
-          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
-          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
-          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
-          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
-          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
-          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
-          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
-          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
-          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
-          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
-      - uses: benjlevesque/short-sha@v2.2
-        id: short-sha
-      - name: branch-name
-        id: branch-name
-        uses: tj-actions/branch-names@v7.0.7
-      - name: docker-tag
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
-          fi
-      - name: cross-compilation-tools
-        run: |
-          set -ex
-          sudo apt-get update
-          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
-      - name: publish-signoz
-        run: make build-push-signoz
-      - name: qs-docker-tag
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            tag="${{ steps.branch-name.outputs.tag }}"
-            tag="${tag:1}"
-            echo "DOCKER_TAG=${tag}" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
-          fi
-      - name: publish-query-service
-        run: |
-          SIGNOZ_DOCKER_IMAGE=query-service make build-push-signoz
-
-  image-build-and-push-signoz-community:
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout
-        uses: actions/checkout@v4
-      - name: setup-go
-        uses: actions/setup-go@v4
-        with:
-          go-version: "1.22"
-      - name: setup-qemu
-        uses: docker/setup-qemu-action@v3
-      - name: setup-buildx
-        uses: docker/setup-buildx-action@v3
-        with:
-          version: latest
-      - name: docker-login
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - uses: benjlevesque/short-sha@v2.2
-        id: short-sha
-      - name: branch-name
-        id: branch-name
-        uses: tj-actions/branch-names@v7.0.7
-      - name: docker-tag
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
-          fi
-      - name: cross-compilation-tools
-        run: |
-          set -ex
-          sudo apt-get update
-          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
-      - name: publish-signoz-community
-        run: make build-push-signoz-community
-      - name: qs-docker-tag
-        run: |
-          if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
-            tag="${{ steps.branch-name.outputs.tag }}"
-            tag="${tag:1}"
-            echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
-          elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
-            echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
-          else
-            echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
-          fi
-      - name: publish-query-service-oss
-        run: |
-          SIGNOZ_COMMUNITY_DOCKER_IMAGE=query-service make build-push-signoz-community
.github/workflows/remove-label.yaml (deleted, -16)

@@ -1,16 +0,0 @@
-name: remove-label
-
-on:
-  pull_request_target:
-    types: [synchronize]
-
-jobs:
-  remove:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Remove label testing-deploy from PR
-        uses: buildsville/add-remove-label@v2.0.0
-        with:
-          label: testing-deploy
-          type: remove
-          token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/run-e2e.yaml (new file, +62)

@@ -0,0 +1,62 @@
+name: e2eci
+
+on:
+  workflow_dispatch:
+    inputs:
+      userRole:
+        description: "Role of the user (ADMIN, EDITOR, VIEWER)"
+        required: true
+        type: choice
+        options:
+          - ADMIN
+          - EDITOR
+          - VIEWER
+
+jobs:
+  test:
+    name: Run Playwright Tests
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: lts/*
+
+      - name: Mask secrets and input
+        run: |
+          echo "::add-mask::${{ secrets.BASE_URL }}"
+          echo "::add-mask::${{ secrets.LOGIN_USERNAME }}"
+          echo "::add-mask::${{ secrets.LOGIN_PASSWORD }}"
+          echo "::add-mask::${{ github.event.inputs.userRole }}"
+
+      - name: Install dependencies
+        working-directory: frontend
+        run: |
+          npm install -g yarn
+          yarn
+
+      - name: Install Playwright Browsers
+        working-directory: frontend
+        run: yarn playwright install --with-deps
+
+      - name: Run Playwright Tests
+        working-directory: frontend
+        run: |
+          BASE_URL="${{ secrets.BASE_URL }}" \
+          LOGIN_USERNAME="${{ secrets.LOGIN_USERNAME }}" \
+          LOGIN_PASSWORD="${{ secrets.LOGIN_PASSWORD }}" \
+          USER_ROLE="${{ github.event.inputs.userRole }}" \
+          yarn playwright test
+
+      - name: Upload Playwright Report
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: playwright-report
+          path: frontend/playwright-report/
+          retention-days: 30
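The same suite can be pointed at a local instance by exporting the variables the workflow masks (all values below are placeholders):

    cd frontend
    npm install -g yarn && yarn
    yarn playwright install --with-deps
    BASE_URL="http://localhost:3301" \
    LOGIN_USERNAME="user@example.com" \
    LOGIN_PASSWORD="changeme" \
    USER_ROLE="ADMIN" \
    yarn playwright test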
55
.github/workflows/staging-deployment.yaml
vendored
55
.github/workflows/staging-deployment.yaml
vendored
@@ -1,55 +0,0 @@
name: staging-deployment
# Trigger deployment only on push to main branch
on:
  push:
    branches:
      - main
jobs:
  deploy:
    name: Deploy latest main branch to staging
    runs-on: ubuntu-latest
    environment: staging
    permissions:
      contents: 'read'
      id-token: 'write'
    steps:
      - id: 'auth'
        uses: 'google-github-actions/auth@v2'
        with:
          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}

      - name: 'sdk'
        uses: 'google-github-actions/setup-gcloud@v2'

      - name: 'ssh'
        shell: bash
        env:
          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
          GITHUB_SHA: ${{ github.sha }}
          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
          GCP_ZONE: ${{ secrets.GCP_ZONE }}
          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
        run: |
          read -r -d '' COMMAND <<EOF || true
          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
          echo "GITHUB_SHA: ${GITHUB_SHA}"
          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
          export OTELCOL_TAG="main"
          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
          export KAFKA_SPAN_EVAL="true"
          docker system prune --force
          docker pull signoz/signoz-otel-collector:main
          docker pull signoz/signoz-schema-migrator:main
          cd ~/signoz
          git status
          git add .
          git stash push -m "stashed on $(date --iso-8601=seconds)"
          git fetch origin
          git checkout ${GITHUB_BRANCH}
          git pull
          make build-signoz-amd64
          make run-testing
          EOF
          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
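The deploy step above builds a multi-line command locally and runs it on the VM over an IAP-tunneled SSH session, so the instance needs no public IP. A standalone sketch of the same pattern, with placeholder project, zone, and instance values:

    # run a remote command over IAP-tunneled SSH (all values are placeholders)
    gcloud beta compute ssh my-instance \
      --zone us-central1-a \
      --project my-project \
      --tunnel-through-iap \
      --ssh-key-expire-after=15m \
      --command "docker ps"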
55  .github/workflows/testing-deployment.yaml  (vendored)
@@ -1,55 +0,0 @@
name: testing-deployment
# Trigger deployment only on testing-deploy label on pull request
on:
  pull_request:
    types: [labeled]
jobs:
  deploy:
    name: Deploy PR branch to testing
    runs-on: ubuntu-latest
    environment: testing
    if: ${{ github.event.label.name == 'testing-deploy' }}
    permissions:
      contents: 'read'
      id-token: 'write'
    steps:
      - id: 'auth'
        uses: 'google-github-actions/auth@v2'
        with:
          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}

      - name: 'sdk'
        uses: 'google-github-actions/setup-gcloud@v2'

      - name: 'ssh'
        shell: bash
        env:
          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
          GITHUB_SHA: ${{ github.sha }}
          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
          GCP_ZONE: ${{ secrets.GCP_ZONE }}
          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
          CLOUDSDK_CORE_DISABLE_PROMPTS: 1
        run: |
          read -r -d '' COMMAND <<EOF || true
          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
          echo "GITHUB_SHA: ${GITHUB_SHA}"
          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
          export DEV_BUILD="1"
          export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
          docker system prune --force
          cd ~/signoz
          git status
          git add .
          git stash push -m "stashed on $(date --iso-8601=seconds)"
          git fetch origin
          git checkout main
          git pull
          # This is added to handle the scenario when a new commit in the PR is force-pushed
          git branch -D ${GITHUB_BRANCH}
          git checkout --track origin/${GITHUB_BRANCH}
          make build-signoz-amd64
          make run-testing
          EOF
          gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
151  .gitignore  (vendored)
@@ -54,19 +54,19 @@ ee/query-service/tests/test-deploy/data/
bin/
.local/
*/query-service/queries.active
ee/query-service/db

# e2e

e2e/node_modules/
e2e/test-results/
e2e/playwright-report/
e2e/blob-report/
e2e/playwright/.cache/
e2e/.auth

# go
vendor/
**/main/**
__debug_bin**

# git-town
.git-branches.toml
@@ -79,6 +79,153 @@ deploy/common/clickhouse/user_scripts/

queries.active

# tmp
**/tmp/**

# .devenv tmp files
.devenv/**/tmp/**
.qodo

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# LSP config files
pyrightconfig.json

# End of https://www.toptal.com/developers/gitignore/api/python
34  .golangci.yml  (new file)
@@ -0,0 +1,34 @@
linters:
  default: standard
  enable:
    - bodyclose
    - misspell
    - nilnil
    - sloglint
    - depguard
    - iface
    - unparam

linters-settings:
  sloglint:
    no-mixed-args: true
    kv-only: true
    no-global: all
    context: all
    static-msg: true
    msg-style: lowercased
    key-naming-case: snake
  depguard:
    rules:
      nozap:
        deny:
          - pkg: "go.uber.org/zap"
            desc: "Do not use zap logger. Use slog instead."
  iface:
    enable:
      - identical
issues:
  exclude-dirs:
    - "pkg/query-service"
    - "ee/query-service"
    - "scripts/"
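With this file at the repository root, the enabled linters (including the depguard rule banning zap in favor of slog) apply to the whole module except the excluded directories. A likely local invocation, assuming golangci-lint is installed:

    # run the configured linters over the module
    golangci-lint run ./...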
17  .versions/alpine  (new file)
@@ -0,0 +1,17 @@
#### Auto generated by make docker-version-alpine. DO NOT EDIT! ####
amd64=029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85
unknown=5fea95373b9ec85974843f31446fa6a9df4492dddae4e1cb056193c34a20a5be
arm=b4aef1a899e0271f06d948c9a8fa626ecdb2202d3a178bc14775dd559e23df8e
unknown=a4d1e27e63a9d6353046eb25a2f0ec02945012b217f4364cd83a73fe6dfb0b15
arm=4fdafe217d0922f3c3e2b4f64cf043f8403a4636685cd9c51fea2cbd1f419740
unknown=7f21ac2018d95b2c51a5779c1d5ca6c327504adc3b0fdc747a6725d30b3f13c2
arm64=ea3c5a9671f7b3f7eb47eab06f73bc6591df978b0d5955689a9e6f943aa368c0
unknown=a8ba68c1a9e6eea8041b4b8f996c235163440808b9654a865976fdcbede0f433
386=dea9f02e103e837849f984d5679305c758aba7fea1b95b7766218597f61a05ab
unknown=3c6629bec05c8273a927d46b77428bf4a378dad911a0ae284887becdc149b734
ppc64le=0880443bffa028dfbbc4094a32dd6b7ac25684e4c0a3d50da9e0acae355c5eaf
unknown=bb48308f976b266e3ab39bbf9af84521959bd9c295d3c763690cf41f8df2a626
riscv64=d76e6fbe348ff20c2931bb7f101e49379648e026de95dd37f96e00ce1909dcf7
unknown=dd807544365f6dc187cbe6de0806adce2ea9de3e7124717d1d8e8b7a18b77b64
s390x=b815fadf80495594eb6296a6af0bc647ae5f193e0044e07acec7e5b378c9ce2d
unknown=74681be74a280a88abb53ff1e048eb1fb624b30d0066730df6d8afd02ba82e01
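These per-architecture digests pin the alpine base image by content rather than by tag, matching the ALPINE_SHA build argument in the multi-arch Dockerfiles later in this diff. A sketch of how such a digest would be consumed directly:

    # pull the amd64 alpine base by digest instead of by mutable tag
    docker pull alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85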
62  ADVOCATE.md  (new file)
@@ -0,0 +1,62 @@
# SigNoz Community Advocate Program

Our community is filled with passionate developers who love SigNoz and have been helping spread the word about observability across the world. The SigNoz Community Advocate Program is our way of recognizing these incredible community members and creating deeper collaboration opportunities.

## What is the SigNoz Community Advocate Program?

The SigNoz Community Advocate Program celebrates and supports community members who are already passionate about observability and helping fellow developers. If you're someone who loves discussing SigNoz, helping others with their implementations, or sharing knowledge about observability practices, this program is designed with you in mind.

Our advocates are the heart of the SigNoz community, helping other developers succeed with observability and providing valuable insights that help us build better products.

## What Do Advocates Do?

1. **Community Support**

   - Help fellow developers in our Slack community and GitHub Discussions
   - Answer questions and share solutions
   - Guide newcomers through SigNoz self-host implementations

2. **Knowledge Sharing**

   - Spread awareness about observability best practices on developer forums
   - Create content like blog posts, social media posts, and videos
   - Host local meetups and events in their regions

3. **Product Collaboration**

   - Provide insights on features, changes, and improvements the community needs
   - Beta test new features and provide early feedback
   - Help us understand real-world use cases and pain points

## What's In It For You?

**Recognition & Swag**

- Official recognition as a SigNoz advocate
- Welcome hamper upon joining
- Exclusive swag box within your first 3 months
- Feature on our website (with your permission)

**Early Access**

- First look at new features and updates
- Direct line to the SigNoz team for feedback and suggestions
- Opportunity to influence the product roadmap

**Community Impact**

- Help shape the observability landscape
- Build your reputation in the developer community
- Connect with like-minded developers globally

## How Does It Work?

Currently, the SigNoz Community Advocate Program is **invite-only**. We're starting with a small group of passionate community members who have already been making a difference.

We'll be working closely with our first advocates to shape the program details, benefits, and structure based on what works best for everyone involved.

If you're interested in learning more about the program or want to get more involved in the SigNoz community, join our [Slack community](https://signoz-community.slack.com/) and let us know!

---

*The SigNoz Community Advocate Program recognizes and celebrates the amazing community members who are already passionate about helping fellow developers succeed with observability.*
@@ -77,3 +77,5 @@ Need assistance? Join our Slack community:
## Where do I go from here?

- Set up your [development environment](docs/contributing/development.md)
- Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo-docs.md)
- Explore the [SigNoz Community Advocate Program](ADVOCATE.md), which recognises contributors who support the community, share their expertise, and help shape SigNoz's future.
2  LICENSE
@@ -2,7 +2,7 @@ Copyright (c) 2020-present SigNoz Inc.

Portions of this software are licensed as follows:

* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
* All content that resides under the "ee/" and the "cmd/enterprise/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
* All third party components incorporated into the SigNoz Software are licensed under the original license provided by the owner of the applicable component.
* Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.

330  Makefile
@@ -1,49 +1,45 @@
#
# Reference Guide - https://www.gnu.org/software/make/manual/make.html
#
##############################################################
# variables
##############################################################
SHELL := /bin/bash
SRC ?= $(shell pwd)
NAME ?= signoz
OS ?= $(shell uname -s | tr '[A-Z]' '[a-z]')
ARCH ?= $(shell uname -m | sed 's/x86_64/amd64/g' | sed 's/aarch64/arm64/g')
COMMIT_SHORT_SHA ?= $(shell git rev-parse --short HEAD)
BRANCH_NAME ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
VERSION ?= $(BRANCH_NAME)-$(COMMIT_SHORT_SHA)
TIMESTAMP ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
ARCHS ?= amd64 arm64
TARGET_DIR ?= $(shell pwd)/target

# Build variables
BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
LICENSE_SIGNOZ_IO ?= https://license.signoz.io/api/v1
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
ZEUS_URL ?= https://api.signoz.cloud
DEV_ZEUS_URL ?= https://api.staging.signoz.cloud
DEV_BUILD ?= "" # set to any non-empty value to enable dev build
ZEUS_URL ?= https://api.signoz.cloud
GO_BUILD_LDFLAG_ZEUS_URL = -X github.com/SigNoz/signoz/ee/zeus.url=$(ZEUS_URL)
LICENSE_URL ?= https://license.signoz.io
GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=$(LICENSE_URL)

# Internal variables or constants.
FRONTEND_DIRECTORY ?= frontend
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker
SWARM_DIRECTORY ?= deploy/docker-swarm
CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile
GORELEASER_BIN ?= goreleaser
GO_BUILD_VERSION_LDFLAGS = -X github.com/SigNoz/signoz/pkg/version.version=$(VERSION) -X github.com/SigNoz/signoz/pkg/version.hash=$(COMMIT_SHORT_SHA) -X github.com/SigNoz/signoz/pkg/version.time=$(TIMESTAMP) -X github.com/SigNoz/signoz/pkg/version.branch=$(BRANCH_NAME)
GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS))
GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/cmd/community
GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community
GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS))
GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS))
GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/cmd/enterprise
GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO)

GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
GOPATH ?= $(shell go env GOPATH)

REPONAME ?= signoz
DOCKER_TAG ?= $(BUILD_VERSION)
SIGNOZ_DOCKER_IMAGE ?= signoz
SIGNOZ_COMMUNITY_DOCKER_IMAGE ?= signoz-community

# Build-time Go variables
PACKAGE?=go.signoz.io/signoz
buildVersion=${PACKAGE}/pkg/query-service/version.buildVersion
buildHash=${PACKAGE}/pkg/query-service/version.buildHash
buildTime=${PACKAGE}/pkg/query-service/version.buildTime
gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL

LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
PROD_LD_FLAGS=-X ${zeusURL}=${ZEUS_URL} -X ${licenseSignozIo}=${LICENSE_SIGNOZ_IO}
DEV_LD_FLAGS=-X ${zeusURL}=${DEV_ZEUS_URL} -X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
DOCKER_BUILD_ARCHS_COMMUNITY = $(addprefix docker-build-community-,$(ARCHS))
DOCKERFILE_COMMUNITY = $(SRC)/cmd/community/Dockerfile
DOCKER_REGISTRY_COMMUNITY ?= docker.io/signoz/signoz-community
DOCKER_BUILD_ARCHS_ENTERPRISE = $(addprefix docker-build-enterprise-,$(ARCHS))
DOCKERFILE_ENTERPRISE = $(SRC)/cmd/enterprise/Dockerfile
DOCKER_REGISTRY_ENTERPRISE ?= docker.io/signoz/signoz
JS_BUILD_CONTEXT = $(SRC)/frontend

##############################################################
# directories
##############################################################
$(TARGET_DIR):
	mkdir -p $(TARGET_DIR)

##############################################################
# common commands
@@ -60,11 +56,27 @@ devenv-clickhouse: ## Run clickhouse in devenv
	@cd .devenv/docker/clickhouse; \
	docker compose -f compose.yaml up -d

.PHONY: devenv-postgres
devenv-postgres: ## Run postgres in devenv
	@cd .devenv/docker/postgres; \
	docker compose -f compose.yaml up -d

.PHONY: devenv-signoz-otel-collector
devenv-signoz-otel-collector: ## Run signoz-otel-collector in devenv (requires clickhouse to be running)
	@cd .devenv/docker/signoz-otel-collector; \
	docker compose -f compose.yaml up -d

.PHONY: devenv-up
devenv-up: devenv-clickhouse devenv-signoz-otel-collector ## Start both clickhouse and signoz-otel-collector for local development
	@echo "Development environment is ready!"
	@echo " - ClickHouse: http://localhost:8123"
	@echo " - Signoz OTel Collector: grpc://localhost:4317, http://localhost:4318"

##############################################################
# run commands
# go commands
##############################################################
.PHONY: run-go
run-go: ## Runs the go backend server
.PHONY: go-run-enterprise
go-run-enterprise: ## Runs the enterprise go backend server
	@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
	SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
	SIGNOZ_WEB_ENABLED=false \
@@ -73,147 +85,127 @@ run-go: ## Runs the go backend server
	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
	go run -race \
	./ee/query-service/main.go \
	--config ./pkg/query-service/config/prometheus.yml \
	--cluster cluster \
	--use-logs-new-schema true \
	--use-trace-new-schema true
	$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go \
	--config ./conf/prometheus.yml \
	--cluster cluster

all: build-push-frontend build-push-signoz
.PHONY: go-test
go-test: ## Runs go unit tests
	@go test -race ./...

# Steps to build static files of frontend
build-frontend-static:
	@echo "------------------"
	@echo "--> Building frontend static files"
	@echo "------------------"
	@cd $(FRONTEND_DIRECTORY) && \
	rm -rf build && \
	CI=1 yarn install && \
	yarn build && \
	ls -l build
.PHONY: go-run-community
go-run-community: ## Runs the community go backend server
	@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
	SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
	SIGNOZ_WEB_ENABLED=false \
	SIGNOZ_JWT_SECRET=secret \
	SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
	go run -race \
	$(GO_BUILD_CONTEXT_COMMUNITY)/*.go server \
	--config ./conf/prometheus.yml \
	--cluster cluster

# Steps to build static binary of signoz
.PHONY: build-signoz-static
build-signoz-static:
	@echo "------------------"
	@echo "--> Building signoz static binary"
	@echo "------------------"
	@if [ $(DEV_BUILD) != "" ]; then \
		cd $(EE_QUERY_SERVICE_DIRECTORY) && \
		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
		-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
.PHONY: go-build-community $(GO_BUILD_ARCHS_COMMUNITY)
go-build-community: ## Builds the go backend server for community
go-build-community: $(GO_BUILD_ARCHS_COMMUNITY)
$(GO_BUILD_ARCHS_COMMUNITY): go-build-community-%: $(TARGET_DIR)
	@mkdir -p $(TARGET_DIR)/$(OS)-$*
	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)-community"
	@if [ $* = "arm64" ]; then \
		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
	else \
		cd $(EE_QUERY_SERVICE_DIRECTORY) && \
		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
		-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${PROD_LD_FLAGS}"; \
		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
	fi

.PHONY: build-signoz-static-amd64
build-signoz-static-amd64:
	make GOARCH=amd64 build-signoz-static

.PHONY: build-signoz-static-arm64
build-signoz-static-arm64:
	make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-signoz-static

# Steps to build static binary of signoz for all platforms
.PHONY: build-signoz-static-all
build-signoz-static-all: build-signoz-static-amd64 build-signoz-static-arm64 build-frontend-static

# Steps to build and push docker image of signoz
.PHONY: build-signoz-amd64 build-push-signoz
# Step to build docker image of signoz in amd64 (used in build pipeline)
build-signoz-amd64: build-signoz-static-amd64 build-frontend-static
	@echo "------------------"
	@echo "--> Building signoz docker image for amd64"
	@echo "------------------"
	@docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
		--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) \
		--build-arg TARGETPLATFORM="linux/amd64" .

# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-signoz: build-signoz-static-all
	@echo "------------------"
	@echo "--> Building and pushing signoz docker image"
	@echo "------------------"
	@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
		--push --platform linux/arm64,linux/amd64 \
		--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) .

# Step to build docker image of signoz community in amd64 (used in build pipeline)
build-signoz-community-amd64:
	@echo "------------------"
	@echo "--> Building signoz docker image for amd64"
	@echo "------------------"
	make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-signoz-amd64

# Step to build and push docker image of signoz community in amd64 and arm64 (used in push pipeline)
build-push-signoz-community:
	@echo "------------------"
	@echo "--> Building and pushing signoz community docker image"
	@echo "------------------"
	make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-push-signoz

pull-signoz:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull

run-signoz:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d

run-testing:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.testing.yaml up --build -d

down-signoz:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v

check-no-ee-references:
	@echo "Checking for 'ee' package references in 'pkg' directory..."
	@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
		echo "Error: Found references to 'ee' packages in 'pkg' directory"; \
		exit 1; \
.PHONY: go-build-enterprise $(GO_BUILD_ARCHS_ENTERPRISE)
go-build-enterprise: ## Builds the go backend server for enterprise
go-build-enterprise: $(GO_BUILD_ARCHS_ENTERPRISE)
$(GO_BUILD_ARCHS_ENTERPRISE): go-build-enterprise-%: $(TARGET_DIR)
	@mkdir -p $(TARGET_DIR)/$(OS)-$*
	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
	@if [ $* = "arm64" ]; then \
		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
	else \
		echo "No references to 'ee' packages found in 'pkg' directory"; \
		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
	fi

test:
	go test ./pkg/...

########################################################
# Goreleaser
########################################################
.PHONY: gor-snapshot gor-snapshot-histogram-quantile gor-snapshot-signoz gor-snapshot-signoz-community gor-split gor-split-histogram-quantile gor-split-signoz gor-split-signoz-community gor-merge

gor-snapshot:
	@if [[ ${GORELEASER_WORKDIR} ]]; then \
		${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --snapshot; \
.PHONY: go-build-enterprise-race $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
go-build-enterprise-race: ## Builds the go backend server for enterprise with race
go-build-enterprise-race: $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
$(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
	@mkdir -p $(TARGET_DIR)/$(OS)-$*
	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
	@if [ $* = "arm64" ]; then \
		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
	else \
		${GORELEASER_BIN} release --clean --snapshot; \
		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
	fi

gor-snapshot-histogram-quantile:
	make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot
##############################################################
# js commands
##############################################################
.PHONY: js-build
js-build: ## Builds the js frontend
	@echo ">> building js frontend"
	@cd $(JS_BUILD_CONTEXT) && CI=1 yarn install && yarn build

gor-snapshot-signoz: build-frontend-static
	make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) goreleaser-snapshot
##############################################################
# docker commands
##############################################################
.PHONY: docker-build-community $(DOCKER_BUILD_ARCHS_COMMUNITY)
docker-build-community: ## Builds the docker image for community
docker-build-community: $(DOCKER_BUILD_ARCHS_COMMUNITY)
$(DOCKER_BUILD_ARCHS_COMMUNITY): docker-build-community-%: go-build-community-% js-build
	@echo ">> building docker image for $(NAME)-community"
	@docker build -t "$(DOCKER_REGISTRY_COMMUNITY):$(VERSION)-$*" \
		--build-arg TARGETARCH="$*" \
		-f $(DOCKERFILE_COMMUNITY) $(SRC)

gor-snapshot-signoz-community: build-frontend-static
	make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) goreleaser-snapshot
.PHONY: docker-buildx-community
docker-buildx-community: ## Builds the docker image for community using buildx
docker-buildx-community: go-build-community js-build
	@echo ">> building docker image for $(NAME)-community"
	@docker buildx build --file $(DOCKERFILE_COMMUNITY) \
		--progress plain \
		--platform linux/arm64,linux/amd64 \
		--push \
		--tag $(DOCKER_REGISTRY_COMMUNITY):$(VERSION) $(SRC)

gor-split:
	@if [[ ${GORELEASER_WORKDIR} ]]; then \
		${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --split; \
	else \
		${GORELEASER_BIN} release --clean --split; \
	fi
.PHONY: docker-build-enterprise $(DOCKER_BUILD_ARCHS_ENTERPRISE)
docker-build-enterprise: ## Builds the docker image for enterprise
docker-build-enterprise: $(DOCKER_BUILD_ARCHS_ENTERPRISE)
$(DOCKER_BUILD_ARCHS_ENTERPRISE): docker-build-enterprise-%: go-build-enterprise-% js-build
	@echo ">> building docker image for $(NAME)"
	@docker build -t "$(DOCKER_REGISTRY_ENTERPRISE):$(VERSION)-$*" \
		--build-arg TARGETARCH="$*" \
		-f $(DOCKERFILE_ENTERPRISE) $(SRC)

gor-split-histogram-quantile:
	make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-split
.PHONY: docker-buildx-enterprise
docker-buildx-enterprise: ## Builds the docker image for enterprise using buildx
docker-buildx-enterprise: go-build-enterprise js-build
	@echo ">> building docker image for $(NAME)"
	@docker buildx build --file $(DOCKERFILE_ENTERPRISE) \
		--progress plain \
		--platform linux/arm64,linux/amd64 \
		--push \
		--tag $(DOCKER_REGISTRY_ENTERPRISE):$(VERSION) $(SRC)

gor-split-signoz: build-frontend-static
	make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) goreleaser-split
##############################################################
# python commands
##############################################################
.PHONY: py-fmt
py-fmt: ## Run black for integration tests
	@cd tests/integration && poetry run black .

gor-split-signoz-community: build-frontend-static
	make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) goreleaser-split
.PHONY: py-lint
py-lint: ## Run lint for integration tests
	@cd tests/integration && poetry run isort .
	@cd tests/integration && poetry run autoflake .
	@cd tests/integration && poetry run pylint .

gor-merge:
	${GORELEASER_BIN} continue --merge
.PHONY: py-test
py-test: ## Runs integration tests
	@cd tests/integration && poetry run pytest --basetemp=./tmp/ -vv --capture=no src/
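The pattern rules above fan each go-build target out per architecture via $(ARCHS), placing binaries under the TARGET_DIR layout defined in the variables section. A plausible local sequence (exact output paths depend on the host OS):

    # build the community binary for all default architectures
    make go-build-community
    # or build one architecture and its matching docker image
    make go-build-community-arm64
    make docker-build-community-arm64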
@@ -8,7 +8,6 @@
<p align="center">All your logs, metrics, and traces in one place. Monitor your application, spot issues before they occur and troubleshoot downtime quickly with rich context. SigNoz is a cost-effective open-source alternative to Datadog and New Relic. Visit <a href="https://signoz.io" target="_blank">signoz.io</a> for the full documentation, tutorials, and guide.</p>

<p align="center">
  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
  <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
  <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -231,6 +230,8 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
- [Shaheer Kochai](https://github.com/ahmadshaheer)
- [Amlan Kumar Nandy](https://github.com/amlannandy)
- [Sahil Khan](https://github.com/sawhil)
- [Aditya Singh](https://github.com/aks07)
- [Abhi Kumar](https://github.com/ahrefabhi)

#### DevOps

@@ -11,7 +11,7 @@ before:
builds:
  - id: signoz
    binary: bin/signoz
    main: pkg/query-service/main.go
    main: ./cmd/community
    env:
      - CGO_ENABLED=1
      - >-
@@ -30,13 +30,12 @@ builds:
      - v8.0
    ldflags:
      - -s -w
      - -X github.com/SigNoz/signoz/pkg/query-service/version.version={{ .Version }}
      - -X main.commit={{ .Commit }} -X main.date={{ .CommitDate }}
      - -X main.builtBy=goreleaser
      - -X go.signoz.io/signoz/pkg/query-service/version.buildVersion={{ .Version }}
      - -X go.signoz.io/signoz/pkg/query-service/version.buildHash={{ .ShortCommit }}
      - -X go.signoz.io/signoz/pkg/query-service/version.buildTime={{ .Date }}
      - -X go.signoz.io/signoz/pkg/query-service/version.gitBranch={{ .Branch }}
      - -X github.com/SigNoz/signoz/pkg/version.version=v{{ .Version }}
      - -X github.com/SigNoz/signoz/pkg/version.variant=community
      - -X github.com/SigNoz/signoz/pkg/version.hash={{ .ShortCommit }}
      - -X github.com/SigNoz/signoz/pkg/version.time={{ .CommitTimestamp }}
      - -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
      - -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr
      - >-
        {{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
    mod_timestamp: "{{ .CommitTimestamp }}"
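These goreleaser entries rely on Go's -X linker flag to stamp version metadata into package-level string variables at link time. A minimal sketch of the same mechanism outside goreleaser, using the variable paths and entrypoint from the diff above (the version value is illustrative):

    # stamp pkg/version at build time the way these ldflags do
    go build -ldflags "-X github.com/SigNoz/signoz/pkg/version.version=v1.2.3 \
      -X github.com/SigNoz/signoz/pkg/version.variant=community" ./cmd/community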
19  cmd/community/Dockerfile  (new file)
@@ -0,0 +1,19 @@
FROM alpine:3.20.3
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG TARGETARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*


COPY ./target/${OS}-${TARGETARCH}/signoz-community /root/signoz
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz", "server"]
20  cmd/community/Dockerfile.multi-arch  (new file)
@@ -0,0 +1,20 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz-community /root/signoz-community
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz-community

ENTRYPOINT ["./signoz-community", "server"]
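The multi-arch variant pins its base image by digest via ALPINE_SHA, which is expected to come from the .versions/alpine file shown earlier. A plausible build invocation (digest and architecture chosen for illustration; a prebuilt binary must already exist under ./target/):

    # build against a digest-pinned alpine base (digest taken from .versions/alpine)
    docker build -f cmd/community/Dockerfile.multi-arch \
      --build-arg ALPINE_SHA=029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85 \
      --build-arg ARCH=amd64 .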
18  cmd/community/main.go  (new file)
@@ -0,0 +1,18 @@
package main

import (
	"log/slog"

	"github.com/SigNoz/signoz/cmd"
	"github.com/SigNoz/signoz/pkg/instrumentation"
)

func main() {
	// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
	logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})

	// register a list of commands to the root command
	registerServer(cmd.RootCmd, logger)

	cmd.Execute(logger)
}
116  cmd/community/server.go  (new file)
@@ -0,0 +1,116 @@
package main

import (
	"context"
	"log/slog"
	"time"

	"github.com/SigNoz/signoz/cmd"
	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
	"github.com/SigNoz/signoz/pkg/analytics"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/query-service/app"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlschema"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/version"
	"github.com/SigNoz/signoz/pkg/zeus"
	"github.com/SigNoz/signoz/pkg/zeus/noopzeus"
	"github.com/spf13/cobra"
)

func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
	var flags signoz.DeprecatedFlags

	serverCmd := &cobra.Command{
		Use:                "server",
		Short:              "Run the SigNoz server",
		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
		RunE: func(currCmd *cobra.Command, args []string) error {
			config, err := cmd.NewSigNozConfig(currCmd.Context(), flags)
			if err != nil {
				return err
			}

			return runServer(currCmd.Context(), config, logger)
		},
	}

	flags.RegisterFlags(serverCmd)
	parentCmd.AddCommand(serverCmd)
}

func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
	// print the version
	version.Info.PrettyPrint(config.Version)

	// add enterprise sqlstore factories to the community sqlstore factories
	sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
		logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
		return err
	}

	jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)

	signoz, err := signoz.New(
		ctx,
		config,
		jwt,
		zeus.Config{},
		noopzeus.NewProviderFactory(),
		licensing.Config{},
		func(_ sqlstore.SQLStore, _ zeus.Zeus, _ organization.Getter, _ analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
			return nooplicensing.NewFactory()
		},
		signoz.NewEmailingProviderFactories(),
		signoz.NewCacheProviderFactories(),
		signoz.NewWebProviderFactories(),
		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
			return signoz.NewSQLSchemaProviderFactories(sqlstore)
		},
		signoz.NewSQLStoreProviderFactories(),
		signoz.NewTelemetryStoreProviderFactories(),
	)
	if err != nil {
		logger.ErrorContext(ctx, "failed to create signoz", "error", err)
		return err
	}

	server, err := app.NewServer(config, signoz, jwt)
	if err != nil {
		logger.ErrorContext(ctx, "failed to create server", "error", err)
		return err
	}

	if err := server.Start(ctx); err != nil {
		logger.ErrorContext(ctx, "failed to start server", "error", err)
		return err
	}

	signoz.Start(ctx)

	if err := signoz.Wait(ctx); err != nil {
		logger.ErrorContext(ctx, "failed to start signoz", "error", err)
		return err
	}

	err = server.Stop(ctx)
	if err != nil {
		logger.ErrorContext(ctx, "failed to stop server", "error", err)
		return err
	}

	err = signoz.Stop(ctx)
	if err != nil {
		logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
		return err
	}

	return nil
}
45  cmd/config.go  (new file)
@@ -0,0 +1,45 @@
package cmd

import (
	"context"
	"fmt"
	"log/slog"
	"os"

	"github.com/SigNoz/signoz/pkg/config"
	"github.com/SigNoz/signoz/pkg/config/envprovider"
	"github.com/SigNoz/signoz/pkg/config/fileprovider"
	"github.com/SigNoz/signoz/pkg/signoz"
)

func NewSigNozConfig(ctx context.Context, flags signoz.DeprecatedFlags) (signoz.Config, error) {
	config, err := signoz.NewConfig(
		ctx,
		config.ResolverConfig{
			Uris: []string{"env:"},
			ProviderFactories: []config.ProviderFactory{
				envprovider.NewFactory(),
				fileprovider.NewFactory(),
			},
		},
		flags,
	)
	if err != nil {
		return signoz.Config{}, err
	}

	return config, nil
}

func NewJWTSecret(_ context.Context, _ *slog.Logger) string {
	jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
	if len(jwtSecret) == 0 {
		fmt.Println("🚨 CRITICAL SECURITY ISSUE: No JWT secret key specified!")
		fmt.Println("SIGNOZ_JWT_SECRET environment variable is not set. This has dire consequences for the security of the application.")
		fmt.Println("Without a JWT secret, user sessions are vulnerable to tampering and unauthorized access.")
		fmt.Println("Please set the SIGNOZ_JWT_SECRET environment variable immediately.")
		fmt.Println("For more information, please refer to https://github.com/SigNoz/signoz/issues/8400.")
	}

	return jwtSecret
}
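Note that NewJWTSecret only prints a warning when the variable is missing; it still returns the empty string, so setting a strong secret before starting the server is up to the operator. A reasonable setup sketch (the openssl recipe is an assumption, not something this diff prescribes):

    # generate and export a random JWT secret before starting signoz
    export SIGNOZ_JWT_SECRET="$(openssl rand -hex 32)"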
@@ -11,7 +11,7 @@ before:
builds:
  - id: signoz
    binary: bin/signoz
    main: ee/query-service/main.go
    main: ./cmd/enterprise
    env:
      - CGO_ENABLED=1
      - >-
@@ -30,15 +30,16 @@ builds:
      - v8.0
    ldflags:
      - -s -w
      - -X github.com/SigNoz/signoz/pkg/query-service/version.version={{ .Version }}
      - -X main.commit={{ .Commit }} -X main.date={{ .CommitDate }}
      - -X main.builtBy=goreleaser
      - -X go.signoz.io/signoz/pkg/query-service/version.buildVersion={{ .Version }}
      - -X go.signoz.io/signoz/pkg/query-service/version.buildHash={{ .ShortCommit }}
      - -X go.signoz.io/signoz/pkg/query-service/version.buildTime={{ .Date }}
      - -X go.signoz.io/signoz/pkg/query-service/version.gitBranch={{ .Branch }}
      - -X go.signoz.io/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
      - -X go.signoz.io/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
      - -X github.com/SigNoz/signoz/pkg/version.version=v{{ .Version }}
      - -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
      - -X github.com/SigNoz/signoz/pkg/version.hash={{ .ShortCommit }}
      - -X github.com/SigNoz/signoz/pkg/version.time={{ .CommitTimestamp }}
      - -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
      - -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
      - -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
      - -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
      - -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
      - -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr
      - >-
        {{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
    mod_timestamp: "{{ .CommitTimestamp }}"
19  cmd/enterprise/Dockerfile  (new file)
@@ -0,0 +1,19 @@
FROM alpine:3.20.3
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG TARGETARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*


COPY ./target/${OS}-${TARGETARCH}/signoz /root/signoz
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz", "server"]
46  cmd/enterprise/Dockerfile.integration  (new file)
@@ -0,0 +1,46 @@
FROM node:18-bullseye AS build

WORKDIR /opt/
COPY ./frontend/ ./
RUN CI=1 yarn install
RUN CI=1 yarn build

FROM golang:1.23-bullseye

ARG OS="linux"
ARG TARGETARCH
ARG ZEUSURL

# This path is important for stacktraces
WORKDIR $GOPATH/src/github.com/signoz/signoz
WORKDIR /root

RUN set -eux; \
    apt-get update; \
    apt-get install -y --no-install-recommends \
        g++ \
        gcc \
        libc6-dev \
        make \
        pkg-config \
    ; \
    rm -rf /var/lib/apt/lists/*

COPY go.mod go.sum ./

RUN go mod download

COPY ./cmd/ ./cmd/
COPY ./ee/ ./ee/
COPY ./pkg/ ./pkg/
COPY ./templates/email /root/templates

COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz

COPY --from=build /opt/build ./web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["/root/signoz", "server"]
20  cmd/enterprise/Dockerfile.multi-arch  (new file)
@@ -0,0 +1,20 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz /root/signoz
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz", "server"]
18  cmd/enterprise/main.go  (new file)
@@ -0,0 +1,18 @@
package main

import (
	"log/slog"

	"github.com/SigNoz/signoz/cmd"
	"github.com/SigNoz/signoz/pkg/instrumentation"
)

func main() {
	// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
	logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})

	// register a list of commands to the root command
	registerServer(cmd.RootCmd, logger)

	cmd.Execute(logger)
}
124  cmd/enterprise/server.go  (new file)
@@ -0,0 +1,124 @@
package main

import (
	"context"
	"log/slog"
	"time"

	"github.com/SigNoz/signoz/cmd"
	enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
	"github.com/SigNoz/signoz/ee/licensing/httplicensing"
	enterpriseapp "github.com/SigNoz/signoz/ee/query-service/app"
	"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
	enterprisezeus "github.com/SigNoz/signoz/ee/zeus"
	"github.com/SigNoz/signoz/ee/zeus/httpzeus"
	"github.com/SigNoz/signoz/pkg/analytics"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlschema"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/version"
	"github.com/SigNoz/signoz/pkg/zeus"
	"github.com/spf13/cobra"
)

func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
	var flags signoz.DeprecatedFlags

	serverCmd := &cobra.Command{
		Use:                "server",
		Short:              "Run the SigNoz server",
		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
		RunE: func(currCmd *cobra.Command, args []string) error {
			config, err := cmd.NewSigNozConfig(currCmd.Context(), flags)
			if err != nil {
				return err
			}

			return runServer(currCmd.Context(), config, logger)
		},
	}

	flags.RegisterFlags(serverCmd)
	parentCmd.AddCommand(serverCmd)
}

func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
	// print the version
	version.Info.PrettyPrint(config.Version)

	// add enterprise sqlstore factories to the community sqlstore factories
	sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
		logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
		return err
	}

	jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)

	signoz, err := signoz.New(
		ctx,
		config,
		jwt,
		enterprisezeus.Config(),
		httpzeus.NewProviderFactory(),
		enterpriselicensing.Config(24*time.Hour, 3),
		func(sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
			return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter, analytics)
		},
		signoz.NewEmailingProviderFactories(),
		signoz.NewCacheProviderFactories(),
		signoz.NewWebProviderFactories(),
		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
			existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore)
			if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil {
				panic(err)
			}

			return existingFactories
		},
		sqlstoreFactories,
		signoz.NewTelemetryStoreProviderFactories(),
	)
	if err != nil {
		logger.ErrorContext(ctx, "failed to create signoz", "error", err)
		return err
	}

	server, err := enterpriseapp.NewServer(config, signoz, jwt)
	if err != nil {
		logger.ErrorContext(ctx, "failed to create server", "error", err)
		return err
	}

	if err := server.Start(ctx); err != nil {
		logger.ErrorContext(ctx, "failed to start server", "error", err)
		return err
	}

	signoz.Start(ctx)

	if err := signoz.Wait(ctx); err != nil {
		logger.ErrorContext(ctx, "failed to start signoz", "error", err)
		return err
	}

	err = server.Stop(ctx)
	if err != nil {
		logger.ErrorContext(ctx, "failed to stop server", "error", err)
		return err
	}

	err = signoz.Stop(ctx)
	if err != nil {
		logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
		return err
	}

	return nil
}
33  cmd/root.go  (new file)
@@ -0,0 +1,33 @@
package cmd

import (
	"log/slog"
	"os"

	"github.com/SigNoz/signoz/pkg/version"
	"github.com/spf13/cobra"
	"go.uber.org/zap" //nolint:depguard
)

var RootCmd = &cobra.Command{
	Use:               "signoz",
	Short:             "OpenTelemetry-Native Logs, Metrics and Traces in a single pane",
	Version:           version.Info.Version(),
	SilenceUsage:      true,
	SilenceErrors:     true,
	CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
}

func Execute(logger *slog.Logger) {
	zapLogger := newZapLogger()
	zap.ReplaceGlobals(zapLogger)
	defer func() {
		_ = zapLogger.Sync()
	}()

	err := RootCmd.Execute()
	if err != nil {
		logger.ErrorContext(RootCmd.Context(), "error running command", "error", err)
		os.Exit(1)
	}
}
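With cobra wired up this way, both the community and enterprise binaries expose a single server subcommand under the signoz root command. A likely invocation, mirroring the flags the Makefile run targets pass earlier in this diff:

    # run the built binary; --config and --cluster follow the Makefile's run targets
    ./signoz server --config ./conf/prometheus.yml --cluster cluster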
15  cmd/zap.go  (new file)
@@ -0,0 +1,15 @@
package cmd

import (
	"go.uber.org/zap"         //nolint:depguard
	"go.uber.org/zap/zapcore" //nolint:depguard
)

// Deprecated: Use `NewLogger` from `pkg/instrumentation` instead.
func newZapLogger() *zap.Logger {
	config := zap.NewProductionConfig()
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	logger, _ := config.Build()
	return logger
}
@@ -3,6 +3,12 @@
# Do not modify this file
#

##################### Version #####################
version:
  banner:
    # Whether to enable the version banner on startup.
    enabled: true

##################### Instrumentation #####################
instrumentation:
  logs:
@@ -44,7 +50,7 @@ cache:
    # Time-to-live for cache entries in memory. Specify the duration in ns
    ttl: 60000000000
    # The interval at which the cache will be cleaned up
    cleanupInterval: 1m
    cleanup_interval: 1m
  # redis: Uses Redis as the caching backend.
  redis:
    # The hostname or IP address of the Redis server.
@@ -66,7 +72,6 @@ sqlstore:
    # The path to the SQLite database file.
    path: /var/lib/signoz/signoz.db


##################### APIServer #####################
apiserver:
  timeout:
@@ -82,21 +87,52 @@ apiserver:
    # List of routes to exclude from request response logging.
    excluded_routes:
      - /api/v1/health
      - /api/v1/version
      - /

##################### Querier #####################
querier:
  # The TTL for cached query results.
  cache_ttl: 168h
  # The interval for recent data that should not be cached.
  flux_interval: 5m
  # The maximum number of concurrent queries for missing ranges.
  max_concurrent_queries: 4

##################### TelemetryStore #####################
telemetrystore:
  # Specifies the telemetrystore provider to use.
  provider: clickhouse
  # Maximum number of idle connections in the connection pool.
  max_idle_conns: 50
  # Maximum number of open connections to the database.
  max_open_conns: 100
  # Maximum time to wait for a connection to be established.
  dial_timeout: 5s
  # Specifies the telemetrystore provider to use.
  provider: clickhouse
  clickhouse:
    # The DSN to use for ClickHouse.
    dsn: http://localhost:9000
    # The DSN to use for clickhouse.
    dsn: tcp://localhost:9000
    # The cluster name to use for clickhouse.
    cluster: cluster
    # The query settings for clickhouse.
    settings:
      max_execution_time: 0
      max_execution_time_leaf: 0
      timeout_before_checking_execution_speed: 0
      max_bytes_to_read: 0
      max_result_rows: 0
      ignore_data_skipping_indices: ""
      secondary_indices_enable_bulk_filtering: false

##################### Prometheus #####################
prometheus:
  active_query_tracker:
    # Whether to enable the active query tracker.
    enabled: true
    # The path to use for the active query tracker.
    path: ""
    # The maximum number of concurrent queries.
    max_concurrent: 20

##################### Alertmanager #####################
alertmanager:
@@ -109,7 +145,7 @@ alertmanager:
|
||||
# The poll interval for periodically syncing the alertmanager with the config in the store.
|
||||
poll_interval: 1m
|
||||
# The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself.
|
||||
external_url: http://localhost:9093
|
||||
external_url: http://localhost:8080
|
||||
# The global configuration for the alertmanager. An exhaustive list of fields can be found upstream: https://github.com/prometheus/alertmanager/blob/efa05feffd644ba4accb526e98a8c6545d26a783/config/config.go#L833
|
||||
global:
|
||||
# ResolveTimeout is the time after which an alert is declared resolved if it has not been updated.
|
||||
@@ -141,3 +177,72 @@ alertmanager:
|
||||
maintenance_interval: 15m
|
||||
# Retention of the notification logs.
|
||||
retention: 120h
|
||||
|
||||
##################### Emailing #####################
|
||||
emailing:
|
||||
# Whether to enable emailing.
|
||||
enabled: false
|
||||
templates:
|
||||
# The directory containing the email templates. This directory should contain a list of files defined at pkg/types/emailtypes/template.go.
|
||||
directory: /opt/signoz/conf/templates/email
|
||||
smtp:
|
||||
# The SMTP server address.
|
||||
address: localhost:25
|
||||
# The email address to use for the SMTP server.
|
||||
from:
|
||||
# The hello message to use for the SMTP server.
|
||||
hello:
|
||||
# The static headers to send with the email.
|
||||
headers: {}
|
||||
auth:
|
||||
# The username to use for the SMTP server.
|
||||
username:
|
||||
# The password to use for the SMTP server.
|
||||
password:
|
||||
# The secret to use for the SMTP server.
|
||||
secret:
|
||||
# The identity to use for the SMTP server.
|
||||
identity:
|
||||
tls:
|
||||
# Whether to enable TLS. It should be false in most cases since the authentication mechanism should use the STARTTLS extension instead.
|
||||
enabled: false
|
||||
# Whether to skip TLS verification.
|
||||
insecure_skip_verify: false
|
||||
# The path to the CA file.
|
||||
ca_file_path:
|
||||
# The path to the key file.
|
||||
key_file_path:
|
||||
# The path to the certificate file.
|
||||
cert_file_path:
|
||||
|
||||
##################### Sharder (experimental) #####################
|
||||
sharder:
|
||||
# Specifies the sharder provider to use.
|
||||
provider: noop
|
||||
single:
|
||||
# The org id to which this instance belongs.
|
||||
org_id: org_id
|
||||
|
||||
##################### Analytics #####################
|
||||
analytics:
|
||||
# Whether to enable analytics.
|
||||
enabled: false
|
||||
segment:
|
||||
# The key to use for segment.
|
||||
key: ""
|
||||
|
||||
##################### StatsReporter #####################
|
||||
statsreporter:
|
||||
# Whether to enable stats reporter. This is used to provide valuable insights to the SigNoz team. It does not collect any sensitive/PII data.
|
||||
enabled: true
|
||||
# The interval at which the stats are collected.
|
||||
interval: 6h
|
||||
collect:
|
||||
# Whether to collect identities and traits (emails).
|
||||
identities: true
|
||||
|
||||
|
||||
##################### Gateway (License only) #####################
|
||||
gateway:
|
||||
# The URL of the gateway's api.
|
||||
url: http://localhost:8080
|
||||
|
||||
@@ -39,7 +39,7 @@ x-clickhouse-defaults: &clickhouse-defaults
|
||||
hard: 262144
|
||||
x-zookeeper-defaults: &zookeeper-defaults
|
||||
!!merge <<: *common
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
image: signoz/zookeeper:3.7.1
|
||||
user: root
|
||||
deploy:
|
||||
labels:
|
||||
@@ -174,13 +174,11 @@ services:
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:v0.76.1
|
||||
image: signoz/signoz:v0.92.1
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
# ports:
|
||||
# - "8080:8080" # signoz port
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
|
||||
@@ -196,6 +194,7 @@ services:
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-swarm
|
||||
- SIGNOZ_JWT_SECRET=secret
|
||||
- DOT_METRICS_ENABLED=true
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
@@ -208,7 +207,7 @@ services:
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:v0.111.34
|
||||
image: signoz/signoz-otel-collector:v0.129.0
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
@@ -232,7 +231,7 @@ services:
|
||||
- signoz
|
||||
schema-migrator:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:v0.111.34
|
||||
image: signoz/signoz-schema-migrator:v0.129.0
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
@@ -38,7 +38,7 @@ x-clickhouse-defaults: &clickhouse-defaults
|
||||
hard: 262144
|
||||
x-zookeeper-defaults: &zookeeper-defaults
|
||||
!!merge <<: *common
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
image: signoz/zookeeper:3.7.1
|
||||
user: root
|
||||
deploy:
|
||||
labels:
|
||||
@@ -100,28 +100,32 @@ services:
|
||||
# - "9000:9000"
|
||||
# - "8123:8123"
|
||||
# - "9181:9181"
|
||||
|
||||
configs:
|
||||
- source: clickhouse-config
|
||||
target: /etc/clickhouse-server/config.xml
|
||||
- source: clickhouse-users
|
||||
target: /etc/clickhouse-server/users.xml
|
||||
- source: clickhouse-custom-function
|
||||
target: /etc/clickhouse-server/custom-function.xml
|
||||
- source: clickhouse-cluster
|
||||
target: /etc/clickhouse-server/config.d/cluster.xml
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- clickhouse:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:v0.76.1
|
||||
image: signoz/signoz:v0.92.1
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
# ports:
|
||||
# - "8080:8080" # signoz port
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
|
||||
- ../common/dashboards:/root/config/dashboards
|
||||
- sqlite:/var/lib/signoz/
|
||||
configs:
|
||||
- source: signoz-prometheus-config
|
||||
target: /root/config/prometheus.yml
|
||||
environment:
|
||||
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
|
||||
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
@@ -131,6 +135,7 @@ services:
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-swarm
|
||||
- DOT_METRICS_ENABLED=true
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
@@ -143,15 +148,17 @@ services:
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:v0.111.34
|
||||
image: signoz/signoz-otel-collector:v0.129.0
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
- --copy-path=/var/tmp/collector-config.yaml
|
||||
- --feature-gates=-pkg.translator.prometheus.NormalizeName
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||
configs:
|
||||
- source: otel-collector-config
|
||||
target: /etc/otel-collector-config.yaml
|
||||
- source: otel-manager-config
|
||||
target: /etc/manager-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
@@ -167,7 +174,7 @@ services:
|
||||
- signoz
|
||||
schema-migrator:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:v0.111.34
|
||||
image: signoz/signoz-schema-migrator:v0.129.0
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
@@ -188,3 +195,24 @@ volumes:
|
||||
name: signoz-sqlite
|
||||
zookeeper-1:
|
||||
name: signoz-zookeeper-1
|
||||
configs:
|
||||
clickhouse-config:
|
||||
file: ../common/clickhouse/config.xml
|
||||
clickhouse-users:
|
||||
file: ../common/clickhouse/users.xml
|
||||
clickhouse-custom-function:
|
||||
file: ../common/clickhouse/custom-function.xml
|
||||
clickhouse-cluster:
|
||||
file: ../common/clickhouse/cluster.xml
|
||||
signoz-prometheus-config:
|
||||
file: ../common/signoz/prometheus.yml
|
||||
# If you have multiple dashboard files, you can list them individually:
|
||||
# dashboard-foo:
|
||||
# file: ../common/dashboards/foo.json
|
||||
# dashboard-bar:
|
||||
# file: ../common/dashboards/bar.json
|
||||
|
||||
otel-collector-config:
|
||||
file: ./otel-collector-config.yaml
|
||||
otel-manager-config:
|
||||
file: ../common/signoz/otel-collector-opamp-config.yaml
|
||||
|
||||
@@ -26,7 +26,7 @@ processors:
|
||||
detectors: [env, system]
|
||||
timeout: 2s
|
||||
signozspanmetrics/delta:
|
||||
metrics_exporter: clickhousemetricswrite
|
||||
metrics_exporter: signozclickhousemetrics
|
||||
metrics_flush_interval: 60s
|
||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||
dimensions_cache_size: 100000
|
||||
@@ -60,25 +60,16 @@ exporters:
|
||||
datasource: tcp://clickhouse:9000/signoz_traces
|
||||
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
|
||||
use_new_schema: true
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true
|
||||
clickhousemetricswrite/prometheus:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
signozclickhousemetrics:
|
||||
dsn: tcp://clickhouse:9000/signoz_metrics
|
||||
clickhouselogsexporter:
|
||||
dsn: tcp://clickhouse:9000/signoz_logs
|
||||
timeout: 10s
|
||||
use_new_schema: true
|
||||
# debug: {}
|
||||
service:
|
||||
telemetry:
|
||||
logs:
|
||||
encoding: json
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions:
|
||||
- health_check
|
||||
- pprof
|
||||
@@ -90,11 +81,11 @@ service:
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite, signozclickhousemetrics]
|
||||
exporters: [signozclickhousemetrics]
|
||||
metrics/prometheus:
|
||||
receivers: [prometheus]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
|
||||
exporters: [signozclickhousemetrics]
|
||||
logs:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
|
||||
@@ -42,7 +42,7 @@ x-clickhouse-defaults: &clickhouse-defaults
|
||||
hard: 262144
|
||||
x-zookeeper-defaults: &zookeeper-defaults
|
||||
!!merge <<: *common
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
image: signoz/zookeeper:3.7.1
|
||||
user: root
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
@@ -177,12 +177,10 @@ services:
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:${DOCKER_TAG:-v0.76.1}
|
||||
image: signoz/signoz:${VERSION:-v0.92.1}
|
||||
container_name: signoz
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
@@ -199,6 +197,7 @@ services:
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-standalone-amd
|
||||
- DOT_METRICS_ENABLED=true
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
@@ -212,7 +211,7 @@ services:
|
||||
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.0}
|
||||
container_name: signoz-otel-collector
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
@@ -238,7 +237,7 @@ services:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
|
||||
container_name: schema-migrator-sync
|
||||
command:
|
||||
- sync
|
||||
@@ -249,7 +248,7 @@ services:
|
||||
condition: service_healthy
|
||||
schema-migrator-async:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
|
||||
container_name: schema-migrator-async
|
||||
command:
|
||||
- async
|
||||
|
||||
@@ -1,199 +0,0 @@
|
||||
version: "3"
|
||||
x-common: &common
|
||||
networks:
|
||||
- signoz-net
|
||||
restart: unless-stopped
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
x-clickhouse-defaults: &clickhouse-defaults
|
||||
!!merge <<: *common
|
||||
# adding a non-LTS version due to this fix: https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
tty: true
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9363"
|
||||
signoz.io/path: "/metrics"
|
||||
depends_on:
|
||||
init-clickhouse:
|
||||
condition: service_completed_successfully
|
||||
zookeeper-1:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- wget
|
||||
- --spider
|
||||
- -q
|
||||
- 0.0.0.0:8123/ping
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
x-zookeeper-defaults: &zookeeper-defaults
|
||||
!!merge <<: *common
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
user: root
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9141"
|
||||
signoz.io/path: "/metrics"
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD-SHELL
|
||||
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
x-db-depend: &db-depend
|
||||
!!merge <<: *common
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
condition: service_completed_successfully
|
||||
services:
|
||||
init-clickhouse:
|
||||
!!merge <<: *common
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
container_name: signoz-init-clickhouse
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |
|
||||
version="v0.0.1"
|
||||
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
|
||||
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
|
||||
cd /tmp
|
||||
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
|
||||
tar -xvzf histogram-quantile.tar.gz
|
||||
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
|
||||
restart: on-failure
|
||||
volumes:
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
zookeeper-1:
|
||||
!!merge <<: *zookeeper-defaults
|
||||
container_name: signoz-zookeeper-1
|
||||
ports:
|
||||
- "2181:2181"
|
||||
- "2888:2888"
|
||||
- "3888:3888"
|
||||
volumes:
|
||||
- zookeeper-1:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=1
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
|
||||
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
|
||||
clickhouse:
|
||||
!!merge <<: *clickhouse-defaults
|
||||
container_name: signoz-clickhouse
|
||||
ports:
|
||||
- "9000:9000"
|
||||
- "8123:8123"
|
||||
- "9181:9181"
|
||||
volumes:
|
||||
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
|
||||
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
|
||||
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
- clickhouse:/var/lib/clickhouse/
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:${DOCKER_TAG:-v0.76.1}
|
||||
container_name: signoz
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --gateway-url=https://api.staging.signoz.cloud
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
# ports:
|
||||
# - "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
volumes:
|
||||
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
|
||||
- ../common/dashboards:/root/config/dashboards
|
||||
- sqlite:/var/lib/signoz/
|
||||
environment:
|
||||
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
|
||||
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
|
||||
- DASHBOARDS_PATH=/root/config/dashboards
|
||||
- STORAGE=clickhouse
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-standalone-amd
|
||||
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- wget
|
||||
- --spider
|
||||
- -q
|
||||
- localhost:8080/api/v1/health
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.34}
|
||||
container_name: signoz-otel-collector
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
- --manager-config=/etc/manager-config.yaml
|
||||
- --copy-path=/var/tmp/collector-config.yaml
|
||||
- --feature-gates=-pkg.translator.prometheus.NormalizeName
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
|
||||
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
|
||||
environment:
|
||||
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
|
||||
- LOW_CARDINAL_EXCEPTION_GROUPING=false
|
||||
ports:
|
||||
# - "1777:1777" # pprof extension
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
depends_on:
|
||||
signoz:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
container_name: schema-migrator-sync
|
||||
command:
|
||||
- sync
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
restart: on-failure
|
||||
schema-migrator-async:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
container_name: schema-migrator-async
|
||||
command:
|
||||
- async
|
||||
- --dsn=tcp://clickhouse:9000
|
||||
- --up=
|
||||
restart: on-failure
|
||||
networks:
|
||||
signoz-net:
|
||||
name: signoz-net
|
||||
volumes:
|
||||
clickhouse:
|
||||
name: signoz-clickhouse
|
||||
sqlite:
|
||||
name: signoz-sqlite
|
||||
zookeeper-1:
|
||||
name: signoz-zookeeper-1
|
||||
@@ -38,7 +38,7 @@ x-clickhouse-defaults: &clickhouse-defaults
|
||||
hard: 262144
|
||||
x-zookeeper-defaults: &zookeeper-defaults
|
||||
!!merge <<: *common
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
image: signoz/zookeeper:3.7.1
|
||||
user: root
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
@@ -110,12 +110,10 @@ services:
|
||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
signoz:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz:${DOCKER_TAG:-v0.76.1}
|
||||
image: signoz/signoz:${VERSION:-v0.92.1}
|
||||
container_name: signoz
|
||||
command:
|
||||
- --config=/root/config/prometheus.yml
|
||||
- --use-logs-new-schema=true
|
||||
- --use-trace-new-schema=true
|
||||
ports:
|
||||
- "8080:8080" # signoz port
|
||||
# - "6060:6060" # pprof port
|
||||
@@ -132,6 +130,7 @@ services:
|
||||
- GODEBUG=netdns=go
|
||||
- TELEMETRY_ENABLED=true
|
||||
- DEPLOYMENT_TYPE=docker-standalone-amd
|
||||
- DOT_METRICS_ENABLED=true
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
@@ -144,7 +143,7 @@ services:
|
||||
retries: 3
|
||||
otel-collector:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.0}
|
||||
container_name: signoz-otel-collector
|
||||
command:
|
||||
- --config=/etc/otel-collector-config.yaml
|
||||
@@ -166,7 +165,7 @@ services:
|
||||
condition: service_healthy
|
||||
schema-migrator-sync:
|
||||
!!merge <<: *common
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
|
||||
container_name: schema-migrator-sync
|
||||
command:
|
||||
- sync
|
||||
@@ -178,7 +177,7 @@ services:
|
||||
restart: on-failure
|
||||
schema-migrator-async:
|
||||
!!merge <<: *db-depend
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
|
||||
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
|
||||
container_name: schema-migrator-async
|
||||
command:
|
||||
- async
|
||||
|
||||
@@ -26,7 +26,7 @@ processors:
|
||||
detectors: [env, system]
|
||||
timeout: 2s
|
||||
signozspanmetrics/delta:
|
||||
metrics_exporter: clickhousemetricswrite
|
||||
metrics_exporter: signozclickhousemetrics
|
||||
metrics_flush_interval: 60s
|
||||
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
|
||||
dimensions_cache_size: 100000
|
||||
@@ -60,25 +60,16 @@ exporters:
|
||||
datasource: tcp://clickhouse:9000/signoz_traces
|
||||
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
|
||||
use_new_schema: true
|
||||
clickhousemetricswrite:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true
|
||||
clickhousemetricswrite/prometheus:
|
||||
endpoint: tcp://clickhouse:9000/signoz_metrics
|
||||
signozclickhousemetrics:
|
||||
dsn: tcp://clickhouse:9000/signoz_metrics
|
||||
clickhouselogsexporter:
|
||||
dsn: tcp://clickhouse:9000/signoz_logs
|
||||
timeout: 10s
|
||||
use_new_schema: true
|
||||
# debug: {}
|
||||
service:
|
||||
telemetry:
|
||||
logs:
|
||||
encoding: json
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions:
|
||||
- health_check
|
||||
- pprof
|
||||
@@ -90,11 +81,11 @@ service:
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite, signozclickhousemetrics]
|
||||
exporters: [signozclickhousemetrics]
|
||||
metrics/prometheus:
|
||||
receivers: [prometheus]
|
||||
processors: [batch]
|
||||
exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
|
||||
exporters: [signozclickhousemetrics]
|
||||
logs:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
|
||||
@@ -93,7 +93,7 @@ check_os() {
|
||||
;;
|
||||
Red\ Hat*)
|
||||
desired_os=1
|
||||
os="red hat"
|
||||
os="rhel"
|
||||
package_manager="yum"
|
||||
;;
|
||||
CentOS*)
|
||||
|
||||
@@ -44,24 +44,39 @@ Before diving in, make sure you have these tools installed:
|
||||
|
||||
SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.
|
||||
|
||||
### 1. Setting up Clickhouse
|
||||
### 1. Setting up ClickHouse
|
||||
|
||||
First, we need to get Clickhouse running:
|
||||
First, we need to get ClickHouse running:
|
||||
|
||||
```bash
|
||||
make devenv-clickhouse
|
||||
```
|
||||
|
||||
This command:
|
||||
- Starts Clickhouse in a single-shard, single-replica cluster
|
||||
- Starts ClickHouse in a single-shard, single-replica cluster
|
||||
- Sets up Zookeeper
|
||||
- Runs the latest schema migrations
|
||||
|
||||
### 2. Starting the Backend
|
||||
### 2. Setting up SigNoz OpenTelemetry Collector
|
||||
|
||||
Next, start the OpenTelemetry Collector to receive telemetry data:
|
||||
|
||||
```bash
|
||||
make devenv-signoz-otel-collector
|
||||
```
|
||||
|
||||
This command:
|
||||
- Starts the SigNoz OpenTelemetry Collector
|
||||
- Listens on port 4317 (gRPC) and 4318 (HTTP) for incoming telemetry data
|
||||
- Forwards data to ClickHouse for storage
|
||||
|
||||
> 💡 **Quick Setup**: Use `make devenv-up` to start both ClickHouse and OTel Collector together
|
||||
|
||||
### 3. Starting the Backend
|
||||
|
||||
1. Run the backend server:
|
||||
```bash
|
||||
make run-go
|
||||
make go-run-community
|
||||
```
|
||||
|
||||
2. Verify it's working:
|
||||
@@ -73,19 +88,24 @@ This command:
|
||||
|
||||
> 💡 **Tip**: The API server runs at `http://localhost:8080/` by default
|
||||
|
||||
### 3. Setting up the Frontend
|
||||
### 4. Setting up the Frontend
|
||||
|
||||
1. Install dependencies:
|
||||
1. Navigate to the frontend directory:
|
||||
```bash
|
||||
cd frontend
|
||||
```
|
||||
|
||||
2. Install dependencies:
|
||||
```bash
|
||||
yarn install
|
||||
```
|
||||
|
||||
2. Create a `.env` file in the `frontend` directory:
|
||||
3. Create a `.env` file in this directory:
|
||||
```env
|
||||
FRONTEND_API_ENDPOINT=http://localhost:8080
|
||||
```
|
||||
|
||||
3. Start the development server:
|
||||
4. Start the development server:
|
||||
```bash
|
||||
yarn dev
|
||||
```
|
||||
@@ -93,3 +113,25 @@ This command:
|
||||
> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
|
||||
|
||||
Now you're all set to start developing! Happy coding! 🎉
|
||||
|
||||
## Verifying Your Setup
|
||||
To verify everything is working correctly:
|
||||
|
||||
1. **Check ClickHouse**: `curl http://localhost:8123/ping` (should return "Ok.")
|
||||
2. **Check OTel Collector**: `curl http://localhost:13133` (should return health status)
|
||||
3. **Check Backend**: `curl http://localhost:8080/api/v1/health` (should return `{"status":"ok"}`)
|
||||
4. **Check Frontend**: Open `http://localhost:3301` in your browser
|
||||
|
||||
## How to send test data?
|
||||
|
||||
You can now send telemetry data to your local SigNoz instance:
|
||||
|
||||
- **OTLP gRPC**: `localhost:4317`
|
||||
- **OTLP HTTP**: `localhost:4318`
|
||||
|
||||
For example, using `curl` to send a test trace:
|
||||
```bash
|
||||
curl -X POST http://localhost:4318/v1/traces \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"resourceSpans":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"test-service"}}]},"scopeSpans":[{"spans":[{"traceId":"12345678901234567890123456789012","spanId":"1234567890123456","name":"test-span","startTimeUnixNano":"1609459200000000000","endTimeUnixNano":"1609459201000000000"}]}]}]}'
|
||||
```
|
||||
|
||||
51
docs/contributing/go/endpoint.md
Normal file
51
docs/contributing/go/endpoint.md
Normal file
@@ -0,0 +1,51 @@
# Endpoint

This guide outlines the recommended approach for designing endpoints, with a focus on entity relationships, RESTful structure, and examples from the codebase.

## How do we design an endpoint?

### Understand the core entities and their relationships

Start with understanding the core entities and their relationships. For example:

- **Organization**: an organization can have multiple users

### Structure endpoints RESTfully

Endpoints should reflect the resource hierarchy and follow RESTful conventions. Use clear, **pluralized resource names** and versioning. For example:

- `POST /v1/organizations` — Create an organization
- `GET /v1/organizations/:id` — Get an organization by id
- `DELETE /v1/organizations/:id` — Delete an organization by id
- `PUT /v1/organizations/:id` — Update an organization by id
- `GET /v1/organizations/:id/users` — Get all users in an organization
- `GET /v1/organizations/me/users` — Get all users in my organization

Think in terms of resource navigation in a file system. For example, to find your organization, you would navigate to the root of the file system and then to the `organizations` directory. To find a user in an organization, you would navigate to the `organizations` directory and then to the `id` directory.

```bash
v1/
├── organizations/
│   └── 123/
│       └── users/
```

`me` endpoints are special. They are used to determine the actual id via some auth/external mechanism. For `me` endpoints, think of the `me` directory being symlinked to your organization directory. For example, if you are a part of the organization `123`, the `me` directory will be symlinked to `/v1/organizations/123`:

```bash
v1/
├── organizations/
│   ├── me/ -> symlink to /v1/organizations/123
│   │   └── users/
│   └── 123/
│       └── users/
```

> 💡 **Note**: There are various ways to structure endpoints. Some prefer to use singular resource names instead of `me`. Others prefer to use singular resource names for all endpoints. We have, however, chosen to standardize our endpoints in the manner described above.

## What should I remember?

- Use clear, **plural resource names**
- Use `me` endpoints for determining the actual id via some auth mechanism

> 💡 **Note**: When in doubt, diagram the relationships and walk through the user flows as if navigating a file system. This will help you design endpoints that are both logical and user-friendly.
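
To make the layout concrete, here is a minimal, hedged sketch of registering such routes. It uses Go 1.22's `net/http` pattern syntax purely for illustration; the handler names are hypothetical and the codebase's actual router may differ.

```go
package main

import (
	"log"
	"net/http"
)

// Hypothetical handlers, named only to illustrate the RESTful layout above.
func createOrganization(w http.ResponseWriter, r *http.Request)    {}
func getOrganization(w http.ResponseWriter, r *http.Request)       {}
func listOrganizationUsers(w http.ResponseWriter, r *http.Request) {}

func main() {
	mux := http.NewServeMux()

	// Plural resource names, versioned paths, ids as path parameters.
	mux.HandleFunc("POST /v1/organizations", createOrganization)
	mux.HandleFunc("GET /v1/organizations/{id}", getOrganization)
	mux.HandleFunc("GET /v1/organizations/{id}/users", listOrganizationUsers)

	// The "me" route resolves the caller's organization id from auth
	// before delegating, as described above.
	mux.HandleFunc("GET /v1/organizations/me/users", listOrganizationUsers)

	log.Fatal(http.ListenAndServe(":8080", mux))
}
```
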
103
docs/contributing/go/errors.md
Normal file
103
docs/contributing/go/errors.md
Normal file
@@ -0,0 +1,103 @@
# Errors

SigNoz includes its own structured [errors](/pkg/errors/errors.go) package. It's built on top of Go's `error` interface, extending it to add additional context that helps provide more meaningful error messages throughout the application.

## How to use it?

To use the SigNoz structured errors package, use these functions instead of the standard library alternatives:

```go
// Instead of errors.New()
errors.New(typ, code, message)

// Instead of fmt.Errorf()
errors.Newf(typ, code, message, args...)
```

### Typ
The Typ (read as Type, defined as `typ`) is used to categorize errors across the codebase and is loosely coupled with HTTP/gRPC status codes. All predefined types can be found in [pkg/errors/type.go](/pkg/errors/type.go). For example:

- `TypeInvalidInput` - Indicates invalid input was provided
- `TypeNotFound` - Indicates a resource was not found

By design, `typ` is unexported and cannot be declared outside of the [errors](/pkg/errors/errors.go) package. This ensures that it is consistent across the codebase and is used in a way that is meaningful.

### Code
Codes are used to provide more granular categorization within types. For instance, a type of `TypeInvalidInput` might have codes like `CodeInvalidEmail` or `CodeInvalidPassword`.

To create new error codes, use the `errors.MustNewCode` function:

```go
var (
	CodeThingAlreadyExists = errors.MustNewCode("thing_already_exists")
	CodeThingNotFound      = errors.MustNewCode("thing_not_found")
)
```

> 💡 **Note**: Error codes must match the regex `^[a-z_]+$`, otherwise the code will panic.

## Show me some examples

### Using the error
A basic example of using the error:

```go
var (
	CodeThingAlreadyExists = errors.MustNewCode("thing_already_exists")
)

func CreateThing(id string) error {
	_, err := thing.GetFromStore(id)
	if err != nil {
		if errors.Ast(err, errors.TypeNotFound) {
			// thing was not found, create it
			return thing.Create(id)
		}

		// something else went wrong, wrap the error with more context
		return errors.Wrapf(err, errors.TypeInternal, errors.CodeUnknown, "failed to get thing from store")
	}

	return errors.Newf(errors.TypeAlreadyExists, CodeThingAlreadyExists, "thing with id %s already exists", id)
}
```

### Changing the error
Sometimes you may want to change the error while preserving the message:

```go
func GetUserSecurely(id string) (*User, error) {
	user, err := repository.GetUser(id)
	if err != nil {
		if errors.Ast(err, errors.TypeNotFound) {
			// Convert NotFound to Forbidden for security reasons
			return nil, errors.New(errors.TypeForbidden, errors.CodeAccessDenied, "access denied to requested resource")
		}
		return nil, err
	}
	return user, nil
}
```

## Why do we need this?

In a large codebase like SigNoz, error handling is critical for maintaining reliability, debuggability, and a good user experience. We believe that it is the **responsibility of a function** to return **well-defined** errors that **accurately describe what went wrong**. With our structured error system:

- Functions can create precise errors with appropriate additional context
- Callers can make informed decisions based on the additional context
- Error context is preserved and enhanced as it moves up the call stack

The caller (which can be another function, an HTTP/gRPC handler, or something else entirely) can then choose to use this error to take appropriate actions such as:

- A function can branch into different paths based on the context
- An HTTP/gRPC handler can derive the correct status code and message from the error and send it to the client
- Logging systems can capture structured error information for better diagnostics

Although there might be cases where this seems too verbose, it makes the code more maintainable and consistent. A little verbose code is better than clever code that doesn't provide enough context.

## What should I remember?

- Think about error handling as you write your code, not as an afterthought.
- Always use the [errors](/pkg/errors/errors.go) package instead of the standard library's `errors.New()` or `fmt.Errorf()`.
- Always assign appropriate codes to errors when creating them instead of using the "catch all" error codes defined in [pkg/errors/code.go](/pkg/errors/code.go).
- Use `errors.Wrapf()` to add context to errors while preserving the original when appropriate.
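
As a hedged illustration of the handler-side point above (deriving a status code from the error), a sketch along these lines could work. It reuses `errors.Ast` and the type names from the examples; the mapping itself is illustrative, not the codebase's actual implementation.

```go
import "net/http"

// Illustrative only: derive an HTTP status code from a structured error.
func statusCode(err error) int {
	switch {
	case errors.Ast(err, errors.TypeInvalidInput):
		return http.StatusBadRequest
	case errors.Ast(err, errors.TypeNotFound):
		return http.StatusNotFound
	case errors.Ast(err, errors.TypeForbidden):
		return http.StatusForbidden
	default:
		return http.StatusInternalServerError
	}
}
```
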
106
docs/contributing/go/provider.md
Normal file
106
docs/contributing/go/provider.md
Normal file
@@ -0,0 +1,106 @@
# Provider

SigNoz is built on the provider pattern, a design approach where code is organized into providers that handle specific application responsibilities. Providers act as adapter components that integrate with external services and deliver required functionality to the application.

> 💡 **Note**: Coming from a DDD background? Providers are similar (not exactly the same) to adapter/infrastructure services.

## How to create a new provider?

To create a new provider, create a directory in the `pkg/` directory named after your provider. The provider package consists of four key components:

- **Interface** (`pkg/<name>/<name>.go`): Defines the provider's interface. Other packages should import this interface to use the provider.
- **Config** (`pkg/<name>/config.go`): Contains provider configuration, implementing the `factory.Config` interface from [factory/config.go](/pkg/factory/config.go).
- **Implementation** (`pkg/<name>/<implname><name>/provider.go`): Contains the provider implementation, including a `NewProvider` function that returns a `factory.Provider` interface from [factory/provider.go](/pkg/factory/provider.go).
- **Mock** (`pkg/<name>/<name>test.go`): Provides mocks for the provider, typically used by dependent packages for unit testing.

For example, the [prometheus](/pkg/prometheus) provider delivers a Prometheus engine to the application:

- `pkg/prometheus/prometheus.go` - Interface definition
- `pkg/prometheus/config.go` - Configuration
- `pkg/prometheus/clickhouseprometheus/provider.go` - ClickHouse-powered implementation
- `pkg/prometheus/prometheustest/provider.go` - Mock implementation

## How to wire it up?

The `pkg/signoz` package contains the inversion-of-control container responsible for wiring providers. It handles instantiation, configuration, and assembly of providers based on configuration metadata.

> 💡 **Note**: Coming from a Java background? Providers are similar to Spring beans.

Wiring up a provider involves three steps:

1. Wiring up the configuration
Add your config from `pkg/<name>/config.go` to the `pkg/signoz/config.Config` struct and register it in the config factories:

```go
type Config struct {
	...
	MyProvider myprovider.Config `mapstructure:"myprovider"`
	...
}

func NewConfig(ctx context.Context, resolverConfig config.ResolverConfig, ...) (Config, error) {
	...
	configFactories := []factory.ConfigFactory{
		myprovider.NewConfigFactory(),
	}
	...
}
```

2. Wiring up the provider
Add available provider implementations in `pkg/signoz/provider.go`:

```go
func NewMyProviderFactories() factory.NamedMap[factory.ProviderFactory[myprovider.MyProvider, myprovider.Config]] {
	return factory.MustNewNamedMap(
		myproviderone.NewFactory(),
		myprovidertwo.NewFactory(),
	)
}
```

3. Instantiate the provider by adding it to the `SigNoz` struct in `pkg/signoz/signoz.go`:

```go
type SigNoz struct {
	...
	MyProvider myprovider.MyProvider
	...
}

func New(...) (*SigNoz, error) {
	...
	myprovider, err := myproviderone.New(ctx, settings, config.MyProvider, "one/two")
	if err != nil {
		return nil, err
	}
	...
}
```

## How to use it?

To use a provider, import its interface. For example, to use the prometheus provider, import the package that defines its interface, `pkg/prometheus`:

```go
import "github.com/SigNoz/signoz/pkg/prometheus"

func CreateSomething(ctx context.Context, prometheus prometheus.Prometheus) {
	...
	prometheus.DoSomething()
	...
}
```

## Why do we need this?

Like any dependency injection framework, providers decouple the codebase from implementation details. This is especially valuable in SigNoz's large codebase, where we need to swap implementations without changing dependent code. The provider pattern offers several benefits apart from the obvious one of decoupling:

- Configuration is **defined with each provider and centralized in one place**, making it easier to understand and manage through various methods (environment variables, config files, etc.)
- Provider mocking is **straightforward for unit testing**, with a consistent pattern for locating mocks
- **Multiple implementations** of the same provider are **supported**, as demonstrated by our sqlstore provider

## What should I remember?

- Use the provider pattern wherever applicable.
- Always create a provider **irrespective of the number of implementations**. This makes it easier to add new implementations in the future.
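
As a hedged sketch of how the mock component fits into unit testing, a dependent package might consume it as below. The `prometheustest.New` constructor is hypothetical; check `pkg/prometheus/prometheustest` for the actual API.

```go
import (
	"context"
	"testing"

	"github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
)

func TestCreateSomething(t *testing.T) {
	// Hypothetical constructor: the real mock factory lives in
	// pkg/prometheus/prometheustest/provider.go.
	mock := prometheustest.New()

	// Exercise the code under test against the mock instead of a real engine.
	CreateSomething(context.Background(), mock)
}
```
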
11
docs/contributing/go/readme.md
Normal file
11
docs/contributing/go/readme.md
Normal file
@@ -0,0 +1,11 @@
# Go

This document provides an overview of contributing to the SigNoz backend, which is written in Go with a focus on performance, maintainability, and developer experience. We strive for clean, idiomatic code that follows established Go practices while addressing the unique needs of an observability platform.

We adhere to three primary style guides as our foundation:

- [Effective Go](https://go.dev/doc/effective_go) - For writing idiomatic Go code
- [Code Review Comments](https://go.dev/wiki/CodeReviewComments) - For understanding common comments in code reviews
- [Google Style Guide](https://google.github.io/styleguide/go/) - Additional practices from Google

We **recommend** (almost enforce) reviewing these guides before contributing to the codebase. They provide valuable insights into writing idiomatic Go code and will help you understand our approach to backend development. In addition, we have a few rules that make certain areas stricter than the above; these can be found in the area-specific files in this package.
94
docs/contributing/go/sql.md
Normal file
94
docs/contributing/go/sql.md
Normal file
@@ -0,0 +1,94 @@
# SQL
SigNoz utilizes a relational database to store metadata including organization information, user data and other settings.

## How to use it?

The database interface is defined in [SQLStore](/pkg/sqlstore/sqlstore.go). SigNoz leverages the Bun ORM to interact with the underlying database. To access the database instance, use the `BunDBCtx` function. For operations that require transactions across multiple database operations, use the `RunInTxCtx` function. This function embeds a transaction in the context, which propagates through the various functions called in the callback.

```go
type Thing struct {
	bun.BaseModel

	ID            types.Identifiable  `bun:",embed"`
	SomeColumn    string              `bun:"some_column"`
	TimeAuditable types.TimeAuditable `bun:",embed"`
	OrgID         string              `bun:"org_id"`
}

func GetThing(ctx context.Context, id string) (*Thing, error) {
	thing := new(Thing)
	err := sqlstore.
		BunDBCtx(ctx).
		NewSelect().
		Model(thing).
		Where("id = ?", id).
		Scan(ctx)

	return thing, err
}

func CreateThing(ctx context.Context, thing *Thing) error {
	_, err := sqlstore.
		BunDBCtx(ctx).
		NewInsert().
		Model(thing).
		Exec(ctx)

	return err
}
```
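
`RunInTxCtx` is described above but not shown, so here is a hedged sketch of a multi-statement write. The exact signature is defined in [SQLStore](/pkg/sqlstore/sqlstore.go); this sketch assumes a callback style in which the transaction rides along in the context handed to the callback, and the `nil` options argument is an assumption.

```go
func MoveThing(ctx context.Context, store sqlstore.SQLStore, id string, orgID string) error {
	// Assumed signature: both queries below share one transaction because
	// RunInTxCtx embeds it in the ctx it passes to the callback.
	return store.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
		thing := new(Thing)
		if err := store.BunDBCtx(ctx).NewSelect().Model(thing).Where("id = ?", id).Scan(ctx); err != nil {
			return err
		}

		thing.OrgID = orgID
		_, err := store.BunDBCtx(ctx).NewUpdate().Model(thing).Where("id = ?", id).Exec(ctx)
		return err
	})
}
```
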

> 💡 **Note**: Always use line breaks while working with SQL queries to enhance code readability.

> 💡 **Note**: Always use the `new` function to create new instances of structs.

## What are hooks?

Hooks are user-defined functions that execute before and/or after specific database operations. These hooks are particularly useful for generating telemetry data such as logs, traces, and metrics, providing visibility into database interactions. Hooks are defined in the [SQLStoreHook](/pkg/sqlstore/sqlstore.go) interface.
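
Since hooks are only described above, here is a hedged sketch of a duration-logging hook written against Bun's `QueryHook` interface. Treat it as an illustration of the idea; the actual SigNoz hooks live behind the `SQLStoreHook` interface.

```go
import (
	"context"
	"log/slog"
	"time"

	"github.com/uptrace/bun"
)

// loggingHook logs every query with its duration and error, if any.
// It would be registered with db.AddQueryHook(&loggingHook{logger: logger}).
type loggingHook struct {
	logger *slog.Logger
}

func (h *loggingHook) BeforeQuery(ctx context.Context, event *bun.QueryEvent) context.Context {
	// Nothing to do here; bun records the start time on the event.
	return ctx
}

func (h *loggingHook) AfterQuery(ctx context.Context, event *bun.QueryEvent) {
	h.logger.InfoContext(ctx, "executed query",
		"query", event.Query,
		"duration", time.Since(event.StartTime),
		"error", event.Err,
	)
}
```
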
## How is the schema designed?

SigNoz implements a star schema design with the organizations table as the central entity. All other tables link to the organizations table via foreign key constraints on the `org_id` column. This design ensures that every entity within the system is either directly or indirectly associated with an organization.

```mermaid
erDiagram
    ORGANIZATIONS {
        string id PK
        timestamp created_at
        timestamp updated_at
    }
    ENTITY_A {
        string id PK
        timestamp created_at
        timestamp updated_at
        string org_id FK
    }
    ENTITY_B {
        string id PK
        timestamp created_at
        timestamp updated_at
        string org_id FK
    }

    ORGANIZATIONS ||--o{ ENTITY_A : contains
    ORGANIZATIONS ||--o{ ENTITY_B : contains
```

> 💡 **Note**: There are rare exceptions to the above star schema design. Consult with the maintainers before deviating from the above design.

All tables follow a consistent primary key pattern using an `id` column (referenced by the `types.Identifiable` struct) and include `created_at` and `updated_at` columns (referenced by the `types.TimeAuditable` struct) for audit purposes.

## How to write migrations?

For schema migrations, use the [SQLMigration](/pkg/sqlmigration/sqlmigration.go) interface and write the migration in the same package. When creating migrations, adhere to these guidelines (see the sketch after this list for an idempotent example):

- Do not implement **`ON CASCADE` foreign key constraints**. Deletion operations should be handled explicitly in application logic rather than delegated to the database.
- Do not **import types from the types package** in the `sqlmigration` package. Instead, define the required types within the migration package itself. This practice ensures migration stability as the core types evolve over time.
- Do not implement **`Down` migrations**. As the codebase matures, we may introduce this capability, but for now, the `Down` function should remain empty.
- Always write **idempotent** migrations. This means that if the migration is run multiple times, it should not cause an error.
- A migration that is **dependent on the underlying dialect** (sqlite, postgres, etc.) should be written as part of the [SQLDialect](/pkg/sqlstore/sqlstore.go) interface. The implementation needs to go in the dialect-specific package of the respective database.
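
As referenced in the list above, here is a hedged sketch of an idempotent, dialect-agnostic migration. The `Up`/`Down` shape is assumed from the description; the authoritative interface is [SQLMigration](/pkg/sqlmigration/sqlmigration.go).

```go
package sqlmigration

import (
	"context"

	"github.com/uptrace/bun"
)

type addThing struct{}

// Up is idempotent: IF NOT EXISTS makes re-runs a no-op. The table is
// defined locally (no imports from the types package) and deliberately
// avoids ON CASCADE constraints, per the guidelines above.
func (m *addThing) Up(ctx context.Context, db *bun.DB) error {
	_, err := db.ExecContext(ctx, `
		CREATE TABLE IF NOT EXISTS thing (
			id TEXT PRIMARY KEY,
			created_at TIMESTAMP,
			updated_at TIMESTAMP,
			org_id TEXT REFERENCES organizations (id)
		)`)
	return err
}

// Down is intentionally empty, per the guidelines above.
func (m *addThing) Down(ctx context.Context, db *bun.DB) error {
	return nil
}
```
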
## What should I remember?

- Use `BunDBCtx` and `RunInTxCtx` to access the database instance and execute transactions, respectively.
- While designing new tables, ensure the presence of the `id`, `created_at`, and `updated_at` columns, plus an `org_id` column with a foreign key constraint to the `organizations` table (unless the table serves as a transitive entity that is not directly associated with an organization but is indirectly associated with one).
- Implement deletion logic in the application rather than relying on cascading deletes in the database.
- While writing migrations, adhere to the guidelines mentioned above.
BIN
docs/img/otel-demo-docker-containers.png
Normal file
BIN
docs/img/otel-demo-docker-containers.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 143 KiB |
BIN
docs/img/otel-demo-helm.png
Normal file
BIN
docs/img/otel-demo-helm.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 157 KiB |
BIN
docs/img/otel-demo-pods.png
Normal file
BIN
docs/img/otel-demo-pods.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 76 KiB |
BIN
docs/img/otel-demo-services.png
Normal file
BIN
docs/img/otel-demo-services.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 306 KiB |
246
docs/otel-demo-docs.md
Normal file
246
docs/otel-demo-docs.md
Normal file
@@ -0,0 +1,246 @@
|
||||
# Configuring OpenTelemetry Demo App with SigNoz
|
||||
|
||||
[The OpenTelemetry Astronomy Shop](https://github.com/open-telemetry/opentelemetry-demo) is an e-commerce web application composed of **15 core microservices** in a **distributed system** that communicate over gRPC. Designed as a **polyglot** environment, it leverages a diverse set of programming languages, including Go, Python, .NET, Java, and others, showcasing cross-language instrumentation with OpenTelemetry. The intention is to provide a quickstart application for sending data and experiencing SigNoz firsthand.
|
||||
|
||||
This guide provides a step-by-step walkthrough for setting up the **OpenTelemetry Demo App** with **SigNoz** as the observability backend. It outlines the steps to export telemetry data to **SigNoz self-hosted with Docker**, **SigNoz self-hosted with Kubernetes**, and **SigNoz Cloud**.
|
||||
<br/>
|
||||
|
||||
__Table of Contents__
|
||||
- [Send data to SigNoz Self-hosted with Docker](#send-data-to-signoz-self-hosted-with-docker)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Clone the OpenTelemetry Demo App Repository](#clone-the-opentelemetry-demo-app-repository)
|
||||
- [Modify OpenTelemetry Collector Config](#modify-opentelemetry-collector-config)
|
||||
- [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app)
|
||||
- [Monitor with SigNoz (Docker)](#monitor-with-signoz-docker)
|
||||
- [Send data to SigNoz Self-hosted with Kubernetes](#send-data-to-signoz-self-hosted-with-kubernetes)
|
||||
- [Prerequisites](#prerequisites-1)
|
||||
- [Install Helm Repo and Charts](#install-helm-repo-and-charts)
|
||||
- [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app-1)
|
||||
- [Monitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
|
||||
- [What's next](#whats-next)
|
||||
|
||||
|
||||
# Send data to SigNoz Self-hosted with Docker
|
||||
|
||||
In this guide you will install the OTel demo application using Docker and send telemetry data to SigNoz hosted with Docker, referred to as SigNoz [Docker] from now on.
|
||||
|
||||
|
||||
## Prerequisites
|
||||
- Docker and Docker Compose installed
|
||||
- 6 GB of RAM for the application [as per OpenTelemetry documentation]
|
||||
- Docker Desktop (nice to have, for easy monitoring)
|
||||
|
||||
|
||||
## Clone the OpenTelemetry Demo App Repository
|
||||
Clone the OTel demo app to any folder of your choice.
|
||||
```sh
|
||||
# Clone the OpenTelemetry Demo repository
|
||||
git clone https://github.com/open-telemetry/opentelemetry-demo.git
|
||||
cd opentelemetry-demo
|
||||
```
|
||||
|
||||
## Modify OpenTelemetry Collector Config
|
||||
|
||||
By default, the collector in the demo application will merge the configuration from two files:
|
||||
|
||||
1. otelcol-config.yml [we don't touch this]
|
||||
2. otelcol-config-extras.yml [we modify this]
|
||||
|
||||
To add SigNoz [Docker] as the backend, open the file `src/otel-collector/otelcol-config-extras.yml` and add the following:
|
||||
```yaml
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "http://host.docker.internal:4317"
|
||||
tls:
|
||||
insecure: true
|
||||
debug:
|
||||
verbosity: detailed
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
metrics:
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
exporters: [spanmetrics, otlp]
|
||||
logs:
|
||||
exporters: [otlp]
|
||||
```
|
||||
|
||||
The SigNoz OTel collector [SigNoz's otel-collector service] listens on port 4317 on localhost. When the OTel demo app is running within a Docker container and needs to transmit telemetry data to SigNoz, it cannot directly reference 'localhost', as this would refer to the container's own internal network. Instead, Docker provides a special DNS name, `host.docker.internal`, which resolves to the host machine's IP address from within containers. By configuring the OpenTelemetry Demo application to send data to `host.docker.internal:4317`, we establish a network path that allows the containerized application to transmit telemetry data across the container boundary to the SigNoz OTel collector running on the host machine's port 4317.
|
||||
|
||||
>
|
||||
> Note: When merging extra configuration values with the existing collector config (`src/otel-collector/otelcol-config.yml`), objects are merged and arrays are replaced, resulting in previous pipeline configurations being overridden.
> The spanmetrics exporter must be included in the array of exporters for the traces pipeline if it is overridden. Not including this exporter will result in an error.
|
||||
>
|
||||
<br>
|
||||
<u>To send data to SigNoz Cloud</u>
|
||||
|
||||
If you want to send data to SigNoz Cloud instead, open the file `src/otel-collector/otelcol-config-extras.yml` and add the following:
|
||||
```yaml
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "https://ingest.{your-region}.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
signoz-access-token: <SIGNOZ-KEY>
|
||||
debug:
|
||||
verbosity: detailed
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
metrics:
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
exporters: [spanmetrics, otlp]
|
||||
logs:
|
||||
exporters: [otlp]
|
||||
```
|
||||
Remember to replace the region and ingestion key with proper values as obtained from your account.
|
||||
|
||||
|
||||
## Start the OpenTelemetry Demo App
|
||||
|
||||
Both SigNoz and the OTel demo app [the frontend-proxy service, to be accurate] default to port 8080. To prevent a port allocation conflict, set `ENVOY_PORT` to 8081 as shown below and run the docker compose command.
|
||||
|
||||
```sh
|
||||
ENVOY_PORT=8081 docker compose up -d
|
||||
```
|
||||
This spins up multiple microservices with OpenTelemetry instrumentation enabled. You can verify this by running:
|
||||
```sh
|
||||
docker compose ps -a
|
||||
```
|
||||
The result should look similar to this:
|
||||

|
||||
|
||||
|
||||
|
||||
Navigate to `http://localhost:8081/`, where you can access the OTel demo app UI. Generate some traffic to send to SigNoz [Docker].
|
||||
|
||||
## Monitor with SigNoz [Docker]
|
||||
SigNoz exposes its UI at `http://localhost:8080/`. You should be able to see multiple services listed, as shown in the snapshot below.
|
||||
|
||||
|
||||

|
||||
|
||||
This verifies that your OTel demo app is successfully sending telemetry data to SigNoz [Docker] as expected.
|
||||
|
||||
|
||||
# Send data to SigNoz Self-hosted with Kubernetes
|
||||
|
||||
In this guide you will install the OTel demo application using Helm and send telemetry data to SigNoz hosted with Kubernetes, referred to as SigNoz [Kubernetes] from now on.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Helm installed
|
||||
- 6 GB of free RAM for the application [as per OpenTelemetry documentation]
|
||||
- A Kubernetes cluster (EKS, GKE, Minikube)
|
||||
- kubectl [CLI for Kubernetes]
|
||||
|
||||
> Note: We will install the OTel demo app using Helm charts, as recommended by OpenTelemetry. If you wish to install using kubectl, follow [this](https://opentelemetry.io/docs/demo/kubernetes-deployment/#install-using-kubectl).
|
||||
|
||||
|
||||
## Install Helm Repo and Charts
|
||||
You'll need to **add the Helm repository** to start sending data to SigNoz.
|
||||
|
||||
```sh
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
```
The OpenTelemetry Collector's configuration is exposed in the Helm chart, and any additions you make are merged into the default configuration. We use this capability to add SigNoz as an exporter and to set up the pipelines as desired.

For this, create a `values.yaml` that overrides the existing configuration that comes with the Helm chart.
```yaml
default:
  env:
    - name: OTEL_SERVICE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: "metadata.labels['app.kubernetes.io/component']"
    - name: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      value: cumulative
    - name: OTEL_RESOURCE_ATTRIBUTES
      value: 'service.name=$(OTEL_SERVICE_NAME),service.namespace=opentelemetry-demo'
    - name: OTEL_COLLECTOR_NAME
      value: signoz-otel-collector.<namespace>.svc.cluster.local
```
Replace `<namespace>` with the namespace in which SigNoz is installed. This file overrides the chart's existing settings with our new ones, ensuring telemetry data is sent to SigNoz [Kubernetes].
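If you are unsure of the collector's exact service name or namespace, you can look it up (the name below is the SigNoz chart's default; yours may differ):

```sh
kubectl get svc --all-namespaces | grep signoz-otel-collector
```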
> Note: When merging YAML values with Helm, objects are merged and arrays are replaced. If you override the traces pipeline, the spanmetrics exporter must be included in its array of exporters; omitting it will result in an error.
<br>
<u>To send data to SigNoz Cloud</u>

If you wish to send data to a cloud instance of SigNoz instead, add the following to your `values.yaml`:
```yaml
opentelemetry-collector:
  config:
    exporters:
      otlp:
        endpoint: "https://ingest.{your-region}.signoz.cloud:443"
        tls:
          insecure: false
        headers:
          signoz-access-token: <SIGNOZ-KEY>
      debug:
        verbosity: detailed
    service:
      pipelines:
        traces:
          exporters: [spanmetrics, otlp]
        metrics:
          exporters: [otlp]
        logs:
          exporters: [otlp]
```
Make sure to replace the region and key with the values obtained from your account.
Now **install the Helm chart** with a release name and namespace of your choice. We'll use *my-otel-demo* as the release name and *otel-demo* as the namespace in the snippet below:
```sh
# Create a new Kubernetes namespace called "otel-demo"
kubectl create namespace otel-demo

# Install the OpenTelemetry Demo Helm chart with the release name "my-otel-demo"
helm install my-otel-demo open-telemetry/opentelemetry-demo --namespace otel-demo -f values.yaml
```
You should see a similar output on your terminal:

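You can also confirm that the release was created (a standard Helm command; names as per the snippet above):

```sh
helm list --namespace otel-demo
```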
To verify that all the pods are running:
```sh
kubectl get pods -n otel-demo
```
The output should look similar to this:


## Start the OpenTelemetry Demo App

To expose the OTel demo app UI [frontend-proxy service], use the following command (replace *my-otel-demo* with your Helm chart release name). `kubectl port-forward` takes `LOCAL_PORT:REMOTE_PORT`; we forward to local port 8081 because SigNoz already occupies 8080:
```sh
kubectl port-forward svc/my-otel-demo-frontend-proxy 8081:8080
```
Navigate to `http://localhost:8081/` to access the OTel demo app UI. Generate some traffic to send to SigNoz [Kubernetes].
## Monitor with SigNoz [Kubernetes]
SigNoz exposes its UI at `http://localhost:8080/`. You should see multiple services listed, as shown in the snapshot below.


This verifies that your OTel demo app is successfully sending telemetry data to SigNoz [Kubernetes] as expected.
# What's next?
Don't forget to check out our OpenTelemetry [track](https://signoz.io/resource-center/opentelemetry/), guaranteed to take you from newbie to sensei in no time!
Also, from one OTel fan to another: we at [SigNoz](https://signoz.io/) are building an open-source, OTel-native observability platform (one of a kind). So show us some love - star us on [GitHub](https://github.com/SigNoz/signoz), nitpick our [docs](https://signoz.io/docs/introduction/), or just tell your app we're the ones who'll catch its crashes mid-flight and finally shush all the 3am panic calls!
ee/anomaly/daily.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type DailyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*DailyProvider)(nil)

func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &dp.BaseSeasonalProvider
}

func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
    dp := &DailyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(dp)
    }

    return dp
}

func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    req.Seasonality = SeasonalityDaily
    return p.getAnomalies(ctx, orgID, req)
}
ee/anomaly/hourly.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type HourlyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*HourlyProvider)(nil)

func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &hp.BaseSeasonalProvider
}

// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
    hp := &HourlyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(hp)
    }

    return hp
}

func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    req.Seasonality = SeasonalityHourly
    return p.getAnomalies(ctx, orgID, req)
}
ee/anomaly/params.go (new file, 223 lines)
@@ -0,0 +1,223 @@
package anomaly

import (
    "time"

    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/valuer"
)

type Seasonality struct{ valuer.String }

var (
    SeasonalityHourly = Seasonality{valuer.NewString("hourly")}
    SeasonalityDaily  = Seasonality{valuer.NewString("daily")}
    SeasonalityWeekly = Seasonality{valuer.NewString("weekly")}
)

var (
    oneWeekOffset = uint64(24 * 7 * time.Hour.Milliseconds())
    oneDayOffset  = uint64(24 * time.Hour.Milliseconds())
    oneHourOffset = uint64(time.Hour.Milliseconds())
    fiveMinOffset = uint64(5 * time.Minute.Milliseconds())
)

func (s Seasonality) IsValid() bool {
    switch s {
    case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
        return true
    default:
        return false
    }
}

type AnomaliesRequest struct {
    Params      qbtypes.QueryRangeRequest
    Seasonality Seasonality
}

type AnomaliesResponse struct {
    Results []*qbtypes.TimeSeriesData
}

// anomalyQueryParams holds the query params for anomaly detection
//
// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
//
//                     ^                          ^
//                     |                          |
//     (rounded value for past period)     (seasonal growth)
//
// score = abs(value - prediction) / stddev(current_season_query)
type anomalyQueryParams struct {
    // CurrentPeriodQuery is the query range params for the period the user is looking at, i.e. the eval window
    // Example: (now-5m, now), (now-30m, now), (now-1h, now)
    // The results obtained from this query are compared against the predicted values
    // to detect anomalies
    CurrentPeriodQuery qbtypes.QueryRangeRequest
    // PastPeriodQuery is the query range params for the past period of the seasonality
    // Example: For weekly seasonality, (now-1w-5m, now-1w)
    //        : For daily seasonality, (now-1d-5m, now-1d)
    //        : For hourly seasonality, (now-1h-5m, now-1h)
    PastPeriodQuery qbtypes.QueryRangeRequest
    // CurrentSeasonQuery is the query range params for the current (seasonal) period
    // Example: For weekly seasonality, this is the query range params for (now-1w-5m, now)
    //        : For daily seasonality, this is the query range params for (now-1d-5m, now)
    //        : For hourly seasonality, this is the query range params for (now-1h-5m, now)
    CurrentSeasonQuery qbtypes.QueryRangeRequest
    // PastSeasonQuery is the query range params for the seasonal period before the current one
    // Example: For weekly seasonality, this is the query range params for (now-2w-5m, now-1w)
    //        : For daily seasonality, this is the query range params for (now-2d-5m, now-1d)
    //        : For hourly seasonality, this is the query range params for (now-2h-5m, now-1h)
    PastSeasonQuery qbtypes.QueryRangeRequest
    // Past2SeasonQuery is the query range params for two seasonal periods before the current one
    // Example: For weekly seasonality, this is the query range params for (now-3w-5m, now-2w)
    //        : For daily seasonality, this is the query range params for (now-3d-5m, now-2d)
    //        : For hourly seasonality, this is the query range params for (now-3h-5m, now-2h)
    Past2SeasonQuery qbtypes.QueryRangeRequest
    // Past3SeasonQuery is the query range params for three seasonal periods before the current one
    // Example: For weekly seasonality, this is the query range params for (now-4w-5m, now-3w)
    //        : For daily seasonality, this is the query range params for (now-4d-5m, now-3d)
    //        : For hourly seasonality, this is the query range params for (now-4h-5m, now-3h)
    Past3SeasonQuery qbtypes.QueryRangeRequest
}

func prepareAnomalyQueryParams(req qbtypes.QueryRangeRequest, seasonality Seasonality) *anomalyQueryParams {
    start := req.Start
    end := req.End

    currentPeriodQuery := qbtypes.QueryRangeRequest{
        Start:          start,
        End:            end,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var pastPeriodStart, pastPeriodEnd uint64

    switch seasonality {
    // for the weekly period, we fetch the data from the past week with a 5 min offset
    case SeasonalityWeekly:
        pastPeriodStart = start - oneWeekOffset - fiveMinOffset
        pastPeriodEnd = end - oneWeekOffset
    // for the daily period, we fetch the data from the past day with a 5 min offset
    case SeasonalityDaily:
        pastPeriodStart = start - oneDayOffset - fiveMinOffset
        pastPeriodEnd = end - oneDayOffset
    // for the hourly period, we fetch the data from the past hour with a 5 min offset
    case SeasonalityHourly:
        pastPeriodStart = start - oneHourOffset - fiveMinOffset
        pastPeriodEnd = end - oneHourOffset
    }

    pastPeriodQuery := qbtypes.QueryRangeRequest{
        Start:          pastPeriodStart,
        End:            pastPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    // seasonality growth trend
    var currentGrowthPeriodStart, currentGrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        currentGrowthPeriodStart = start - oneWeekOffset
        currentGrowthPeriodEnd = start
    case SeasonalityDaily:
        currentGrowthPeriodStart = start - oneDayOffset
        currentGrowthPeriodEnd = start
    case SeasonalityHourly:
        currentGrowthPeriodStart = start - oneHourOffset
        currentGrowthPeriodEnd = start
    }

    currentGrowthQuery := qbtypes.QueryRangeRequest{
        Start:          currentGrowthPeriodStart,
        End:            currentGrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var pastGrowthPeriodStart, pastGrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        pastGrowthPeriodStart = start - 2*oneWeekOffset
        pastGrowthPeriodEnd = start - 1*oneWeekOffset
    case SeasonalityDaily:
        pastGrowthPeriodStart = start - 2*oneDayOffset
        pastGrowthPeriodEnd = start - 1*oneDayOffset
    case SeasonalityHourly:
        pastGrowthPeriodStart = start - 2*oneHourOffset
        pastGrowthPeriodEnd = start - 1*oneHourOffset
    }

    pastGrowthQuery := qbtypes.QueryRangeRequest{
        Start:          pastGrowthPeriodStart,
        End:            pastGrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var past2GrowthPeriodStart, past2GrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        past2GrowthPeriodStart = start - 3*oneWeekOffset
        past2GrowthPeriodEnd = start - 2*oneWeekOffset
    case SeasonalityDaily:
        past2GrowthPeriodStart = start - 3*oneDayOffset
        past2GrowthPeriodEnd = start - 2*oneDayOffset
    case SeasonalityHourly:
        past2GrowthPeriodStart = start - 3*oneHourOffset
        past2GrowthPeriodEnd = start - 2*oneHourOffset
    }

    past2GrowthQuery := qbtypes.QueryRangeRequest{
        Start:          past2GrowthPeriodStart,
        End:            past2GrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var past3GrowthPeriodStart, past3GrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        past3GrowthPeriodStart = start - 4*oneWeekOffset
        past3GrowthPeriodEnd = start - 3*oneWeekOffset
    case SeasonalityDaily:
        past3GrowthPeriodStart = start - 4*oneDayOffset
        past3GrowthPeriodEnd = start - 3*oneDayOffset
    case SeasonalityHourly:
        past3GrowthPeriodStart = start - 4*oneHourOffset
        past3GrowthPeriodEnd = start - 3*oneHourOffset
    }

    past3GrowthQuery := qbtypes.QueryRangeRequest{
        Start:          past3GrowthPeriodStart,
        End:            past3GrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    return &anomalyQueryParams{
        CurrentPeriodQuery: currentPeriodQuery,
        PastPeriodQuery:    pastPeriodQuery,
        CurrentSeasonQuery: currentGrowthQuery,
        PastSeasonQuery:    pastGrowthQuery,
        Past2SeasonQuery:   past2GrowthQuery,
        Past3SeasonQuery:   past3GrowthQuery,
    }
}

type anomalyQueryResults struct {
    CurrentPeriodResults []*qbtypes.TimeSeriesData
    PastPeriodResults    []*qbtypes.TimeSeriesData
    CurrentSeasonResults []*qbtypes.TimeSeriesData
    PastSeasonResults    []*qbtypes.TimeSeriesData
    Past2SeasonResults   []*qbtypes.TimeSeriesData
    Past3SeasonResults   []*qbtypes.TimeSeriesData
}
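To make the prediction and score formulas in the comment above concrete, here is a quick walk-through with hypothetical round numbers (illustrative only):

```go
// Hypothetical values, not from any real query.
pastPeriodAvg := 100.0    // avg(past_period_query)
currentSeasonAvg := 110.0 // avg(current_season_query)
pastSeasonsMean := 105.0  // mean of the past three seasons' averages

prediction := pastPeriodAvg + currentSeasonAvg - pastSeasonsMean // 105
// With an observed value of 130 and stddev(current_season_query) = 10:
score := math.Abs(130.0-prediction) / 10.0 // 2.5
// At a z-score threshold of 3, a score of 2.5 would not be flagged as anomalous.
```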
ee/anomaly/provider.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type Provider interface {
    GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error)
}
ee/anomaly/seasonal.go (new file, 463 lines)
@@ -0,0 +1,463 @@
package anomaly

import (
    "context"
    "log/slog"
    "math"

    "github.com/SigNoz/signoz/pkg/querier"
    "github.com/SigNoz/signoz/pkg/valuer"

    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

var (
    // TODO(srikanthccv): make this configurable?
    movingAvgWindowSize = 7
)

// BaseProvider is an interface that includes common methods for all provider types
type BaseProvider interface {
    GetBaseSeasonalProvider() *BaseSeasonalProvider
}

// GenericProviderOption is a generic type for provider options
type GenericProviderOption[T BaseProvider] func(T)

func WithQuerier[T BaseProvider](querier querier.Querier) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().querier = querier
    }
}

func WithLogger[T BaseProvider](logger *slog.Logger) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().logger = logger
    }
}

type BaseSeasonalProvider struct {
    querier querier.Querier
    logger  *slog.Logger
}

func (p *BaseSeasonalProvider) getQueryParams(req *AnomaliesRequest) *anomalyQueryParams {
    if !req.Seasonality.IsValid() {
        req.Seasonality = SeasonalityDaily
    }
    return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}

func (p *BaseSeasonalProvider) toTSResults(ctx context.Context, resp *qbtypes.QueryRangeResponse) []*qbtypes.TimeSeriesData {
    tsData := []*qbtypes.TimeSeriesData{}

    if resp == nil {
        p.logger.InfoContext(ctx, "nil response from query range")
        return tsData
    }

    for _, item := range resp.Data.Results {
        if resultData, ok := item.(*qbtypes.TimeSeriesData); ok {
            tsData = append(tsData, resultData)
        }
    }

    return tsData
}

func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
    // TODO(srikanthccv): parallelize this?
    p.logger.InfoContext(ctx, "fetching results for current period", "anomaly_current_period_query", params.CurrentPeriodQuery)
    currentPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentPeriodQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for past period", "anomaly_past_period_query", params.PastPeriodQuery)
    pastPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.PastPeriodQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for current season", "anomaly_current_season_query", params.CurrentSeasonQuery)
    currentSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentSeasonQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for past season", "anomaly_past_season_query", params.PastSeasonQuery)
    pastSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.PastSeasonQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for past 2 season", "anomaly_past_2season_query", params.Past2SeasonQuery)
    past2SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past2SeasonQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for past 3 season", "anomaly_past_3season_query", params.Past3SeasonQuery)
    past3SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past3SeasonQuery)
    if err != nil {
        return nil, err
    }

    return &anomalyQueryResults{
        CurrentPeriodResults: p.toTSResults(ctx, currentPeriodResults),
        PastPeriodResults:    p.toTSResults(ctx, pastPeriodResults),
        CurrentSeasonResults: p.toTSResults(ctx, currentSeasonResults),
        PastSeasonResults:    p.toTSResults(ctx, pastSeasonResults),
        Past2SeasonResults:   p.toTSResults(ctx, past2SeasonResults),
        Past3SeasonResults:   p.toTSResults(ctx, past3SeasonResults),
    }, nil
}

// getMatchingSeries gets the matching series from the query result
// for the given series
func (p *BaseSeasonalProvider) getMatchingSeries(_ context.Context, queryResult *qbtypes.TimeSeriesData, series *qbtypes.TimeSeries) *qbtypes.TimeSeries {
    if queryResult == nil || len(queryResult.Aggregations) == 0 || len(queryResult.Aggregations[0].Series) == 0 {
        return nil
    }

    for _, curr := range queryResult.Aggregations[0].Series {
        currLabelsKey := qbtypes.GetUniqueSeriesKey(curr.Labels)
        seriesLabelsKey := qbtypes.GetUniqueSeriesKey(series.Labels)
        if currLabelsKey == seriesLabelsKey {
            return curr
        }
    }
    return nil
}

func (p *BaseSeasonalProvider) getAvg(series *qbtypes.TimeSeries) float64 {
    if series == nil || len(series.Values) == 0 {
        return 0
    }
    var sum float64
    for _, smpl := range series.Values {
        sum += smpl.Value
    }
    return sum / float64(len(series.Values))
}

func (p *BaseSeasonalProvider) getStdDev(series *qbtypes.TimeSeries) float64 {
    if series == nil || len(series.Values) == 0 {
        return 0
    }
    avg := p.getAvg(series)
    var sum float64
    for _, smpl := range series.Values {
        sum += math.Pow(smpl.Value-avg, 2)
    }
    return math.Sqrt(sum / float64(len(series.Values)))
}

// getMovingAvg gets the moving average for the given series
// for the given window size and start index
func (p *BaseSeasonalProvider) getMovingAvg(series *qbtypes.TimeSeries, movingAvgWindowSize, startIdx int) float64 {
    if series == nil || len(series.Values) == 0 {
        return 0
    }
    if startIdx >= len(series.Values)-movingAvgWindowSize {
        startIdx = int(math.Max(0, float64(len(series.Values)-movingAvgWindowSize)))
    }
    var sum float64
    points := series.Values[startIdx:]
    windowSize := int(math.Min(float64(movingAvgWindowSize), float64(len(points))))
    for i := 0; i < windowSize; i++ {
        sum += points[i].Value
    }
    avg := sum / float64(windowSize)
    return avg
}

func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
    if len(floats) == 0 {
        return 0
    }
    var sum float64
    for _, f := range floats {
        sum += f
    }
    return sum / float64(len(floats))
}

func (p *BaseSeasonalProvider) getPredictedSeries(
    ctx context.Context,
    series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
) *qbtypes.TimeSeries {
    predictedSeries := &qbtypes.TimeSeries{
        Labels: series.Labels,
        Values: make([]*qbtypes.TimeSeriesValue, 0),
    }

    // for each point in the series, get the predicted value
    // the predicted value is the moving average (with window size = 7) of the previous period series
    // plus the average of the current season series
    // minus the mean of the past season series, past2 season series and past3 season series
    for idx, curr := range series.Values {
        movingAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
        avg := p.getAvg(currentSeasonSeries)
        mean := p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
        predictedValue := movingAvg + avg - mean

        if predictedValue < 0 {
            // this should not happen (except when the data has extreme outliers)
            // we will use the moving avg of the previous period series in this case
            p.logger.WarnContext(ctx, "predicted value is less than 0 for series", "anomaly_predicted_value", predictedValue, "anomaly_labels", series.Labels)
            predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
        }

        p.logger.DebugContext(ctx, "predicted value for series",
            "anomaly_moving_avg", movingAvg,
            "anomaly_avg", avg,
            "anomaly_mean", mean,
            "anomaly_labels", series.Labels,
            "anomaly_predicted_value", predictedValue,
            "anomaly_curr", curr.Value,
        )
        predictedSeries.Values = append(predictedSeries.Values, &qbtypes.TimeSeriesValue{
            Timestamp: curr.Timestamp,
            Value:     predictedValue,
        })
    }

    return predictedSeries
}

// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold
// moving avg of the predicted series + z score threshold * std dev of the series
// moving avg of the predicted series - z score threshold * std dev of the series
func (p *BaseSeasonalProvider) getBounds(
    series, predictedSeries *qbtypes.TimeSeries,
    zScoreThreshold float64,
) (*qbtypes.TimeSeries, *qbtypes.TimeSeries) {
    upperBoundSeries := &qbtypes.TimeSeries{
        Labels: series.Labels,
        Values: make([]*qbtypes.TimeSeriesValue, 0),
    }

    lowerBoundSeries := &qbtypes.TimeSeries{
        Labels: series.Labels,
        Values: make([]*qbtypes.TimeSeriesValue, 0),
    }

    for idx, curr := range series.Values {
        upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
        lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
        upperBoundSeries.Values = append(upperBoundSeries.Values, &qbtypes.TimeSeriesValue{
            Timestamp: curr.Timestamp,
            Value:     upperBound,
        })
        lowerBoundSeries.Values = append(lowerBoundSeries.Values, &qbtypes.TimeSeriesValue{
            Timestamp: curr.Timestamp,
            Value:     math.Max(lowerBound, 0),
        })
    }

    return upperBoundSeries, lowerBoundSeries
}

// getExpectedValue gets the expected value for the given series
// at the given index
// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series
func (p *BaseSeasonalProvider) getExpectedValue(
    _, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, idx int,
) float64 {
    prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
    currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
    pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
    past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
    past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
    return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
}

// getScore gets the anomaly score for the given series
// at the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getScore(
    series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, value float64, idx int,
) float64 {
    expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
    if expectedValue < 0 {
        expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
    }
    return (value - expectedValue) / p.getStdDev(weekSeries)
}

// getAnomalyScores gets the anomaly scores for each point in the given series
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getAnomalyScores(
    series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
) *qbtypes.TimeSeries {
    anomalyScoreSeries := &qbtypes.TimeSeries{
        Labels: series.Labels,
        Values: make([]*qbtypes.TimeSeriesValue, 0),
    }

    for idx, curr := range series.Values {
        anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
        anomalyScoreSeries.Values = append(anomalyScoreSeries.Values, &qbtypes.TimeSeriesValue{
            Timestamp: curr.Timestamp,
            Value:     anomalyScore,
        })
    }

    return anomalyScoreSeries
}

func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    anomalyParams := p.getQueryParams(req)
    anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
    if err != nil {
        return nil, err
    }

    currentPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.CurrentPeriodResults {
        currentPeriodResults[result.QueryName] = result
    }

    pastPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.PastPeriodResults {
        pastPeriodResults[result.QueryName] = result
    }

    currentSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.CurrentSeasonResults {
        currentSeasonResults[result.QueryName] = result
    }

    pastSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.PastSeasonResults {
        pastSeasonResults[result.QueryName] = result
    }

    past2SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.Past2SeasonResults {
        past2SeasonResults[result.QueryName] = result
    }

    past3SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.Past3SeasonResults {
        past3SeasonResults[result.QueryName] = result
    }

    for _, result := range currentPeriodResults {
        funcs := req.Params.FuncsForQuery(result.QueryName)

        var zScoreThreshold float64
        for _, f := range funcs {
            if f.Name == qbtypes.FunctionNameAnomaly {
                for _, arg := range f.Args {
                    if arg.Name != "z_score_threshold" {
                        continue
                    }
                    value, ok := arg.Value.(float64)
                    if ok {
                        zScoreThreshold = value
                    } else {
                        p.logger.InfoContext(ctx, "z_score_threshold not provided, defaulting")
                        zScoreThreshold = 3
                    }
                    break
                }
            }
        }

        pastPeriodResult, ok := pastPeriodResults[result.QueryName]
        if !ok {
            continue
        }
        currentSeasonResult, ok := currentSeasonResults[result.QueryName]
        if !ok {
            continue
        }
        pastSeasonResult, ok := pastSeasonResults[result.QueryName]
        if !ok {
            continue
        }
        past2SeasonResult, ok := past2SeasonResults[result.QueryName]
        if !ok {
            continue
        }
        past3SeasonResult, ok := past3SeasonResults[result.QueryName]
        if !ok {
            continue
        }

        // no data;
        if len(result.Aggregations) == 0 {
            continue
        }

        aggOfInterest := result.Aggregations[0]

        for _, series := range aggOfInterest.Series {
            stdDev := p.getStdDev(series)
            p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)

            pastPeriodSeries := p.getMatchingSeries(ctx, pastPeriodResult, series)
            currentSeasonSeries := p.getMatchingSeries(ctx, currentSeasonResult, series)
            pastSeasonSeries := p.getMatchingSeries(ctx, pastSeasonResult, series)
            past2SeasonSeries := p.getMatchingSeries(ctx, past2SeasonResult, series)
            past3SeasonSeries := p.getMatchingSeries(ctx, past3SeasonResult, series)

            prevSeriesAvg := p.getAvg(pastPeriodSeries)
            currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
            pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
            past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
            past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
            p.logger.InfoContext(ctx, "calculated mean for series",
                "anomaly_prev_series_avg", prevSeriesAvg,
                "anomaly_current_season_series_avg", currentSeasonSeriesAvg,
                "anomaly_past_season_series_avg", pastSeasonSeriesAvg,
                "anomaly_past_2season_series_avg", past2SeasonSeriesAvg,
                "anomaly_past_3season_series_avg", past3SeasonSeriesAvg,
                "anomaly_labels", series.Labels,
            )

            predictedSeries := p.getPredictedSeries(
                ctx,
                series,
                pastPeriodSeries,
                currentSeasonSeries,
                pastSeasonSeries,
                past2SeasonSeries,
                past3SeasonSeries,
            )
            aggOfInterest.PredictedSeries = append(aggOfInterest.PredictedSeries, predictedSeries)

            upperBoundSeries, lowerBoundSeries := p.getBounds(
                series,
                predictedSeries,
                zScoreThreshold,
            )
            aggOfInterest.UpperBoundSeries = append(aggOfInterest.UpperBoundSeries, upperBoundSeries)
            aggOfInterest.LowerBoundSeries = append(aggOfInterest.LowerBoundSeries, lowerBoundSeries)

            anomalyScoreSeries := p.getAnomalyScores(
                series,
                pastPeriodSeries,
                currentSeasonSeries,
                pastSeasonSeries,
                past2SeasonSeries,
                past3SeasonSeries,
            )
            aggOfInterest.AnomalyScores = append(aggOfInterest.AnomalyScores, anomalyScoreSeries)
        }
    }

    results := make([]*qbtypes.TimeSeriesData, 0, len(currentPeriodResults))
    for _, result := range currentPeriodResults {
        results = append(results, result)
    }

    return &AnomaliesResponse{
        Results: results,
    }, nil
}
ee/anomaly/weekly.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type WeeklyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*WeeklyProvider)(nil)

func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &wp.BaseSeasonalProvider
}

func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
    wp := &WeeklyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(wp)
    }

    return wp
}

func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    req.Seasonality = SeasonalityWeekly
    return p.getAnomalies(ctx, orgID, req)
}
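For orientation, wiring one of the hourly/daily/weekly providers together looks roughly like this (a sketch; `q`, `logger`, `orgID`, and `params` stand in for real dependencies):

```go
// Illustrative usage only, not part of the changeset.
provider := anomaly.NewWeeklyProvider(
    anomaly.WithQuerier[*anomaly.WeeklyProvider](q),
    anomaly.WithLogger[*anomaly.WeeklyProvider](logger),
)
// Seasonality is set internally by the provider (weekly here).
resp, err := provider.GetAnomalies(ctx, orgID, &anomaly.AnomaliesRequest{Params: params})
```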
@@ -1,36 +0,0 @@
package middleware

import (
    "net/http"

    "go.signoz.io/signoz/pkg/types/authtypes"
)

type Pat struct {
    uuid    *authtypes.UUID
    headers []string
}

func NewPat(headers []string) *Pat {
    return &Pat{uuid: authtypes.NewUUID(), headers: headers}
}

func (p *Pat) Wrap(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        var values []string
        for _, header := range p.headers {
            values = append(values, r.Header.Get(header))
        }

        ctx, err := p.uuid.ContextFromRequest(r.Context(), values...)
        if err != nil {
            next.ServeHTTP(w, r)
            return
        }

        r = r.WithContext(ctx)

        next.ServeHTTP(w, r)
    })
}
ee/licensing/config.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package licensing

import (
    "fmt"
    "sync"
    "time"

    "github.com/SigNoz/signoz/pkg/licensing"
)

var (
    config licensing.Config
    once   sync.Once
)

// Config initializes (once) and returns the licensing configuration
func Config(pollInterval time.Duration, failureThreshold int) licensing.Config {
    once.Do(func() {
        config = licensing.Config{PollInterval: pollInterval, FailureThreshold: failureThreshold}
        if err := config.Validate(); err != nil {
            panic(fmt.Errorf("invalid licensing config: %w", err))
        }
    })

    return config
}
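A minimal usage sketch of this helper (values illustrative; note that this is the `ee/licensing` package's `Config` function, which returns the `pkg/licensing` config type):

```go
// Hypothetical values: poll upstream daily, tolerate three misses.
cfg := licensing.Config(24*time.Hour, 3)
```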
ee/licensing/httplicensing/api.go (new file, 168 lines)
@@ -0,0 +1,168 @@
package httplicensing

import (
    "context"
    "encoding/json"
    "net/http"
    "time"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/http/render"
    "github.com/SigNoz/signoz/pkg/licensing"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/types/licensetypes"
    "github.com/SigNoz/signoz/pkg/valuer"
)

type licensingAPI struct {
    licensing licensing.Licensing
}

func NewLicensingAPI(licensing licensing.Licensing) licensing.API {
    return &licensingAPI{licensing: licensing}
}

func (api *licensingAPI) Activate(rw http.ResponseWriter, r *http.Request) {
    ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
    defer cancel()

    claims, err := authtypes.ClaimsFromContext(ctx)
    if err != nil {
        render.Error(rw, err)
        return
    }

    orgID, err := valuer.NewUUID(claims.OrgID)
    if err != nil {
        render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
        return
    }

    req := new(licensetypes.PostableLicense)
    err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        render.Error(rw, err)
        return
    }

    err = api.licensing.Activate(r.Context(), orgID, req.Key)
    if err != nil {
        render.Error(rw, err)
        return
    }

    render.Success(rw, http.StatusAccepted, nil)
}

func (api *licensingAPI) GetActive(rw http.ResponseWriter, r *http.Request) {
    ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
    defer cancel()

    claims, err := authtypes.ClaimsFromContext(ctx)
    if err != nil {
        render.Error(rw, err)
        return
    }

    orgID, err := valuer.NewUUID(claims.OrgID)
    if err != nil {
        render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
        return
    }

    license, err := api.licensing.GetActive(r.Context(), orgID)
    if err != nil {
        render.Error(rw, err)
        return
    }

    gettableLicense := licensetypes.NewGettableLicense(license.Data, license.Key)
    render.Success(rw, http.StatusOK, gettableLicense)
}

func (api *licensingAPI) Refresh(rw http.ResponseWriter, r *http.Request) {
    ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
    defer cancel()

    claims, err := authtypes.ClaimsFromContext(ctx)
    if err != nil {
        render.Error(rw, err)
        return
    }

    orgID, err := valuer.NewUUID(claims.OrgID)
    if err != nil {
        render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
        return
    }

    err = api.licensing.Refresh(r.Context(), orgID)
    if err != nil {
        render.Error(rw, err)
        return
    }

    render.Success(rw, http.StatusNoContent, nil)
}

func (api *licensingAPI) Checkout(rw http.ResponseWriter, r *http.Request) {
    ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
    defer cancel()

    claims, err := authtypes.ClaimsFromContext(ctx)
    if err != nil {
        render.Error(rw, err)
        return
    }

    orgID, err := valuer.NewUUID(claims.OrgID)
    if err != nil {
        render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
        return
    }

    req := new(licensetypes.PostableSubscription)
    if err := json.NewDecoder(r.Body).Decode(req); err != nil {
        render.Error(rw, err)
        return
    }

    gettableSubscription, err := api.licensing.Checkout(ctx, orgID, req)
    if err != nil {
        render.Error(rw, err)
        return
    }

    render.Success(rw, http.StatusCreated, gettableSubscription)
}

func (api *licensingAPI) Portal(rw http.ResponseWriter, r *http.Request) {
    ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
    defer cancel()

    claims, err := authtypes.ClaimsFromContext(ctx)
    if err != nil {
        render.Error(rw, err)
        return
    }

    orgID, err := valuer.NewUUID(claims.OrgID)
    if err != nil {
        render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
        return
    }

    req := new(licensetypes.PostableSubscription)
    if err := json.NewDecoder(r.Body).Decode(req); err != nil {
        render.Error(rw, err)
        return
    }

    gettableSubscription, err := api.licensing.Portal(ctx, orgID, req)
    if err != nil {
        render.Error(rw, err)
        return
    }

    render.Success(rw, http.StatusCreated, gettableSubscription)
}
ee/licensing/httplicensing/provider.go (new file, 249 lines)
@@ -0,0 +1,249 @@
package httplicensing

import (
    "context"
    "encoding/json"
    "time"

    "github.com/SigNoz/signoz/ee/licensing/licensingstore/sqllicensingstore"
    "github.com/SigNoz/signoz/pkg/analytics"
    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/factory"
    "github.com/SigNoz/signoz/pkg/licensing"
    "github.com/SigNoz/signoz/pkg/modules/organization"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types/analyticstypes"
    "github.com/SigNoz/signoz/pkg/types/licensetypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/SigNoz/signoz/pkg/zeus"
    "github.com/tidwall/gjson"
)

type provider struct {
    store     licensetypes.Store
    zeus      zeus.Zeus
    config    licensing.Config
    settings  factory.ScopedProviderSettings
    orgGetter organization.Getter
    analytics analytics.Analytics
    stopChan  chan struct{}
}

func NewProviderFactory(store sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
    return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config licensing.Config) (licensing.Licensing, error) {
        return New(ctx, providerSettings, config, store, zeus, orgGetter, analytics)
    })
}

func New(ctx context.Context, ps factory.ProviderSettings, config licensing.Config, sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) (licensing.Licensing, error) {
    settings := factory.NewScopedProviderSettings(ps, "github.com/SigNoz/signoz/ee/licensing/httplicensing")
    licensestore := sqllicensingstore.New(sqlstore)
    return &provider{
        store:     licensestore,
        zeus:      zeus,
        config:    config,
        settings:  settings,
        orgGetter: orgGetter,
        stopChan:  make(chan struct{}),
        analytics: analytics,
    }, nil
}

func (provider *provider) Start(ctx context.Context) error {
    tick := time.NewTicker(provider.config.PollInterval)
    defer tick.Stop()

    err := provider.Validate(ctx)
    if err != nil {
        provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
    }

    for {
        select {
        case <-provider.stopChan:
            return nil
        case <-tick.C:
            err := provider.Validate(ctx)
            if err != nil {
                provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
            }
        }
    }
}

func (provider *provider) Stop(ctx context.Context) error {
    provider.settings.Logger().DebugContext(ctx, "license validation stopped")
    close(provider.stopChan)
    return nil
}

func (provider *provider) Validate(ctx context.Context) error {
    organizations, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
    if err != nil {
        return err
    }

    for _, organization := range organizations {
        err := provider.Refresh(ctx, organization.ID)
        if err != nil {
            return err
        }
    }

    return nil
}

func (provider *provider) Activate(ctx context.Context, organizationID valuer.UUID, key string) error {
    data, err := provider.zeus.GetLicense(ctx, key)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to fetch license data with upstream server")
    }

    license, err := licensetypes.NewLicense(data, organizationID)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to create license entity")
    }

    storableLicense := licensetypes.NewStorableLicenseFromLicense(license)
    err = provider.store.Create(ctx, storableLicense)
    if err != nil {
        return err
    }

    return nil
}

func (provider *provider) GetActive(ctx context.Context, organizationID valuer.UUID) (*licensetypes.License, error) {
    storableLicenses, err := provider.store.GetAll(ctx, organizationID)
    if err != nil {
        return nil, err
    }

    activeLicense, err := licensetypes.GetActiveLicenseFromStorableLicenses(storableLicenses, organizationID)
    if err != nil {
        return nil, err
    }

    return activeLicense, nil
}

func (provider *provider) Refresh(ctx context.Context, organizationID valuer.UUID) error {
    activeLicense, err := provider.GetActive(ctx, organizationID)
    if err != nil {
        if errors.Ast(err, errors.TypeNotFound) {
            return nil
        }
        provider.settings.Logger().ErrorContext(ctx, "license validation failed", "org_id", organizationID.StringValue())
        return err
    }

    data, err := provider.zeus.GetLicense(ctx, activeLicense.Key)
    if err != nil {
        if time.Since(activeLicense.LastValidatedAt) > time.Duration(provider.config.FailureThreshold)*provider.config.PollInterval {
            activeLicense.UpdateFeatures(licensetypes.BasicPlan)
            updatedStorableLicense := licensetypes.NewStorableLicenseFromLicense(activeLicense)
            err = provider.store.Update(ctx, organizationID, updatedStorableLicense)
            if err != nil {
                return err
            }

            return nil
        }
        return err
    }

    err = activeLicense.Update(data)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to create license entity from license data")
    }

    updatedStorableLicense := licensetypes.NewStorableLicenseFromLicense(activeLicense)
    err = provider.store.Update(ctx, organizationID, updatedStorableLicense)
    if err != nil {
        return err
    }

    stats := licensetypes.NewStatsFromLicense(activeLicense)
    provider.analytics.Send(ctx,
        analyticstypes.Track{
            UserId:     "stats_" + organizationID.String(),
            Event:      "License Updated",
            Properties: analyticstypes.NewPropertiesFromMap(stats),
            Context: &analyticstypes.Context{
                Extra: map[string]interface{}{
                    analyticstypes.KeyGroupID: organizationID.String(),
                },
            },
        },
        analyticstypes.Group{
            UserId:  "stats_" + organizationID.String(),
            GroupId: organizationID.String(),
            Traits:  analyticstypes.NewTraitsFromMap(stats),
        },
    )

    return nil
}

func (provider *provider) Checkout(ctx context.Context, organizationID valuer.UUID, postableSubscription *licensetypes.PostableSubscription) (*licensetypes.GettableSubscription, error) {
    activeLicense, err := provider.GetActive(ctx, organizationID)
    if err != nil {
        return nil, err
    }

    body, err := json.Marshal(postableSubscription)
    if err != nil {
        return nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to marshal checkout payload")
    }

    response, err := provider.zeus.GetCheckoutURL(ctx, activeLicense.Key, body)
    if err != nil {
        return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to generate checkout session")
    }

    return &licensetypes.GettableSubscription{RedirectURL: gjson.GetBytes(response, "url").String()}, nil
}

func (provider *provider) Portal(ctx context.Context, organizationID valuer.UUID, postableSubscription *licensetypes.PostableSubscription) (*licensetypes.GettableSubscription, error) {
    activeLicense, err := provider.GetActive(ctx, organizationID)
    if err != nil {
        return nil, err
    }

    body, err := json.Marshal(postableSubscription)
    if err != nil {
        return nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to marshal portal payload")
    }

    response, err := provider.zeus.GetPortalURL(ctx, activeLicense.Key, body)
    if err != nil {
        return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to generate portal session")
    }

    return &licensetypes.GettableSubscription{RedirectURL: gjson.GetBytes(response, "url").String()}, nil
}

func (provider *provider) GetFeatureFlags(ctx context.Context, organizationID valuer.UUID) ([]*licensetypes.Feature, error) {
    license, err := provider.GetActive(ctx, organizationID)
    if err != nil {
        if errors.Ast(err, errors.TypeNotFound) {
            return licensetypes.BasicPlan, nil
        }
        return nil, err
    }

    return license.Features, nil
}

func (provider *provider) Collect(ctx context.Context, orgID valuer.UUID) (map[string]any, error) {
    activeLicense, err := provider.GetActive(ctx, orgID)
    if err != nil {
        if errors.Ast(err, errors.TypeNotFound) {
            return map[string]any{}, nil
        }

        return nil, err
    }

    return licensetypes.NewStatsFromLicense(activeLicense), nil
}
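Reading the Refresh fallback above concretely: with, say, a PollInterval of 24h and a FailureThreshold of 3 (hypothetical values), a license that cannot be revalidated against the upstream server for more than 72 hours is downgraded to `licensetypes.BasicPlan` until a later refresh succeeds.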
ee/licensing/licensingstore/sqllicensingstore/store.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package sqllicensingstore

import (
    "context"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types/licensetypes"
    "github.com/SigNoz/signoz/pkg/valuer"
)

type store struct {
    sqlstore sqlstore.SQLStore
}

func New(sqlstore sqlstore.SQLStore) licensetypes.Store {
    return &store{sqlstore}
}

func (store *store) Create(ctx context.Context, storableLicense *licensetypes.StorableLicense) error {
    _, err := store.
        sqlstore.
        BunDB().
        NewInsert().
        Model(storableLicense).
        Exec(ctx)
    if err != nil {
        return store.sqlstore.WrapAlreadyExistsErrf(err, errors.CodeAlreadyExists, "license with ID: %s already exists", storableLicense.ID)
    }

    return nil
}

func (store *store) Get(ctx context.Context, organizationID valuer.UUID, licenseID valuer.UUID) (*licensetypes.StorableLicense, error) {
    storableLicense := new(licensetypes.StorableLicense)
    err := store.
        sqlstore.
        BunDB().
        NewSelect().
        Model(storableLicense).
        Where("org_id = ?", organizationID).
        Where("id = ?", licenseID).
        Scan(ctx)
    if err != nil {
        return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "license with ID: %s does not exist", licenseID)
    }

    return storableLicense, nil
}

func (store *store) GetAll(ctx context.Context, organizationID valuer.UUID) ([]*licensetypes.StorableLicense, error) {
    storableLicenses := make([]*licensetypes.StorableLicense, 0)
    err := store.
        sqlstore.
        BunDB().
        NewSelect().
        Model(&storableLicenses).
        Where("org_id = ?", organizationID).
        Scan(ctx)
    if err != nil {
        return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "licenses for organizationID: %s does not exists", organizationID)
    }

    return storableLicenses, nil
}

func (store *store) Update(ctx context.Context, organizationID valuer.UUID, storableLicense *licensetypes.StorableLicense) error {
    _, err := store.
        sqlstore.
        BunDB().
        NewUpdate().
        Model(storableLicense).
        WherePK().
        Where("org_id = ?", organizationID).
        Exec(ctx)
    if err != nil {
        return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to update license with ID: %s", storableLicense.ID)
    }

    return nil
}
@@ -1,4 +0,0 @@
.vscode
README.md
signoz.db
bin
@@ -1,34 +0,0 @@
# use a minimal alpine image
FROM alpine:3.20.3

# Add Maintainer Info
LABEL maintainer="signoz"

# define arguments that can be passed during build time
ARG TARGETOS TARGETARCH

# add ca-certificates in case you need them
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*

# set working directory
WORKDIR /root

# copy the signoz binary
COPY ee/query-service/bin/signoz-${TARGETOS}-${TARGETARCH} /root/signoz

# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
COPY pkg/query-service/templates /root/templates

# Make signoz executable for non-root users
RUN chmod 755 /root /root/signoz

# Copy frontend
COPY frontend/build/ /etc/signoz/web/

# run the binary
ENTRYPOINT ["./signoz"]

CMD ["-config", "/root/config/prometheus.yml"]

EXPOSE 8080
@@ -3,8 +3,9 @@ package anomaly
 import (
     "context"

-    querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
-    "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
+    querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
+    "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+    "github.com/SigNoz/signoz/pkg/valuer"
 )

 type DailyProvider struct {
@@ -28,17 +29,16 @@ func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvi
     }

     dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
-        Reader:        dp.reader,
-        Cache:         dp.cache,
-        KeyGenerator:  queryBuilder.NewKeyGenerator(),
-        FluxInterval:  dp.fluxInterval,
-        FeatureLookup: dp.ff,
+        Reader:       dp.reader,
+        Cache:        dp.cache,
+        KeyGenerator: queryBuilder.NewKeyGenerator(),
+        FluxInterval: dp.fluxInterval,
     })

     return dp
 }

-func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
     req.Seasonality = SeasonalityDaily
-    return p.getAnomalies(ctx, req)
+    return p.getAnomalies(ctx, orgID, req)
 }
@@ -3,8 +3,9 @@ package anomaly
 import (
 	"context"
 
-	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
-	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
+	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
+	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )
 
 type HourlyProvider struct {
@@ -28,17 +29,16 @@ func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyPr
 	}
 
 	hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
-		Reader:        hp.reader,
-		Cache:         hp.cache,
-		KeyGenerator:  queryBuilder.NewKeyGenerator(),
-		FluxInterval:  hp.fluxInterval,
-		FeatureLookup: hp.ff,
+		Reader:       hp.reader,
+		Cache:        hp.cache,
+		KeyGenerator: queryBuilder.NewKeyGenerator(),
+		FluxInterval: hp.fluxInterval,
 	})
 
 	return hp
 }
 
-func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	req.Seasonality = SeasonalityHourly
-	return p.getAnomalies(ctx, req)
+	return p.getAnomalies(ctx, orgID, req)
 }
@@ -4,8 +4,8 @@ import (
 	"math"
 	"time"
 
-	"go.signoz.io/signoz/pkg/query-service/common"
-	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+	"github.com/SigNoz/signoz/pkg/query-service/common"
+	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 )
 
 type Seasonality string
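Note: Seasonality is a string-typed enum; the providers in this changeset reference SeasonalityDaily, SeasonalityHourly and SeasonalityWeekly. A sketch of the shape — the literal values are assumed, since only the identifiers appear in this diff:

const (
	SeasonalityHourly Seasonality = "hourly" // assumed literal
	SeasonalityDaily  Seasonality = "daily"  // assumed literal
	SeasonalityWeekly Seasonality = "weekly" // assumed literal
)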
@@ -2,8 +2,10 @@ package anomaly
 
 import (
 	"context"
 
+	"github.com/SigNoz/signoz/pkg/valuer"
 )
 
 type Provider interface {
-	GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
+	GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
 }
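Note: every Provider implementation is now org-scoped, so a caller has to resolve the organization before requesting anomalies. A minimal sketch, with the handler name invented for illustration:

func handleAnomalies(ctx context.Context, p Provider, rawOrgID string, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
	orgID, err := valuer.NewUUID(rawOrgID) // same parsing the HTTP handlers below apply to claims.OrgID
	if err != nil {
		return nil, err
	}
	return p.GetAnomalies(ctx, orgID, req)
}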
@@ -5,11 +5,12 @@ import (
 	"math"
 	"time"
 
-	"go.signoz.io/signoz/pkg/query-service/cache"
-	"go.signoz.io/signoz/pkg/query-service/interfaces"
-	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
-	"go.signoz.io/signoz/pkg/query-service/postprocess"
-	"go.signoz.io/signoz/pkg/query-service/utils/labels"
+	"github.com/SigNoz/signoz/pkg/cache"
+	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
+	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
+	"github.com/SigNoz/signoz/pkg/query-service/postprocess"
+	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"go.uber.org/zap"
 )
 
@@ -38,12 +39,6 @@ func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericPr
 	}
 }
 
-func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
-	return func(p T) {
-		p.GetBaseSeasonalProvider().ff = ff
-	}
-}
-
 func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
 	return func(p T) {
 		p.GetBaseSeasonalProvider().reader = reader
@@ -56,7 +51,6 @@ type BaseSeasonalProvider struct {
 	fluxInterval time.Duration
 	cache        cache.Cache
 	keyGenerator cache.KeyGenerator
-	ff           interfaces.FeatureLookup
 }
 
 func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
@@ -66,9 +60,9 @@ func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomaly
 	return prepareAnomalyQueryParams(req.Params, req.Seasonality)
 }
 
-func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
+func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
 	zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
-	currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
+	currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentPeriodQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -79,7 +73,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
-	pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
+	pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastPeriodQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -90,7 +84,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
-	currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
+	currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentSeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -101,7 +95,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
-	pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
+	pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastSeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -112,7 +106,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
-	past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
+	past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past2SeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -123,7 +117,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}
 
 	zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
-	past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
+	past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past3SeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -313,6 +307,9 @@ func (p *BaseSeasonalProvider) getScore(
 	series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
 ) float64 {
 	expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
+	if expectedValue < 0 {
+		expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
+	}
 	return (value - expectedValue) / p.getStdDev(weekSeries)
 }
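Note: the score is a z-score-style deviation — the gap between the observed value and the seasonal expectation, normalized by the standard deviation of the current-season series — and the new branch falls back to a moving average when the expectation goes negative. A quick numeric sketch with invented values:

func exampleScore() float64 {
	value, expected, stdDev := 130.0, 100.0, 15.0
	return (value - expected) / stdDev // = 2.0, i.e. two standard deviations above expectation
}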
 
@@ -339,9 +336,9 @@ func (p *BaseSeasonalProvider) getAnomalyScores(
 	return anomalyScoreSeries
 }
 
-func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	anomalyParams := p.getQueryParams(req)
-	anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
+	anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
 	if err != nil {
 		return nil, err
 	}
@@ -3,8 +3,9 @@ package anomaly
 import (
 	"context"
 
-	querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
-	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
+	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
+	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )
 
 type WeeklyProvider struct {
@@ -27,17 +28,16 @@ func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyPr
 	}
 
 	wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
-		Reader:        wp.reader,
-		Cache:         wp.cache,
-		KeyGenerator:  queryBuilder.NewKeyGenerator(),
-		FluxInterval:  wp.fluxInterval,
-		FeatureLookup: wp.ff,
+		Reader:       wp.reader,
+		Cache:        wp.cache,
+		KeyGenerator: queryBuilder.NewKeyGenerator(),
+		FluxInterval: wp.fluxInterval,
 	})
 
 	return wp
 }
 
-func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	req.Seasonality = SeasonalityWeekly
-	return p.getAnomalies(ctx, req)
+	return p.getAnomalies(ctx, orgID, req)
 }
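Note: all three providers are assembled through the generic option pattern from the base file. A hypothetical wiring sketch — only WithReader and WithKeyGenerator are visible in this diff, so treat the exact option set as an assumption:

wp := NewWeeklyProvider(
	WithReader[*WeeklyProvider](reader),
	WithKeyGenerator[*WeeklyProvider](queryBuilder.NewKeyGenerator()),
)
resp, err := wp.GetAnomalies(ctx, orgID, &GetAnomaliesRequest{Params: params})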
@@ -5,39 +5,33 @@ import (
 	"net/http/httputil"
 	"time"
 
+	"github.com/SigNoz/signoz/ee/licensing/httplicensing"
+	"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
+	"github.com/SigNoz/signoz/ee/query-service/usage"
+	"github.com/SigNoz/signoz/pkg/alertmanager"
+	"github.com/SigNoz/signoz/pkg/apis/fields"
+	"github.com/SigNoz/signoz/pkg/http/middleware"
+	querierAPI "github.com/SigNoz/signoz/pkg/querier"
+	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
+	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
+	"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
+	"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
+	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
+	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
+	rules "github.com/SigNoz/signoz/pkg/query-service/rules"
+	"github.com/SigNoz/signoz/pkg/signoz"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/version"
+	"github.com/gorilla/mux"
-	"go.signoz.io/signoz/ee/query-service/dao"
-	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
-	"go.signoz.io/signoz/ee/query-service/interfaces"
-	"go.signoz.io/signoz/ee/query-service/license"
-	"go.signoz.io/signoz/ee/query-service/usage"
-	"go.signoz.io/signoz/pkg/alertmanager"
-	baseapp "go.signoz.io/signoz/pkg/query-service/app"
-	"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
-	"go.signoz.io/signoz/pkg/query-service/app/integrations"
-	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
-	"go.signoz.io/signoz/pkg/query-service/cache"
-	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	rules "go.signoz.io/signoz/pkg/query-service/rules"
-	"go.signoz.io/signoz/pkg/query-service/version"
-	"go.signoz.io/signoz/pkg/signoz"
-	"go.signoz.io/signoz/pkg/types/authtypes"
 )
 
 type APIHandlerOptions struct {
-	DataConnector                 interfaces.DataConnector
-	SkipConfig                    *basemodel.SkipConfig
-	PreferSpanMetrics             bool
-	AppDao                        dao.ModelDao
+	DataConnector                 interfaces.Reader
 	RulesManager                  *rules.Manager
 	UsageManager                  *usage.Manager
-	FeatureFlags                  baseint.FeatureLookup
-	LicenseManager                *license.Manager
 	IntegrationsController        *integrations.Controller
 	CloudIntegrationsController   *cloudintegrations.Controller
 	LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
-	Cache                         cache.Cache
 	Gateway                       *httputil.ReverseProxy
 	GatewayUrl                    string
 	// Querier Influx Interval
@@ -54,23 +48,18 @@ type APIHandler struct {
 
 // NewAPIHandler returns an APIHandler
 func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
 
 	baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
 		Reader:                        opts.DataConnector,
-		SkipConfig:                    opts.SkipConfig,
-		PreferSpanMetrics:             opts.PreferSpanMetrics,
-		AppDao:                        opts.AppDao,
 		RuleManager:                   opts.RulesManager,
-		FeatureFlags:                  opts.FeatureFlags,
 		IntegrationsController:        opts.IntegrationsController,
 		CloudIntegrationsController:   opts.CloudIntegrationsController,
 		LogsParsingPipelineController: opts.LogsParsingPipelineController,
-		Cache:                         opts.Cache,
 		FluxInterval:                  opts.FluxInterval,
-		UseLogsNewSchema:              opts.UseLogsNewSchema,
-		UseTraceNewSchema:             opts.UseTraceNewSchema,
 		AlertmanagerAPI:               alertmanager.NewAPI(signoz.Alertmanager),
+		LicensingAPI:                  httplicensing.NewLicensingAPI(signoz.Licensing),
+		FieldsAPI:                     fields.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.TelemetryStore),
+		Signoz:                        signoz,
+		QuerierAPI:                    querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
 	})
 
 	if err != nil {
@@ -84,103 +73,48 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
 	return ah, nil
 }
 
-func (ah *APIHandler) FF() baseint.FeatureLookup {
-	return ah.opts.FeatureFlags
-}
-
 func (ah *APIHandler) RM() *rules.Manager {
 	return ah.opts.RulesManager
 }
 
-func (ah *APIHandler) LM() *license.Manager {
-	return ah.opts.LicenseManager
-}
-
 func (ah *APIHandler) UM() *usage.Manager {
 	return ah.opts.UsageManager
 }
 
-func (ah *APIHandler) AppDao() dao.ModelDao {
-	return ah.opts.AppDao
-}
-
 func (ah *APIHandler) Gateway() *httputil.ReverseProxy {
 	return ah.opts.Gateway
 }
 
-func (ah *APIHandler) CheckFeature(f string) bool {
-	err := ah.FF().CheckFeature(f)
-	return err == nil
-}
-
 // RegisterRoutes registers routes for this handler on the given router
-func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
+func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
 	// note: add ee override methods first
 
 	// routes available only in ee version
 
-	router.HandleFunc("/api/v1/featureFlags",
-		am.OpenAccess(ah.getFeatureFlags)).
-		Methods(http.MethodGet)
-
-	router.HandleFunc("/api/v1/loginPrecheck",
-		am.OpenAccess(ah.precheckLogin)).
-		Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/features", am.ViewAccess(ah.getFeatureFlags)).Methods(http.MethodGet)
 
 	// paid plans specific routes
-	router.HandleFunc("/api/v1/complete/saml",
-		am.OpenAccess(ah.receiveSAML)).
-		Methods(http.MethodPost)
-
-	router.HandleFunc("/api/v1/complete/google",
-		am.OpenAccess(ah.receiveGoogleAuth)).
-		Methods(http.MethodGet)
-
-	router.HandleFunc("/api/v1/orgs/{orgId}/domains",
-		am.AdminAccess(ah.listDomainsByOrg)).
-		Methods(http.MethodGet)
-
-	router.HandleFunc("/api/v1/domains",
-		am.AdminAccess(ah.postDomain)).
-		Methods(http.MethodPost)
-
-	router.HandleFunc("/api/v1/domains/{id}",
-		am.AdminAccess(ah.putDomain)).
-		Methods(http.MethodPut)
-
-	router.HandleFunc("/api/v1/domains/{id}",
-		am.AdminAccess(ah.deleteDomain)).
-		Methods(http.MethodDelete)
+	router.HandleFunc("/api/v1/complete/saml", am.OpenAccess(ah.receiveSAML)).Methods(http.MethodPost)
 
 	// base overrides
 	router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
-	router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
 
 	// PAT APIs
 	router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.createPAT)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.getPATs)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.updatePAT)).Methods(http.MethodPut)
 	router.HandleFunc("/api/v1/pats/{id}", am.AdminAccess(ah.revokePAT)).Methods(http.MethodDelete)
 
-	router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.checkout)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/checkout", am.AdminAccess(ah.LicensingAPI.Checkout)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.portalSession)).Methods(http.MethodPost)
-
-	router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
-	router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
+	router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.LicensingAPI.Portal)).Methods(http.MethodPost)
 
 	// v3
-	router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet)
-	router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost)
-	router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.refreshLicensesV3)).Methods(http.MethodPut)
-	router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.getActiveLicenseV3)).Methods(http.MethodGet)
+	router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Activate)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Refresh)).Methods(http.MethodPut)
+	router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.LicensingAPI.GetActive)).Methods(http.MethodGet)
 
 	// v4
 	router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
 
 	// v5
+	router.HandleFunc("/api/v5/query_range", am.ViewAccess(ah.queryRangeV5)).Methods(http.MethodPost)
+
+	router.HandleFunc("/api/v5/substitute_vars", am.ViewAccess(ah.QuerierAPI.ReplaceVariables)).Methods(http.MethodPost)
 
 	// Gateway
 	router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP))
@@ -188,7 +122,7 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
 
 }
 
-func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
+func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
 
 	ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
 
@@ -200,9 +134,8 @@ func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *ba
 }
 
 func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
-	version := version.GetVersion()
 	versionResponse := basemodel.GetVersionResponse{
-		Version:        version,
+		Version:        version.Info.Version(),
 		EE:             "Y",
 		SetupCompleted: ah.SetupCompleted,
 	}
@@ -3,192 +3,16 @@ package api
 import (
 	"context"
 	"encoding/base64"
-	"encoding/json"
 	"fmt"
-	"io"
 	"net/http"
 	"net/url"
 
 	"github.com/gorilla/mux"
 	"go.uber.org/zap"
 
-	"go.signoz.io/signoz/ee/query-service/constants"
-	"go.signoz.io/signoz/ee/query-service/model"
-	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
+	"github.com/SigNoz/signoz/pkg/query-service/constants"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )
-func parseRequest(r *http.Request, req interface{}) error {
-	defer r.Body.Close()
-	requestBody, err := io.ReadAll(r.Body)
-	if err != nil {
-		return err
-	}
-
-	err = json.Unmarshal(requestBody, &req)
-	return err
-}
-
-// loginUser overrides base handler and considers SSO case.
-func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
-
-	req := basemodel.LoginRequest{}
-	err := parseRequest(r, &req)
-	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	ctx := context.Background()
-
-	if req.Email != "" && ah.CheckFeature(model.SSO) {
-		var apierr basemodel.BaseApiError
-		_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
-		if apierr != nil && !apierr.IsNil() {
-			RespondError(w, apierr, nil)
-		}
-	}
-
-	// if all looks good, call auth
-	resp, err := baseauth.Login(ctx, &req, ah.opts.JWT)
-	if ah.HandleError(w, err, http.StatusUnauthorized) {
-		return
-	}
-
-	ah.WriteJSON(w, r, resp)
-}
-
-// registerUser registers a user and responds with a precheck
-// so the front-end can decide the login method
-func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
-
-	if !ah.CheckFeature(model.SSO) {
-		ah.APIHandler.Register(w, r)
-		return
-	}
-
-	ctx := context.Background()
-	var req *baseauth.RegisterRequest
-
-	defer r.Body.Close()
-	requestBody, err := io.ReadAll(r.Body)
-	if err != nil {
-		zap.L().Error("received no input in api", zap.Error(err))
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	err = json.Unmarshal(requestBody, &req)
-
-	if err != nil {
-		zap.L().Error("received invalid user registration request", zap.Error(err))
-		RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
-		return
-	}
-
-	// get invite object
-	invite, err := baseauth.ValidateInvite(ctx, req)
-	if err != nil {
-		zap.L().Error("failed to validate invite token", zap.Error(err))
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	if invite == nil {
-		zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
-		RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
-		return
-	}
-
-	// get auth domain from email domain
-	domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
-	if apierr != nil {
-		zap.L().Error("failed to get domain from email", zap.Error(apierr))
-		RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
-	}
-
-	precheckResp := &basemodel.PrecheckResponse{
-		SSO:    false,
-		IsUser: false,
-	}
-
-	if domain != nil && domain.SsoEnabled {
-		// sso is enabled, create user and respond precheck data
-		user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
-		if apierr != nil {
-			RespondError(w, apierr, nil)
-			return
-		}
-
-		var precheckError basemodel.BaseApiError
-
-		precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
-		if precheckError != nil {
-			RespondError(w, precheckError, precheckResp)
-		}
-
-	} else {
-		// no-sso, validate password
-		if err := baseauth.ValidatePassword(req.Password); err != nil {
-			RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
-			return
-		}
-
-		_, registerError := baseauth.Register(ctx, req, ah.Signoz.Alertmanager)
-		if !registerError.IsNil() {
-			RespondError(w, apierr, nil)
-			return
-		}
-
-		precheckResp.IsUser = true
-	}
-
-	ah.Respond(w, precheckResp)
-}
-
-// getInvite returns the invite object details for the given invite token. We do not need to
-// protect this API because invite token itself is meant to be private.
-func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
-	token := mux.Vars(r)["token"]
-	sourceUrl := r.URL.Query().Get("ref")
-	ctx := context.Background()
-
-	inviteObject, err := baseauth.GetInvite(context.Background(), token)
-	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	resp := model.GettableInvitation{
-		InvitationResponseObject: inviteObject,
-	}
-
-	precheck, apierr := ah.AppDao().PrecheckLogin(ctx, inviteObject.Email, sourceUrl)
-	resp.Precheck = precheck
-
-	if apierr != nil {
-		RespondError(w, apierr, resp)
-	}
-
-	ah.WriteJSON(w, r, resp)
-}
-
-// PrecheckLogin enables browser login page to display appropriate
-// login methods
-func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-
-	email := r.URL.Query().Get("email")
-	sourceUrl := r.URL.Query().Get("ref")
-
-	resp, apierr := ah.AppDao().PrecheckLogin(ctx, email, sourceUrl)
-	if apierr != nil {
-		RespondError(w, apierr, resp)
-	}
-
-	ah.Respond(w, resp)
-}
-
 func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
 	ssoError := []byte("Login failed. Please contact your system administrator")
 	dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
@@ -197,84 +21,12 @@ func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string)
 	http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
 }
 
-// receiveGoogleAuth completes google OAuth response and forwards a request
-// to front-end to sign user in
-func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
-	redirectUri := constants.GetDefaultSiteURL()
-	ctx := context.Background()
-
-	if !ah.CheckFeature(model.SSO) {
-		zap.L().Error("[receiveGoogleAuth] sso requested but feature unavailable in org domain")
-		http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
-		return
-	}
-
-	q := r.URL.Query()
-	if errType := q.Get("error"); errType != "" {
-		zap.L().Error("[receiveGoogleAuth] failed to login with google auth", zap.String("error", errType), zap.String("error_description", q.Get("error_description")))
-		http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
-		return
-	}
-
-	relayState := q.Get("state")
-	zap.L().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
-
-	parsedState, err := url.Parse(relayState)
-	if err != nil || relayState == "" {
-		zap.L().Error("[receiveGoogleAuth] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
-		handleSsoError(w, r, redirectUri)
-		return
-	}
-
-	// upgrade redirect url from the relay state for better accuracy
-	redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
-
-	// fetch domain by parsing relay state.
-	domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
-	if err != nil {
-		handleSsoError(w, r, redirectUri)
-		return
-	}
-
-	// now that we have domain, use domain to fetch sso settings.
-	// prepare google callback handler using parsedState -
-	// which contains redirect URL (front-end endpoint)
-	callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
-	if err != nil {
-		zap.L().Error("[receiveGoogleAuth] failed to prepare google oauth provider", zap.String("domain", domain.String()), zap.Error(err))
-		handleSsoError(w, r, redirectUri)
-		return
-	}
-
-	identity, err := callbackHandler.HandleCallback(r)
-	if err != nil {
-		zap.L().Error("[receiveGoogleAuth] failed to process HandleCallback ", zap.String("domain", domain.String()), zap.Error(err))
-		handleSsoError(w, r, redirectUri)
-		return
-	}
-
-	nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email, ah.opts.JWT)
-	if err != nil {
-		zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
-		handleSsoError(w, r, redirectUri)
-		return
-	}
-
-	http.Redirect(w, r, nextPage, http.StatusSeeOther)
-}
 // receiveSAML completes a SAML request and gets user logged in
 func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
 	// this is the source url that initiated the login request
 	redirectUri := constants.GetDefaultSiteURL()
 	ctx := context.Background()
 
-	if !ah.CheckFeature(model.SSO) {
-		zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
-		http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
-		return
-	}
-
 	err := r.ParseForm()
 	if err != nil {
 		zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
@@ -298,12 +50,25 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
 	redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
 
 	// fetch domain by parsing relay state.
-	domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
+	domain, err := ah.Signoz.Modules.User.GetDomainFromSsoResponse(ctx, parsedState)
 	if err != nil {
 		handleSsoError(w, r, redirectUri)
 		return
 	}
 
+	orgID, err := valuer.NewUUID(domain.OrgID)
+	if err != nil {
+		handleSsoError(w, r, redirectUri)
+		return
+	}
+
+	_, err = ah.Signoz.Licensing.GetActive(ctx, orgID)
+	if err != nil {
+		zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
+		http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
+		return
+	}
+
 	sp, err := domain.PrepareSamlRequest(parsedState)
 	if err != nil {
 		zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
@@ -331,7 +96,7 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email, ah.opts.JWT)
+	nextPage, err := ah.Signoz.Modules.User.PrepareSsoRedirect(ctx, redirectUri, email)
 	if err != nil {
 		zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
 		handleSsoError(w, r, redirectUri)
@@ -10,15 +10,15 @@ import (
 	"strings"
 	"time"
 
+	"github.com/SigNoz/signoz/ee/query-service/constants"
+	"github.com/SigNoz/signoz/pkg/errors"
+	"github.com/SigNoz/signoz/pkg/http/render"
+	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
+	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/google/uuid"
 	"github.com/gorilla/mux"
-	"go.signoz.io/signoz/ee/query-service/constants"
-	"go.signoz.io/signoz/ee/query-service/model"
-	"go.signoz.io/signoz/pkg/query-service/auth"
-	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
-	"go.signoz.io/signoz/pkg/query-service/dao"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	"go.signoz.io/signoz/pkg/types"
 	"go.uber.org/zap"
 )
 
@@ -30,6 +30,18 @@ type CloudIntegrationConnectionParamsResponse struct {
 }
 
 func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
+		return
+	}
+
 	cloudProvider := mux.Vars(r)["cloudProvider"]
 	if cloudProvider != "aws" {
 		RespondError(w, basemodel.BadRequest(fmt.Errorf(
@@ -38,15 +50,7 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
 		return
 	}
 
-	currentUser, err := auth.GetUserFromReqContext(r.Context())
-	if err != nil {
-		RespondError(w, basemodel.UnauthorizedError(fmt.Errorf(
-			"couldn't deduce current user: %w", err,
-		)), nil)
-		return
-	}
-
-	apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), currentUser.OrgID, cloudProvider)
+	apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
 	if apiErr != nil {
 		RespondError(w, basemodel.WrapApiError(
 			apiErr, "couldn't provision PAT for cloud integration:",
@@ -58,11 +62,9 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
 		SigNozAPIKey: apiKey,
 	}
 
-	license, apiErr := ah.LM().GetRepo().GetActiveLicense(r.Context())
-	if apiErr != nil {
-		RespondError(w, basemodel.WrapApiError(
-			apiErr, "couldn't look for active license",
-		), nil)
+	license, err := ah.Signoz.Licensing.GetActive(r.Context(), orgID)
+	if err != nil {
+		render.Error(w, err)
 		return
 	}
 
@@ -118,7 +120,14 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
 		return "", apiErr
 	}
 
-	allPats, err := ah.AppDao().ListPATs(ctx)
+	orgIdUUID, err := valuer.NewUUID(orgId)
+	if err != nil {
+		return "", basemodel.InternalError(fmt.Errorf(
+			"couldn't parse orgId: %w", err,
+		))
+	}
+
+	allPats, err := ah.Signoz.Modules.User.ListAPIKeys(ctx, orgIdUUID)
 	if err != nil {
 		return "", basemodel.InternalError(fmt.Errorf(
 			"couldn't list PATs: %w", err,
@@ -135,32 +144,36 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
 		zap.String("cloudProvider", cloudProvider),
 	)
 
-	newPAT := model.PAT{
-		Token:     generatePATToken(),
-		UserID:    integrationUser.ID,
-		Name:      integrationPATName,
-		Role:      baseconstants.ViewerGroup,
-		ExpiresAt: 0,
-		CreatedAt: time.Now().Unix(),
-		UpdatedAt: time.Now().Unix(),
-	}
-	integrationPAT, err := ah.AppDao().CreatePAT(ctx, newPAT)
+	newPAT, err := types.NewStorableAPIKey(
+		integrationPATName,
+		integrationUser.ID,
+		types.RoleViewer,
+		0,
+	)
 	if err != nil {
 		return "", basemodel.InternalError(fmt.Errorf(
 			"couldn't create cloud integration PAT: %w", err,
 		))
 	}
-	return integrationPAT.Token, nil
+
+	err = ah.Signoz.Modules.User.CreateAPIKey(ctx, newPAT)
+	if err != nil {
+		return "", basemodel.InternalError(fmt.Errorf(
+			"couldn't create cloud integration PAT: %w", err,
+		))
+	}
+	return newPAT.Token, nil
 }
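Note: PAT provisioning now goes through the user module instead of the legacy DAO. Condensed, the new flow looks like this — variable names are illustrative, and reading the 0 expiry as "never expires" is an assumption carried over from the old model.PAT usage above:

key, err := types.NewStorableAPIKey(name, userID, types.RoleViewer, 0)
if err != nil {
	return err
}
if err := signoz.Modules.User.CreateAPIKey(ctx, key); err != nil {
	return err
}
token := key.Token // handed back to the cloud integration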
 
 func (ah *APIHandler) getOrCreateCloudIntegrationUser(
 	ctx context.Context, orgId string, cloudProvider string,
 ) (*types.User, *basemodel.ApiError) {
-	cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)
+	cloudIntegrationUser := fmt.Sprintf("%s-integration", cloudProvider)
+	email := fmt.Sprintf("%s@signoz.io", cloudIntegrationUser)
 
-	integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
-	if apiErr != nil {
-		return nil, basemodel.WrapApiError(apiErr, "couldn't look for integration user")
+	integrationUserResult, err := ah.Signoz.Modules.User.GetUserByEmailInOrg(ctx, orgId, email)
+	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
+		return nil, basemodel.NotFoundError(fmt.Errorf("couldn't look for integration user: %w", err))
 	}
 
 	if integrationUserResult != nil {
@@ -172,33 +185,18 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
 		zap.String("cloudProvider", cloudProvider),
 	)
 
-	newUser := &types.User{
-		ID:    cloudIntegrationUserId,
-		Name:  fmt.Sprintf("%s integration", cloudProvider),
-		Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
-		TimeAuditable: types.TimeAuditable{
-			CreatedAt: time.Now(),
-		},
-		OrgID: orgId,
-	}
-
-	viewerGroup, apiErr := dao.DB().GetGroupByName(ctx, baseconstants.ViewerGroup)
-	if apiErr != nil {
-		return nil, basemodel.WrapApiError(apiErr, "couldn't get viewer group for creating integration user")
-	}
-	newUser.GroupID = viewerGroup.ID
-
-	passwordHash, err := auth.PasswordHash(uuid.NewString())
+	newUser, err := types.NewUser(cloudIntegrationUser, email, types.RoleViewer.String(), orgId)
 	if err != nil {
 		return nil, basemodel.InternalError(fmt.Errorf(
-			"couldn't hash random password for cloud integration user: %w", err,
+			"couldn't create cloud integration user: %w", err,
 		))
 	}
-	newUser.Password = passwordHash
 
-	integrationUser, apiErr := ah.AppDao().CreateUser(ctx, newUser, false)
-	if apiErr != nil {
-		return nil, basemodel.WrapApiError(apiErr, "couldn't create cloud integration user")
+	password, err := types.NewFactorPassword(uuid.NewString())
+
+	integrationUser, err := ah.Signoz.Modules.User.CreateUserWithPassword(ctx, newUser, password)
+	if err != nil {
+		return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
 	}
 
 	return integrationUser, nil
@@ -1,63 +0,0 @@
-package api
-
-import (
-	"net/http"
-	"strings"
-
-	"github.com/gorilla/mux"
-	"go.signoz.io/signoz/pkg/errors"
-	"go.signoz.io/signoz/pkg/http/render"
-	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
-	"go.signoz.io/signoz/pkg/query-service/auth"
-	"go.signoz.io/signoz/pkg/types/authtypes"
-)
-
-func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
-	ah.lockUnlockDashboard(w, r, true)
-}
-
-func (ah *APIHandler) unlockDashboard(w http.ResponseWriter, r *http.Request) {
-	ah.lockUnlockDashboard(w, r, false)
-}
-
-func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request, lock bool) {
-	// Locking can only be done by the owner of the dashboard
-	// or an admin
-
-	// - Fetch the dashboard
-	// - Check if the user is the owner or an admin
-	// - If yes, lock/unlock the dashboard
-	// - If no, return 403
-
-	// Get the dashboard UUID from the request
-	uuid := mux.Vars(r)["uuid"]
-	if strings.HasPrefix(uuid, "integration") {
-		render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "dashboards created by integrations cannot be modified"))
-		return
-	}
-
-	claims, ok := authtypes.ClaimsFromContext(r.Context())
-	if !ok {
-		render.Error(w, errors.Newf(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "unauthenticated"))
-		return
-	}
-	dashboard, err := dashboards.GetDashboard(r.Context(), claims.OrgID, uuid)
-	if err != nil {
-		render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to get dashboard"))
-		return
-	}
-
-	if !auth.IsAdminV2(claims) && (dashboard.CreatedBy != claims.Email) {
-		render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "You are not authorized to lock/unlock this dashboard"))
-		return
-	}
-
-	// Lock/Unlock the dashboard
-	err = dashboards.LockUnlockDashboard(r.Context(), claims.OrgID, uuid, lock)
-	if err != nil {
-		render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to lock/unlock dashboard"))
-		return
-	}
-
-	ah.Respond(w, "Dashboard updated successfully")
-}
@@ -1,90 +0,0 @@
-package api
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"net/http"
-
-	"github.com/google/uuid"
-	"github.com/gorilla/mux"
-	"go.signoz.io/signoz/ee/query-service/model"
-)
-
-func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
-	orgId := mux.Vars(r)["orgId"]
-	domains, apierr := ah.AppDao().ListDomains(context.Background(), orgId)
-	if apierr != nil {
-		RespondError(w, apierr, domains)
-		return
-	}
-	ah.Respond(w, domains)
-}
-
-func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-
-	req := model.OrgDomain{}
-
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	if err := req.ValidNew(); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	if apierr := ah.AppDao().CreateDomain(ctx, &req); apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-
-	ah.Respond(w, &req)
-}
-
-func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-
-	domainIdStr := mux.Vars(r)["id"]
-	domainId, err := uuid.Parse(domainIdStr)
-	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	req := model.OrgDomain{Id: domainId}
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-	req.Id = domainId
-	if err := req.Valid(nil); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-	}
-
-	if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-
-	ah.Respond(w, &req)
-}
-
-func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
-	domainIdStr := mux.Vars(r)["id"]
-
-	domainId, err := uuid.Parse(domainIdStr)
-	if err != nil {
-		RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
-		return
-	}
-
-	apierr := ah.AppDao().DeleteDomain(context.Background(), domainId)
-	if apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-	ah.Respond(w, nil)
-}
@@ -8,14 +8,30 @@ import (
 	"net/http"
 	"time"
 
-	"go.signoz.io/signoz/ee/query-service/constants"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
+	"github.com/SigNoz/signoz/ee/query-service/constants"
+	pkgError "github.com/SigNoz/signoz/pkg/errors"
+	"github.com/SigNoz/signoz/pkg/http/render"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/types/licensetypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"go.uber.org/zap"
 )
 
 func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
-	featureSet, err := ah.FF().GetFeatureFlags()
+	claims, err := authtypes.ClaimsFromContext(ctx)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, pkgError.Newf(pkgError.TypeInvalidInput, pkgError.CodeInvalidInput, "orgId is invalid"))
+		return
+	}
+
+	featureSet, err := ah.Signoz.Licensing.GetFeatureFlags(r.Context(), orgID)
 	if err != nil {
 		ah.HandleError(w, err, http.StatusInternalServerError)
 		return
@@ -23,7 +39,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
 
 	if constants.FetchFeatures == "true" {
 		zap.L().Debug("fetching license")
-		license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
+		license, err := ah.Signoz.Licensing.GetActive(ctx, orgID)
 		if err != nil {
 			zap.L().Error("failed to fetch license", zap.Error(err))
 		} else if license == nil {
@@ -43,10 +59,17 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 
-	if ah.opts.PreferSpanMetrics {
-		for idx := range featureSet {
-			feature := &featureSet[idx]
-			if feature.Name == basemodel.UseSpanMetrics {
+	if constants.IsPreferSpanMetrics {
+		for idx, feature := range featureSet {
+			if feature.Name == licensetypes.UseSpanMetrics {
 				featureSet[idx].Active = true
 			}
 		}
 	}
+
+	if constants.IsDotMetricsEnabled {
+		for idx, feature := range featureSet {
+			if feature.Name == licensetypes.DotMetricsEnabled {
+				featureSet[idx].Active = true
+			}
+		}
@@ -57,7 +80,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
 
 // fetchZeusFeatures makes an HTTP GET request to the /zeusFeatures endpoint
 // and returns the FeatureSet.
-func fetchZeusFeatures(url, licenseKey string) (basemodel.FeatureSet, error) {
+func fetchZeusFeatures(url, licenseKey string) ([]*licensetypes.Feature, error) {
 	// Check if the URL is empty
 	if url == "" {
 		return nil, fmt.Errorf("url is empty")
@@ -116,28 +139,28 @@ func fetchZeusFeatures(url, licenseKey string) (basemodel.FeatureSet, error) {
 }
 
 type ZeusFeaturesResponse struct {
-	Status string               `json:"status"`
-	Data   basemodel.FeatureSet `json:"data"`
+	Status string                  `json:"status"`
+	Data   []*licensetypes.Feature `json:"data"`
 }
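Note: a sketch of how this envelope might be consumed inside fetchZeusFeatures, assuming a standard library JSON decode and a "success" status literal (the actual status value is not shown in this diff):

var zr ZeusFeaturesResponse
if err := json.NewDecoder(resp.Body).Decode(&zr); err != nil {
	return nil, err
}
if zr.Status != "success" { // assumed status literal
	return nil, fmt.Errorf("unexpected status %q", zr.Status)
}
return zr.Data, nil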
 
 // MergeFeatureSets merges two FeatureSet arrays with precedence to zeusFeatures.
-func MergeFeatureSets(zeusFeatures, internalFeatures basemodel.FeatureSet) basemodel.FeatureSet {
+func MergeFeatureSets(zeusFeatures, internalFeatures []*licensetypes.Feature) []*licensetypes.Feature {
 	// Create a map to store the merged features
-	featureMap := make(map[string]basemodel.Feature)
+	featureMap := make(map[string]*licensetypes.Feature)
 
 	// Add all features from the otherFeatures set to the map
 	for _, feature := range internalFeatures {
-		featureMap[feature.Name] = feature
+		featureMap[feature.Name.StringValue()] = feature
 	}
 
 	// Add all features from the zeusFeatures set to the map
 	// If a feature already exists (i.e., same name), the zeusFeature will overwrite it
 	for _, feature := range zeusFeatures {
-		featureMap[feature.Name] = feature
+		featureMap[feature.Name.StringValue()] = feature
 	}
 
 	// Convert the map back to a FeatureSet slice
-	var mergedFeatures basemodel.FeatureSet
+	var mergedFeatures []*licensetypes.Feature
 	for _, feature := range featureMap {
 		mergedFeatures = append(mergedFeatures, feature)
 	}
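Note: precedence goes to the first argument — zeus-reported flags overwrite internal flags of the same name, exactly as the conflict test case below expects:

merged := MergeFeatureSets(
	[]*licensetypes.Feature{{Name: valuer.NewString("SSO"), Active: true}},  // zeus
	[]*licensetypes.Feature{{Name: valuer.NewString("SSO"), Active: false}}, // internal
)
// merged holds a single "SSO" feature with Active == true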
@@ -3,78 +3,79 @@ package api
 import (
 	"testing"
 
+	"github.com/SigNoz/signoz/pkg/types/licensetypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/stretchr/testify/assert"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 )
 
 func TestMergeFeatureSets(t *testing.T) {
 	tests := []struct {
 		name             string
-		zeusFeatures     basemodel.FeatureSet
-		internalFeatures basemodel.FeatureSet
-		expected         basemodel.FeatureSet
+		zeusFeatures     []*licensetypes.Feature
+		internalFeatures []*licensetypes.Feature
+		expected         []*licensetypes.Feature
 	}{
 		{
 			name:             "empty zeusFeatures and internalFeatures",
-			zeusFeatures:     basemodel.FeatureSet{},
-			internalFeatures: basemodel.FeatureSet{},
-			expected:         basemodel.FeatureSet{},
+			zeusFeatures:     []*licensetypes.Feature{},
+			internalFeatures: []*licensetypes.Feature{},
+			expected:         []*licensetypes.Feature{},
		},
 		{
 			name: "non-empty zeusFeatures and empty internalFeatures",
-			zeusFeatures: basemodel.FeatureSet{
-				{Name: "Feature1", Active: true},
-				{Name: "Feature2", Active: false},
+			zeusFeatures: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature1"), Active: true},
+				{Name: valuer.NewString("Feature2"), Active: false},
 			},
-			internalFeatures: basemodel.FeatureSet{},
-			expected: basemodel.FeatureSet{
-				{Name: "Feature1", Active: true},
-				{Name: "Feature2", Active: false},
+			internalFeatures: []*licensetypes.Feature{},
+			expected: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature1"), Active: true},
+				{Name: valuer.NewString("Feature2"), Active: false},
 			},
 		},
 		{
 			name:         "empty zeusFeatures and non-empty internalFeatures",
-			zeusFeatures: basemodel.FeatureSet{},
-			internalFeatures: basemodel.FeatureSet{
-				{Name: "Feature1", Active: true},
-				{Name: "Feature2", Active: false},
+			zeusFeatures: []*licensetypes.Feature{},
+			internalFeatures: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature1"), Active: true},
+				{Name: valuer.NewString("Feature2"), Active: false},
 			},
-			expected: basemodel.FeatureSet{
-				{Name: "Feature1", Active: true},
-				{Name: "Feature2", Active: false},
+			expected: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature1"), Active: true},
+				{Name: valuer.NewString("Feature2"), Active: false},
 			},
 		},
 		{
 			name: "non-empty zeusFeatures and non-empty internalFeatures with no conflicts",
-			zeusFeatures: basemodel.FeatureSet{
-				{Name: "Feature1", Active: true},
-				{Name: "Feature3", Active: false},
+			zeusFeatures: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature1"), Active: true},
+				{Name: valuer.NewString("Feature3"), Active: false},
 			},
-			internalFeatures: basemodel.FeatureSet{
-				{Name: "Feature2", Active: true},
-				{Name: "Feature4", Active: false},
+			internalFeatures: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature2"), Active: true},
+				{Name: valuer.NewString("Feature4"), Active: false},
 			},
-			expected: basemodel.FeatureSet{
-				{Name: "Feature1", Active: true},
-				{Name: "Feature2", Active: true},
-				{Name: "Feature3", Active: false},
-				{Name: "Feature4", Active: false},
+			expected: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature1"), Active: true},
+				{Name: valuer.NewString("Feature2"), Active: true},
+				{Name: valuer.NewString("Feature3"), Active: false},
+				{Name: valuer.NewString("Feature4"), Active: false},
 			},
 		},
 		{
 			name: "non-empty zeusFeatures and non-empty internalFeatures with conflicts",
-			zeusFeatures: basemodel.FeatureSet{
-				{Name: "Feature1", Active: true},
-				{Name: "Feature2", Active: false},
+			zeusFeatures: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature1"), Active: true},
+				{Name: valuer.NewString("Feature2"), Active: false},
 			},
-			internalFeatures: basemodel.FeatureSet{
-				{Name: "Feature1", Active: false},
-				{Name: "Feature3", Active: true},
+			internalFeatures: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature1"), Active: false},
+				{Name: valuer.NewString("Feature3"), Active: true},
 			},
-			expected: basemodel.FeatureSet{
-				{Name: "Feature1", Active: true},
-				{Name: "Feature2", Active: false},
-				{Name: "Feature3", Active: true},
+			expected: []*licensetypes.Feature{
+				{Name: valuer.NewString("Feature1"), Active: true},
+				{Name: valuer.NewString("Feature2"), Active: false},
+				{Name: valuer.NewString("Feature3"), Active: true},
 			},
 		},
 	}
@@ -4,11 +4,27 @@ import (
 	"net/http"
 	"strings"
 
-	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
+	"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
+	"github.com/SigNoz/signoz/pkg/errors"
+	"github.com/SigNoz/signoz/pkg/http/render"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )
 
 func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {
 	ctx := req.Context()
+	claims, err := authtypes.ClaimsFromContext(ctx)
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgId is invalid"))
+		return
+	}
+
 	validPath := false
 	for _, allowedPrefix := range gateway.AllowedPrefix {
 		if strings.HasPrefix(req.URL.Path, gateway.RoutePrefix+allowedPrefix) {
@@ -22,9 +38,9 @@ func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request
 		return
 	}
 
-	license, err := ah.LM().GetRepo().GetActiveLicense(ctx)
+	license, err := ah.Signoz.Licensing.GetActive(ctx, orgID)
 	if err != nil {
-		RespondError(rw, err, nil)
+		render.Error(rw, err)
 		return
 	}
 
@@ -5,10 +5,8 @@ import (
	"fmt"
	"net/http"

-	"go.signoz.io/signoz/ee/query-service/constants"
-	"go.signoz.io/signoz/ee/query-service/integrations/signozio"
-	"go.signoz.io/signoz/ee/query-service/model"
-	"go.signoz.io/signoz/pkg/http/render"
+	"github.com/SigNoz/signoz/ee/query-service/constants"
+	"github.com/SigNoz/signoz/ee/query-service/model"
 )

 type DayWiseBreakdown struct {
@@ -47,10 +45,6 @@ type details struct {
	BillTotal float64 `json:"billTotal"`
 }

-type Redirect struct {
-	RedirectURL string `json:"redirectURL"`
-}
-
 type billingDetails struct {
	Status string `json:"status"`
	Data   struct {
@@ -62,93 +56,6 @@ type billingDetails struct {
	} `json:"data"`
 }

-type ApplyLicenseRequest struct {
-	LicenseKey string `json:"key"`
-}
-
-func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
-	ah.listLicensesV2(w, r)
-}
-
-func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
-	activeLicense, err := ah.LM().GetRepo().GetActiveLicenseV3(r.Context())
-	if err != nil {
-		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
-		return
-	}
-
-	// return 404 not found if there is no active license
-	if activeLicense == nil {
-		RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)
-		return
-	}
-
-	// TODO deprecate this when we move away from key for stripe
-	activeLicense.Data["key"] = activeLicense.Key
-	render.Success(w, http.StatusOK, activeLicense.Data)
-}
-
-// this function is called by zeus when inserting licenses in the query-service
-func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
-	var licenseKey ApplyLicenseRequest
-
-	if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	if licenseKey.LicenseKey == "" {
-		RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
-		return
-	}
-
-	_, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
-	if apiError != nil {
-		RespondError(w, apiError, nil)
-		return
-	}
-
-	render.Success(w, http.StatusAccepted, nil)
-}
-
-func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) {
-
-	apiError := ah.LM().RefreshLicense(r.Context())
-	if apiError != nil {
-		RespondError(w, apiError, nil)
-		return
-	}
-
-	render.Success(w, http.StatusNoContent, nil)
-}
-
-func getCheckoutPortalResponse(redirectURL string) *Redirect {
-	return &Redirect{RedirectURL: redirectURL}
-}
-
-func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
-
-	checkoutRequest := &model.CheckoutRequest{}
-	if err := json.NewDecoder(r.Body).Decode(checkoutRequest); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	license := ah.LM().GetActiveLicense()
-	if license == nil {
-		RespondError(w, model.BadRequestStr("cannot proceed with checkout without license key"), nil)
-		return
-	}
-
-	redirectUrl, err := signozio.CheckoutSession(r.Context(), checkoutRequest, license.Key)
-	if err != nil {
-		RespondError(w, err, nil)
-		return
-	}
-
-	ah.Respond(w, getCheckoutPortalResponse(redirectUrl))
-}
-
 func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
	licenseKey := r.URL.Query().Get("licenseKey")

@@ -182,72 +89,3 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
	// TODO(srikanthccv):Fetch the current day usage and add it to the response
	ah.Respond(w, billingResponse.Data)
 }
-
-func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
-	licensesV2 := []model.License{}
-	for _, l := range licenses {
-		planKeyFromPlanName, ok := model.MapOldPlanKeyToNewPlanName[l.PlanName]
-		if !ok {
-			planKeyFromPlanName = model.Basic
-		}
-		licenseV2 := model.License{
-			Key:               l.Key,
-			ActivationId:      "",
-			PlanDetails:       "",
-			FeatureSet:        l.Features,
-			ValidationMessage: "",
-			IsCurrent:         l.IsCurrent,
-			LicensePlan: model.LicensePlan{
-				PlanKey:    planKeyFromPlanName,
-				ValidFrom:  l.ValidFrom,
-				ValidUntil: l.ValidUntil,
-				Status:     l.Status},
-		}
-		licensesV2 = append(licensesV2, licenseV2)
-	}
-	return licensesV2
-}
-
-func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
-	licensesV3, apierr := ah.LM().GetLicensesV3(r.Context())
-	if apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-	licenses := convertLicenseV3ToLicenseV2(licensesV3)
-
-	resp := model.Licenses{
-		TrialStart:                   -1,
-		TrialEnd:                     -1,
-		OnTrial:                      false,
-		WorkSpaceBlock:               false,
-		TrialConvertedToSubscription: false,
-		GracePeriodEnd:               -1,
-		Licenses:                     licenses,
-	}
-
-	ah.Respond(w, resp)
-}
-
-func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
-
-	portalRequest := &model.PortalRequest{}
-	if err := json.NewDecoder(r.Body).Decode(portalRequest); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	license := ah.LM().GetActiveLicense()
-	if license == nil {
-		RespondError(w, model.BadRequestStr("cannot request the portal session without license key"), nil)
-		return
-	}
-
-	redirectUrl, err := signozio.PortalSession(r.Context(), portalRequest, license.Key)
-	if err != nil {
-		RespondError(w, err, nil)
-		return
-	}
-
-	ah.Respond(w, getCheckoutPortalResponse(redirectUrl))
-}

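The removed V3-to-V2 converter above hinges on one idiom worth calling out: a map lookup with a safe default, so an unknown plan name degrades to the basic plan instead of failing the conversion. A minimal sketch of that fallback with hypothetical plan names (the real mapping lives in model.MapOldPlanKeyToNewPlanName):

package main

import "fmt"

// Hypothetical stand-ins for model.MapOldPlanKeyToNewPlanName and model.Basic.
var mapOldPlanKeyToNewPlanName = map[string]string{
	"TEAMS_PLAN":      "TEAMS",
	"ENTERPRISE_PLAN": "ENTERPRISE",
}

const basicPlan = "BASIC_PLAN"

// planKeyFor mirrors the fallback in convertLicenseV3ToLicenseV2: unknown
// plan names map to the basic plan rather than producing an error.
func planKeyFor(planName string) string {
	if key, ok := mapOldPlanKeyToNewPlanName[planName]; ok {
		return key
	}
	return basicPlan
}

func main() {
	fmt.Println(planKeyFor("TEAMS_PLAN")) // TEAMS
	fmt.Println(planKeyFor("UNKNOWN"))    // BASIC_PLAN
}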
@@ -1,165 +0,0 @@
-package api
-
-import (
-	"context"
-	"crypto/rand"
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"time"
-
-	"github.com/gorilla/mux"
-	"go.signoz.io/signoz/ee/query-service/model"
-	"go.signoz.io/signoz/pkg/query-service/auth"
-	baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	"go.uber.org/zap"
-)
-
-func generatePATToken() string {
-	// Generate a 32-byte random token.
-	token := make([]byte, 32)
-	rand.Read(token)
-	// Encode the token in base64.
-	encodedToken := base64.StdEncoding.EncodeToString(token)
-	return encodedToken
-}
-
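One note on the deleted helper above: it discards crypto/rand.Read's error. On the rare platforms where the system randomness source fails, that would silently yield a weaker token. A sketch of the standard hardening, which surfaces the error instead (this is not the project's code, just the conventional variant):

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// generateToken returns a base64-encoded 32-byte random token, propagating
// any failure of the system randomness source instead of ignoring it.
func generateToken() (string, error) {
	token := make([]byte, 32)
	if _, err := rand.Read(token); err != nil {
		return "", fmt.Errorf("generate token: %w", err)
	}
	return base64.StdEncoding.EncodeToString(token), nil
}

func main() {
	t, err := generateToken()
	if err != nil {
		panic(err)
	}
	fmt.Println(t)
}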
-func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-
-	req := model.CreatePATRequestBody{}
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-	user, err := auth.GetUserFromReqContext(r.Context())
-	if err != nil {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
-			Err: err,
-		}, nil)
-		return
-	}
-	pat := model.PAT{
-		Name:      req.Name,
-		Role:      req.Role,
-		ExpiresAt: req.ExpiresInDays,
-	}
-	err = validatePATRequest(pat)
-	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	// All the PATs are associated with the user creating the PAT.
-	pat.UserID = user.ID
-	pat.CreatedAt = time.Now().Unix()
-	pat.UpdatedAt = time.Now().Unix()
-	pat.LastUsed = 0
-	pat.Token = generatePATToken()
-
-	if pat.ExpiresAt != 0 {
-		// convert expiresAt to unix timestamp from days
-		pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60)
-	}
-
-	zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
-	var apierr basemodel.BaseApiError
-	if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-
-	ah.Respond(w, &pat)
-}
-
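createPAT stores ExpiresAt first as a day count and then rewrites it in place as an absolute unix timestamp, now plus days times 86400 seconds. A small sketch that makes the unit conversion explicit with time.Duration, equivalent to the arithmetic in the handler above:

package main

import (
	"fmt"
	"time"
)

// expiryFromDays converts a validity window in days into an absolute unix
// timestamp, matching now + days*24*60*60 in the handler above.
func expiryFromDays(now time.Time, days int64) int64 {
	return now.Add(time.Duration(days) * 24 * time.Hour).Unix()
}

func main() {
	now := time.Unix(1_700_000_000, 0)
	fmt.Println(expiryFromDays(now, 30))  // 1702592000
	fmt.Println(now.Unix() + 30*24*60*60) // same value
}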
-func validatePATRequest(req model.PAT) error {
-	if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
-		return fmt.Errorf("valid role is required")
-	}
-	if req.ExpiresAt < 0 {
-		return fmt.Errorf("valid expiresAt is required")
-	}
-	if req.Name == "" {
-		return fmt.Errorf("valid name is required")
-	}
-	return nil
-}
-
-func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-
-	req := model.PAT{}
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	user, err := auth.GetUserFromReqContext(r.Context())
-	if err != nil {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
-			Err: err,
-		}, nil)
-		return
-	}
-
-	err = validatePATRequest(req)
-	if err != nil {
-		RespondError(w, model.BadRequest(err), nil)
-		return
-	}
-
-	req.UpdatedByUserID = user.ID
-	id := mux.Vars(r)["id"]
-	req.UpdatedAt = time.Now().Unix()
-	zap.L().Info("Got Update PAT request", zap.Any("pat", req))
-	var apierr basemodel.BaseApiError
-	if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-
-	ah.Respond(w, map[string]string{"data": "pat updated successfully"})
-}
-
-func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-	user, err := auth.GetUserFromReqContext(r.Context())
-	if err != nil {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
-			Err: err,
-		}, nil)
-		return
-	}
-	zap.L().Info("Get PATs for user", zap.String("user_id", user.ID))
-	pats, apierr := ah.AppDao().ListPATs(ctx)
-	if apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-	ah.Respond(w, pats)
-}
-
-func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-	id := mux.Vars(r)["id"]
-	user, err := auth.GetUserFromReqContext(r.Context())
-	if err != nil {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
-			Err: err,
-		}, nil)
-		return
-	}
-
-	zap.L().Info("Revoke PAT with id", zap.String("id", id))
-	if apierr := ah.AppDao().RevokePAT(ctx, id, user.ID); apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-	ah.Respond(w, map[string]string{"data": "pat revoked successfully"})
-}
@@ -2,19 +2,39 @@ package api

 import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"runtime/debug"

-	"go.signoz.io/signoz/ee/query-service/anomaly"
-	baseapp "go.signoz.io/signoz/pkg/query-service/app"
-	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
-	"go.signoz.io/signoz/pkg/query-service/model"
-	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+	anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
+	"github.com/SigNoz/signoz/ee/query-service/anomaly"
+	"github.com/SigNoz/signoz/pkg/errors"
+	"github.com/SigNoz/signoz/pkg/http/render"
+	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
+	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+	"github.com/SigNoz/signoz/pkg/query-service/model"
+	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
	"go.uber.org/zap"

+	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
 )

 func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+
	bodyBytes, _ := io.ReadAll(r.Body)
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
@@ -29,7 +49,7 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
	queryRangeParams.Version = "v4"

	// add temporality for each metric
-	temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
+	temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
	if temporalityErr != nil {
		zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
@@ -85,34 +105,30 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
	switch seasonality {
	case anomaly.SeasonalityWeekly:
		provider = anomaly.NewWeeklyProvider(
-			anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
+			anomaly.WithCache[*anomaly.WeeklyProvider](aH.Signoz.Cache),
			anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
-			anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
		)
	case anomaly.SeasonalityDaily:
		provider = anomaly.NewDailyProvider(
-			anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
+			anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
			anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
-			anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
		)
	case anomaly.SeasonalityHourly:
		provider = anomaly.NewHourlyProvider(
-			anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
+			anomaly.WithCache[*anomaly.HourlyProvider](aH.Signoz.Cache),
			anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
-			anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
		)
	default:
		provider = anomaly.NewDailyProvider(
-			anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
+			anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
			anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
			anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
-			anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
		)
	}
-	anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
+	anomalies, err := provider.GetAnomalies(r.Context(), orgID, &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
@@ -127,3 +143,139 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
		aH.QueryRangeV4(w, r)
	}
 }

+func extractSeasonality(anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) anomalyV2.Seasonality {
+	for _, fn := range anomalyQuery.Functions {
+		if fn.Name == qbtypes.FunctionNameAnomaly {
+			for _, arg := range fn.Args {
+				if arg.Name == "seasonality" {
+					if seasonalityStr, ok := arg.Value.(string); ok {
+						switch seasonalityStr {
+						case "weekly":
+							return anomalyV2.SeasonalityWeekly
+						case "hourly":
+							return anomalyV2.SeasonalityHourly
+						}
+					}
+				}
+			}
+		}
+	}
+	return anomalyV2.SeasonalityDaily // default
+}

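extractSeasonality walks the query's function list looking for an anomaly function carrying a seasonality string argument, and falls back to daily when nothing matches. A compact sketch of the same walk over simplified structures (hypothetical types, not the qbtypes definitions):

package main

import "fmt"

// Simplified stand-ins for the qbtypes function and argument shapes.
type FunctionArg struct {
	Name  string
	Value any
}

type Function struct {
	Name string
	Args []FunctionArg
}

// seasonalityOf mirrors extractSeasonality: the first recognized seasonality
// argument on an "anomaly" function wins, otherwise "daily" is the default.
func seasonalityOf(fns []Function) string {
	for _, fn := range fns {
		if fn.Name != "anomaly" {
			continue
		}
		for _, arg := range fn.Args {
			if arg.Name != "seasonality" {
				continue
			}
			if s, ok := arg.Value.(string); ok && (s == "weekly" || s == "hourly") {
				return s
			}
		}
	}
	return "daily"
}

func main() {
	fns := []Function{{Name: "anomaly", Args: []FunctionArg{{Name: "seasonality", Value: "weekly"}}}}
	fmt.Println(seasonalityOf(fns)) // weekly
	fmt.Println(seasonalityOf(nil)) // daily
}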
+func createAnomalyProvider(aH *APIHandler, seasonality anomalyV2.Seasonality) anomalyV2.Provider {
+	switch seasonality {
+	case anomalyV2.SeasonalityWeekly:
+		return anomalyV2.NewWeeklyProvider(
+			anomalyV2.WithQuerier[*anomalyV2.WeeklyProvider](aH.Signoz.Querier),
+			anomalyV2.WithLogger[*anomalyV2.WeeklyProvider](aH.Signoz.Instrumentation.Logger()),
+		)
+	case anomalyV2.SeasonalityHourly:
+		return anomalyV2.NewHourlyProvider(
+			anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](aH.Signoz.Querier),
+			anomalyV2.WithLogger[*anomalyV2.HourlyProvider](aH.Signoz.Instrumentation.Logger()),
+		)
+	default:
+		return anomalyV2.NewDailyProvider(
+			anomalyV2.WithQuerier[*anomalyV2.DailyProvider](aH.Signoz.Querier),
+			anomalyV2.WithLogger[*anomalyV2.DailyProvider](aH.Signoz.Instrumentation.Logger()),
+		)
+	}
+}

+func (aH *APIHandler) handleAnomalyQuery(ctx context.Context, orgID valuer.UUID, anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation], queryRangeRequest qbtypes.QueryRangeRequest) (*anomalyV2.AnomaliesResponse, error) {
+	seasonality := extractSeasonality(anomalyQuery)
+	provider := createAnomalyProvider(aH, seasonality)
+
+	return provider.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{Params: queryRangeRequest})
+}

+func (aH *APIHandler) queryRangeV5(rw http.ResponseWriter, req *http.Request) {
+
+	bodyBytes, err := io.ReadAll(req.Body)
+	if err != nil {
+		render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to read request body: %v", err))
+		return
+	}
+	req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+
+	ctx := req.Context()
+
+	claims, err := authtypes.ClaimsFromContext(ctx)
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	var queryRangeRequest qbtypes.QueryRangeRequest
+	if err := json.NewDecoder(req.Body).Decode(&queryRangeRequest); err != nil {
+		render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to decode request body: %v", err))
+		return
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			stackTrace := string(debug.Stack())
+
+			queryJSON, _ := json.Marshal(queryRangeRequest)
+
+			aH.Signoz.Instrumentation.Logger().ErrorContext(ctx, "panic in QueryRange",
+				"error", r,
+				"user", claims.UserID,
+				"payload", string(queryJSON),
+				"stacktrace", stackTrace,
+			)
+
+			render.Error(rw, errors.NewInternalf(
+				errors.CodeInternal,
+				"Something went wrong on our end. It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.",
+			))
+		}
+	}()

+	if err := queryRangeRequest.Validate(); err != nil {
+		render.Error(rw, err)
+		return
+	}

+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}

+	if anomalyQuery, ok := queryRangeRequest.IsAnomalyRequest(); ok {
+		anomalies, err := aH.handleAnomalyQuery(ctx, orgID, anomalyQuery, queryRangeRequest)
+		if err != nil {
+			render.Error(rw, errors.NewInternalf(errors.CodeInternal, "failed to get anomalies: %v", err))
+			return
+		}

+		results := []any{}
+		for _, item := range anomalies.Results {
+			results = append(results, item)
+		}

+		finalResp := &qbtypes.QueryRangeResponse{
+			Type: queryRangeRequest.RequestType,
+			Data: struct {
+				Results []any `json:"results"`
+			}{
+				Results: results,
+			},
+			Meta: struct {
+				RowsScanned  uint64 `json:"rowsScanned"`
+				BytesScanned uint64 `json:"bytesScanned"`
+				DurationMS   uint64 `json:"durationMs"`
+			}{},
+		}

+		render.Success(rw, http.StatusOK, finalResp)
+		return
+	} else {
+		// regular query range request, let the querier handle it
+		req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+		aH.QuerierAPI.QueryRange(rw, req)
+	}
+}

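queryRangeV5 wraps the whole request in a deferred recover so that a panic during query evaluation becomes a logged 500 response instead of a crashed process. The skeleton of that pattern, independent of the SigNoz types:

package main

import (
	"fmt"
	"runtime/debug"
)

// runGuarded invokes fn and converts any panic into an error carrying the
// stack trace, the same shape as the deferred recover in queryRangeV5.
func runGuarded(fn func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic: %v\n%s", r, debug.Stack())
		}
	}()
	fn()
	return nil
}

func main() {
	err := runGuarded(func() { panic("boom") })
	fmt.Println(err != nil) // true
}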
@@ -3,8 +3,8 @@ package api
 import (
	"net/http"

-	baseapp "go.signoz.io/signoz/pkg/query-service/app"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
+	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
+	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 )

 func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {

@@ -1,33 +0,0 @@
-package api
-
-import (
-	"net/http"
-
-	"go.signoz.io/signoz/ee/query-service/app/db"
-	"go.signoz.io/signoz/ee/query-service/model"
-	baseapp "go.signoz.io/signoz/pkg/query-service/app"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	"go.uber.org/zap"
-)
-
-func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
-
-	if !ah.CheckFeature(basemodel.SmartTraceDetail) {
-		zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
-		ah.APIHandler.SearchTraces(w, r)
-		return
-	}
-	searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
-	if err != nil {
-		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
-		return
-	}
-
-	result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
-	if ah.HandleError(w, err, http.StatusBadRequest) {
-		return
-	}
-
-	ah.WriteJSON(w, r, result)
-
-}
@@ -1,42 +0,0 @@
-package db
-
-import (
-	"time"
-
-	"github.com/ClickHouse/clickhouse-go/v2"
-
-	"github.com/jmoiron/sqlx"
-
-	"go.signoz.io/signoz/pkg/cache"
-	basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
-	"go.signoz.io/signoz/pkg/query-service/interfaces"
-)
-
-type ClickhouseReader struct {
-	conn  clickhouse.Conn
-	appdb *sqlx.DB
-	*basechr.ClickHouseReader
-}
-
-func NewDataConnector(
-	localDB *sqlx.DB,
-	ch clickhouse.Conn,
-	promConfigPath string,
-	lm interfaces.FeatureLookup,
-	cluster string,
-	useLogsNewSchema bool,
-	useTraceNewSchema bool,
-	fluxIntervalForTraceDetail time.Duration,
-	cache cache.Cache,
-) *ClickhouseReader {
-	chReader := basechr.NewReader(localDB, ch, promConfigPath, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
-	return &ClickhouseReader{
-		conn:             ch,
-		appdb:            localDB,
-		ClickHouseReader: chReader,
-	}
-}
-
-func (r *ClickhouseReader) Start(readerReady chan bool) {
-	r.ClickHouseReader.Start(readerReady)
-}
@@ -2,193 +2,112 @@ package app

 import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net"
	"net/http"
	_ "net/http/pprof" // http profiler
	"time"

	"github.com/gorilla/handlers"
	"github.com/jmoiron/sqlx"

+	"github.com/SigNoz/signoz/ee/query-service/app/api"
+	"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
+	"github.com/SigNoz/signoz/ee/query-service/rules"
+	"github.com/SigNoz/signoz/ee/query-service/usage"
+	"github.com/SigNoz/signoz/pkg/alertmanager"
+	"github.com/SigNoz/signoz/pkg/cache"
+	"github.com/SigNoz/signoz/pkg/http/middleware"
+	"github.com/SigNoz/signoz/pkg/modules/organization"
+	"github.com/SigNoz/signoz/pkg/prometheus"
+	"github.com/SigNoz/signoz/pkg/querier"
+	"github.com/SigNoz/signoz/pkg/signoz"
+	"github.com/SigNoz/signoz/pkg/sqlstore"
+	"github.com/SigNoz/signoz/pkg/telemetrystore"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/web"
	"github.com/rs/cors"
	"github.com/soheilhy/cmux"
-	eemiddleware "go.signoz.io/signoz/ee/http/middleware"
-	"go.signoz.io/signoz/ee/query-service/app/api"
-	"go.signoz.io/signoz/ee/query-service/app/db"
-	"go.signoz.io/signoz/ee/query-service/auth"
-	"go.signoz.io/signoz/ee/query-service/constants"
-	"go.signoz.io/signoz/ee/query-service/dao"
-	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
-	"go.signoz.io/signoz/ee/query-service/interfaces"
-	"go.signoz.io/signoz/ee/query-service/rules"
-	"go.signoz.io/signoz/pkg/alertmanager"
-	"go.signoz.io/signoz/pkg/http/middleware"
-	"go.signoz.io/signoz/pkg/signoz"
-	"go.signoz.io/signoz/pkg/sqlstore"
-	"go.signoz.io/signoz/pkg/types"
-	"go.signoz.io/signoz/pkg/types/authtypes"
-	"go.signoz.io/signoz/pkg/web"
-
-	licensepkg "go.signoz.io/signoz/ee/query-service/license"
-	"go.signoz.io/signoz/ee/query-service/usage"

-	"go.signoz.io/signoz/pkg/query-service/agentConf"
-	baseapp "go.signoz.io/signoz/pkg/query-service/app"
-	"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
-	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
-	baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
-	"go.signoz.io/signoz/pkg/query-service/app/integrations"
-	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
-	"go.signoz.io/signoz/pkg/query-service/app/opamp"
-	opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
-	"go.signoz.io/signoz/pkg/query-service/app/preferences"
-	"go.signoz.io/signoz/pkg/query-service/cache"
-	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
-	"go.signoz.io/signoz/pkg/query-service/healthcheck"
-	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
-	basemodel "go.signoz.io/signoz/pkg/query-service/model"
-	pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
-	baserules "go.signoz.io/signoz/pkg/query-service/rules"
-	"go.signoz.io/signoz/pkg/query-service/telemetry"
-	"go.signoz.io/signoz/pkg/query-service/utils"
+	"github.com/SigNoz/signoz/pkg/query-service/agentConf"
+	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
+	"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
+	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
+	"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
+	"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
+	"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
+	opAmpModel "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
+	baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
+	"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
+	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
+	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
+	"github.com/SigNoz/signoz/pkg/query-service/utils"
	"go.uber.org/zap"
 )

 const AppDbEngine = "sqlite"

 type ServerOptions struct {
-	Config            signoz.Config
-	SigNoz            *signoz.SigNoz
-	PromConfigPath    string
-	SkipTopLvlOpsPath string
-	HTTPHostPort      string
-	PrivateHostPort   string
-	// alert specific params
-	DisableRules bool
-	RuleRepoURL  string
+	Config            signoz.Config
+	SigNoz            *signoz.SigNoz
+	HTTPHostPort      string
+	PrivateHostPort   string
	PreferSpanMetrics bool
-	CacheConfigPath            string
-	FluxInterval               string
-	FluxIntervalForTraceDetail string
-	Cluster                    string
-	GatewayUrl                 string
-	UseLogsNewSchema           bool
-	UseTraceNewSchema          bool
-	Jwt                        *authtypes.JWT
 }

-// Server runs HTTP api service
+// Server runs HTTP, Mux and a grpc server
 type Server struct {
-	serverOptions *ServerOptions
-	ruleManager   *baserules.Manager
+	config      signoz.Config
+	signoz      *signoz.SigNoz
+	jwt         *authtypes.JWT
+	ruleManager *baserules.Manager

	// public http router
-	httpConn   net.Listener
-	httpServer *http.Server
+	httpConn     net.Listener
+	httpServer   *http.Server
+	httpHostPort string

	// private http
-	privateConn net.Listener
-	privateHTTP *http.Server
+	privateConn     net.Listener
+	privateHTTP     *http.Server
+	privateHostPort string

-	opampServer *opamp.Server
-
	// Usage manager
	usageManager *usage.Manager

+	opampServer *opamp.Server
+
	unavailableChannel chan healthcheck.Status
 }

-// HealthCheckStatus returns health check status channel a client can subscribe to
-func (s Server) HealthCheckStatus() chan healthcheck.Status {
-	return s.unavailableChannel
-}

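HealthCheckStatus (moved further down in the new layout, but unchanged in contract) hands callers a channel rather than a snapshot, so a supervisor can react to status transitions as they happen. A hedged usage sketch against a stand-in type; the real channel carries healthcheck.Status values:

package main

import "fmt"

// Status stands in for healthcheck.Status.
type Status int

const Unavailable Status = iota

// server mimics the Server above: it exposes its internal channel to subscribers.
type server struct{ unavailable chan Status }

func (s server) HealthCheckStatus() chan Status { return s.unavailable }

func main() {
	s := server{unavailable: make(chan Status, 1)}
	go func() { s.unavailable <- Unavailable }() // e.g. emitted when a listener dies
	st := <-s.HealthCheckStatus()
	fmt.Println("server became unavailable:", st)
}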
 // NewServer creates and initializes Server
-func NewServer(serverOptions *ServerOptions) (*Server, error) {
-	modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore)
+func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT) (*Server, error) {
+	gatewayProxy, err := gateway.NewProxy(config.Gateway.URL.String(), gateway.RoutePrefix)
	if err != nil {
		return nil, err
	}

-	if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
-		return nil, err
-	}
-
-	if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
-		return nil, err
-	}
-
-	if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
-		return nil, err
-	}
-
-	gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
-	if err != nil {
-		return nil, err
-	}
-
-	// initiate license manager
-	lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB(), serverOptions.SigNoz.SQLStore.BunDB())
-	if err != nil {
-		return nil, err
-	}
-
-	// set license manager as feature flag provider in dao
-	modelDao.SetFlagProvider(lm)
-	readerReady := make(chan bool)
-
-	fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
-	if err != nil {
-		return nil, err
-	}
-
-	var reader interfaces.DataConnector
-	qb := db.NewDataConnector(
-		serverOptions.SigNoz.SQLStore.SQLxDB(),
-		serverOptions.SigNoz.TelemetryStore.ClickHouseDB(),
-		serverOptions.PromConfigPath,
-		lm,
-		serverOptions.Cluster,
-		serverOptions.UseLogsNewSchema,
-		serverOptions.UseTraceNewSchema,
-		fluxIntervalForTraceDetail,
-		serverOptions.SigNoz.Cache,
+	reader := clickhouseReader.NewReader(
+		signoz.SQLStore,
+		signoz.TelemetryStore,
+		signoz.Prometheus,
+		signoz.TelemetryStore.Cluster(),
+		config.Querier.FluxInterval,
+		signoz.Cache,
	)
-	go qb.Start(readerReady)
-	reader = qb
-
-	skipConfig := &basemodel.SkipConfig{}
-	if serverOptions.SkipTopLvlOpsPath != "" {
-		// read skip config
-		skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
-		if err != nil {
-			return nil, err
-		}
-	}
-	var c cache.Cache
-	if serverOptions.CacheConfigPath != "" {
-		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
-		if err != nil {
-			return nil, err
-		}
-		c = cache.NewCache(cacheOpts)
-	}
-
-	<-readerReady
	rm, err := makeRulesManager(
-		serverOptions.PromConfigPath,
-		serverOptions.RuleRepoURL,
-		serverOptions.SigNoz.SQLStore.SQLxDB(),
-		reader,
-		c,
-		serverOptions.DisableRules,
-		lm,
-		serverOptions.UseLogsNewSchema,
-		serverOptions.UseTraceNewSchema,
-		serverOptions.SigNoz.Alertmanager,
-		serverOptions.SigNoz.SQLStore,
+		signoz.Cache,
+		signoz.Alertmanager,
+		signoz.SQLStore,
+		signoz.TelemetryStore,
+		signoz.Prometheus,
+		signoz.Modules.OrgGetter,
+		signoz.Querier,
+		signoz.Instrumentation.Logger(),
	)

	if err != nil {
@@ -196,19 +115,16 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	}

	// initiate opamp
-	_, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
-	if err != nil {
-		return nil, err
-	}
+	opAmpModel.Init(signoz.SQLStore, signoz.Instrumentation.Logger(), signoz.Modules.OrgGetter)

-	integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore)
+	integrationsController, err := integrations.NewController(signoz.SQLStore)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't create integrations controller: %w", err,
		)
	}

-	cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore)
+	cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't create cloud provider integrations controller: %w", err,
@@ -217,7 +133,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

	// ingestion pipelines manager
	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
-		serverOptions.SigNoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
+		signoz.SQLStore,
+		integrationsController.GetPipelinesForInstalledIntegrations,
	)
	if err != nil {
		return nil, err
@@ -225,7 +142,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

	// initiate agent config handler
	agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
-		DB:            serverOptions.SigNoz.SQLStore.SQLxDB(),
+		Store:         signoz.SQLStore,
		AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
	})
	if err != nil {
@@ -233,59 +150,45 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	}

	// start the usagemanager
-	usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickHouseDB(), serverOptions.Config.TelemetryStore.ClickHouse.DSN)
+	usageManager, err := usage.New(signoz.Licensing, signoz.TelemetryStore.ClickhouseDB(), signoz.Zeus, signoz.Modules.OrgGetter)
	if err != nil {
		return nil, err
	}
-	err = usageManager.Start()
-	if err != nil {
-		return nil, err
-	}
-
-	telemetry.GetInstance().SetReader(reader)
-	telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
-
-	fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
+	err = usageManager.Start(context.Background())
	if err != nil {
		return nil, err
	}

	apiOpts := api.APIHandlerOptions{
		DataConnector:     reader,
-		SkipConfig:        skipConfig,
		PreferSpanMetrics: serverOptions.PreferSpanMetrics,
-		AppDao:            modelDao,
		RulesManager:      rm,
		UsageManager:      usageManager,
-		FeatureFlags:      lm,
-		LicenseManager:    lm,
		IntegrationsController:        integrationsController,
		CloudIntegrationsController:   cloudIntegrationsController,
		LogsParsingPipelineController: logParsingPipelineController,
-		Cache:             c,
-		FluxInterval:      fluxInterval,
+		FluxInterval:      config.Querier.FluxInterval,
		Gateway:           gatewayProxy,
-		GatewayUrl:        serverOptions.GatewayUrl,
-		UseLogsNewSchema:  serverOptions.UseLogsNewSchema,
-		UseTraceNewSchema: serverOptions.UseTraceNewSchema,
-		JWT:               serverOptions.Jwt,
+		GatewayUrl:        config.Gateway.URL.String(),
+		JWT:               jwt,
	}

-	apiHandler, err := api.NewAPIHandler(apiOpts, serverOptions.SigNoz)
+	apiHandler, err := api.NewAPIHandler(apiOpts, signoz)
	if err != nil {
		return nil, err
	}

	s := &Server{
-		// logger: logger,
-		// tracer: tracer,
+		config:             config,
+		signoz:             signoz,
+		jwt:                jwt,
		ruleManager:        rm,
-		serverOptions:      serverOptions,
+		httpHostPort:       baseconst.HTTPHostPort,
+		privateHostPort:    baseconst.PrivateHostPort,
		unavailableChannel: make(chan healthcheck.Status),
		usageManager:       usageManager,
	}

-	httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)
+	httpServer, err := s.createPublicServer(apiHandler, signoz.Web)

	if err != nil {
		return nil, err
@@ -301,25 +204,28 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	s.privateHTTP = privateServer

	s.opampServer = opamp.InitializeServer(
-		&opAmpModel.AllAgents, agentConfMgr,
+		&opAmpModel.AllAgents, agentConfMgr, signoz.Instrumentation,
	)

	return s, nil
 }

+// HealthCheckStatus returns health check status channel a client can subscribe to
+func (s Server) HealthCheckStatus() chan healthcheck.Status {
+	return s.unavailableChannel
+}
+
 func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
	r := baseapp.NewRouter()

-	r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
-	r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
-	r.Use(middleware.NewTimeout(zap.L(),
-		s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
-		s.serverOptions.Config.APIServer.Timeout.Default,
-		s.serverOptions.Config.APIServer.Timeout.Max,
+	r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
+	r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
+	r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
+		s.config.APIServer.Timeout.ExcludedRoutes,
+		s.config.APIServer.Timeout.Default,
+		s.config.APIServer.Timeout.Max,
	).Wrap)
-	r.Use(middleware.NewAnalytics(zap.L()).Wrap)
-	r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
+	r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)

	apiHandler.RegisterPrivateRoutes(r)

@@ -340,46 +246,32 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
 }

 func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {

	r := baseapp.NewRouter()
+	am := middleware.NewAuthZ(s.signoz.Instrumentation.Logger())

-	// add auth middleware
-	getUserFromRequest := func(ctx context.Context) (*types.GettableUser, error) {
-		user, err := auth.GetUserFromRequestContext(ctx, apiHandler)
-
-		if err != nil {
-			return nil, err
-		}
-
-		if user.User.OrgID == "" {
-			return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
-		}
-
-		return user, nil
-	}
-	am := baseapp.NewAuthMiddleware(getUserFromRequest)
-
-	r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
-	r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
-	r.Use(middleware.NewTimeout(zap.L(),
-		s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
-		s.serverOptions.Config.APIServer.Timeout.Default,
-		s.serverOptions.Config.APIServer.Timeout.Max,
+	r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
+	r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
+	r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
+		s.config.APIServer.Timeout.ExcludedRoutes,
+		s.config.APIServer.Timeout.Default,
+		s.config.APIServer.Timeout.Max,
	).Wrap)
-	r.Use(middleware.NewAnalytics(zap.L()).Wrap)
-	r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
+	r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)

	apiHandler.RegisterRoutes(r, am)
	apiHandler.RegisterLogsRoutes(r, am)
	apiHandler.RegisterIntegrationRoutes(r, am)
	apiHandler.RegisterCloudIntegrationsRoutes(r, am)
	apiHandler.RegisterFieldsRoutes(r, am)
	apiHandler.RegisterQueryRangeV3Routes(r, am)
	apiHandler.RegisterInfraMetricsRoutes(r, am)
	apiHandler.RegisterQueryRangeV4Routes(r, am)
+	apiHandler.RegisterQueryRangeV5Routes(r, am)
	apiHandler.RegisterWebSocketPaths(r, am)
	apiHandler.RegisterMessagingQueuesRoutes(r, am)
	apiHandler.RegisterThirdPartyApiRoutes(r, am)
	apiHandler.MetricExplorerRoutes(r, am)
+	apiHandler.RegisterTraceFunnelsRoutes(r, am)

	c := cors.New(cors.Options{
		AllowedOrigins: []string{"*"},
@@ -405,7 +297,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
 func (s *Server) initListeners() error {
	// listen on public port
	var err error
-	publicHostPort := s.serverOptions.HTTPHostPort
+	publicHostPort := s.httpHostPort
	if publicHostPort == "" {
		return fmt.Errorf("baseconst.HTTPHostPort is required")
	}
@@ -415,10 +307,10 @@ func (s *Server) initListeners() error {
		return err
	}

-	zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
+	zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.httpHostPort))

	// listen on private port to support internal services
-	privateHostPort := s.serverOptions.PrivateHostPort
+	privateHostPort := s.privateHostPort

	if privateHostPort == "" {
		return fmt.Errorf("baseconst.PrivateHostPort is required")
@@ -428,20 +320,14 @@ func (s *Server) initListeners() error {
	if err != nil {
		return err
	}
-	zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
+	zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.privateHostPort))

	return nil
 }

 // Start listening on http and private http port concurrently
-func (s *Server) Start() error {
-
-	// initiate rule manager first
-	if !s.serverOptions.DisableRules {
-		s.ruleManager.Start()
-	} else {
-		zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
-	}
+func (s *Server) Start(ctx context.Context) error {
+	s.ruleManager.Start(ctx)

	err := s.initListeners()
	if err != nil {
@@ -454,7 +340,7 @@ func (s *Server) Start() error {
	}

	go func() {
-		zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
+		zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.httpHostPort))

		switch err := s.httpServer.Serve(s.httpConn); err {
		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
@@ -480,7 +366,7 @@ func (s *Server) Start() error {
	}

	go func() {
-		zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
+		zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.privateHostPort))

		switch err := s.privateHTTP.Serve(s.privateConn); err {
		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
@@ -506,15 +392,15 @@ func (s *Server) Start() error {
	return nil
 }

-func (s *Server) Stop() error {
+func (s *Server) Stop(ctx context.Context) error {
	if s.httpServer != nil {
-		if err := s.httpServer.Shutdown(context.Background()); err != nil {
+		if err := s.httpServer.Shutdown(ctx); err != nil {
			return err
		}
	}

	if s.privateHTTP != nil {
-		if err := s.privateHTTP.Shutdown(context.Background()); err != nil {
+		if err := s.privateHTTP.Shutdown(ctx); err != nil {
			return err
		}
	}
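With Stop now taking a context, the shutdown deadline becomes the caller's choice rather than being hardcoded to context.Background(). A minimal sketch of a call site bounding graceful shutdown to 30 seconds, using a stub in place of the real *Server so the snippet stands alone:

package main

import (
	"context"
	"fmt"
	"time"
)

// stubServer stands in for *Server; only Stop's contract matters here.
type stubServer struct{}

func (s *stubServer) Stop(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend shutdown work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Bound graceful shutdown to 30 seconds; the deadline now belongs to the caller.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	srv := &stubServer{}
	fmt.Println(srv.Stop(ctx)) // <nil>
}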
@@ -522,52 +408,42 @@
	s.opampServer.Stop()

	if s.ruleManager != nil {
-		s.ruleManager.Stop()
+		s.ruleManager.Stop(ctx)
	}

	// stop usage manager
-	s.usageManager.Stop()
+	s.usageManager.Stop(ctx)

	return nil
 }

 func makeRulesManager(
-	promConfigPath,
-	ruleRepoURL string,
-	db *sqlx.DB,
-	ch baseint.Reader,
	cache cache.Cache,
-	disableRules bool,
-	fm baseint.FeatureLookup,
-	useLogsNewSchema bool,
-	useTraceNewSchema bool,
	alertmanager alertmanager.Alertmanager,
	sqlstore sqlstore.SQLStore,
+	telemetryStore telemetrystore.TelemetryStore,
+	prometheus prometheus.Prometheus,
+	orgGetter organization.Getter,
+	querier querier.Querier,
+	logger *slog.Logger,
 ) (*baserules.Manager, error) {
-	// create engine
-	pqle, err := pqle.FromConfigPath(promConfigPath)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create pql engine : %v", err)
-	}
-
	// create manager opts
	managerOpts := &baserules.ManagerOptions{
-		PqlEngine:      pqle,
-		RepoURL:        ruleRepoURL,
-		DBConn:         db,
+		TelemetryStore: telemetryStore,
+		Prometheus:     prometheus,
		Context:        context.Background(),
-		Logger:         zap.L(),
-		DisableRules:   disableRules,
-		FeatureFlags:   fm,
-		Reader:         ch,
+		Querier:        querier,
+		SLogger:        logger,
		Cache:          cache,
		EvalDelay:      baseconst.GetEvalDelay(),
		PrepareTaskFunc: rules.PrepareTaskFunc,
-		UseLogsNewSchema:  useLogsNewSchema,
-		UseTraceNewSchema: useTraceNewSchema,
		PrepareTestRuleFunc: rules.TestNotification,
		Alertmanager:        alertmanager,
		SQLStore:            sqlstore,
+		OrgGetter:           orgGetter,
	}

	// create Manager

@@ -1,56 +0,0 @@
-package auth
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"go.signoz.io/signoz/ee/query-service/app/api"
-	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
-	"go.signoz.io/signoz/pkg/query-service/telemetry"
-	"go.signoz.io/signoz/pkg/types"
-	"go.signoz.io/signoz/pkg/types/authtypes"
-
-	"go.uber.org/zap"
-)
-
-func GetUserFromRequestContext(ctx context.Context, apiHandler *api.APIHandler) (*types.GettableUser, error) {
-	patToken, ok := authtypes.UUIDFromContext(ctx)
-	if ok && patToken != "" {
-		zap.L().Debug("Received a non-zero length PAT token")
-		ctx := context.Background()
-		dao := apiHandler.AppDao()
-
-		pat, err := dao.GetPAT(ctx, patToken)
-		if err == nil && pat != nil {
-			zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
-			if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
-				zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
-				return nil, fmt.Errorf("PAT has expired")
-			}
-			group, apiErr := dao.GetGroupByName(ctx, pat.Role)
-			if apiErr != nil {
-				zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
-				return nil, apiErr
-			}
-			user, err := dao.GetUser(ctx, pat.UserID)
-			if err != nil {
-				zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
-				return nil, err
-			}
-			telemetry.GetInstance().SetPatTokenUser()
-			dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
-			user.User.GroupID = group.ID
-			user.User.ID = pat.Id
-			return &types.GettableUser{
-				User: user.User,
-				Role: pat.Role,
-			}, nil
-		}
-		if err != nil {
-			zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
-			return nil, err
-		}
-	}
-	return baseauth.GetUserFromReqContext(ctx)
-}