Compare commits

365 Commits

Author SHA1 Message Date
Srikanth Chekuri
b24fadaf86 chore: pin version 0.18.3 2023-04-26 14:13:54 +05:30
Palash Gupta
c149181924 fix: dashboard variable is fixed (#2633) 2023-04-26 13:57:54 +05:30
Prashant Shahi
efe34d2582 Merge branch 'main' into release/v0.18.2 2023-04-21 15:05:17 +05:30
Prashant Shahi
d63a35e937 chore: 📌 pin versions: SigNoz 0.18.2
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-04-21 15:16:36 +05:45
Palash Gupta
c49bb0696b fix: clear filter is fixed (#2544)
* fix: clear filter is fixed

* chore: action bar empty query condition is handled

* feat: local state is clear for filters

(cherry picked from commit 6c11c6d4da)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-04-21 15:12:36 +05:45
Palash Gupta
9a58cc652c feat: custom time frame is updated (#2564)
(cherry picked from commit fb1e823e6b)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-04-21 15:09:35 +05:45
Palash Gupta
5ee0bb57cc fix: max depth issue is fixed in dashboard (#2563)
(cherry picked from commit 1726469aaa)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-04-21 15:09:11 +05:45
Ankit Nayan
c8f3e9024c Merge pull request #2539 from SigNoz/release/v0.18.1
Release/v0.18.1
2023-04-03 16:19:24 +05:30
Prashant Shahi
8f6178f0a9 chore: 📌 pin versions: SigNoz 0.18.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-04-03 15:47:54 +05:45
Palash Gupta
5ff9172103 fix: slider duration is fixed (#2537) 2023-04-03 15:26:47 +05:30
Palash Gupta
67ba46abde fix: 2427 Latency graph onclick is updated (#2534) 2023-04-03 14:00:36 +05:30
Palash Gupta
20b1f96c19 fix: global time navigation is fixed (#2533) 2023-04-03 13:30:48 +05:30
Yevhen Shevchenko
61a1d04252 feat(builder): add aggregator filter (#2516)
Co-authored-by: Chintan Sudani <46838508+techchintan@users.noreply.github.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-04-01 11:29:35 +05:30
Ankit Nayan
80171eddea Merge pull request #2531 from SigNoz/release/v0.18.0
Release/v0.18.0
2023-03-31 18:15:08 +05:30
Prashant Shahi
28684423d1 chore: 📌 pin versions: SigNoz 0.18.0, SigNoz OtelCollector 0.66.7
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-03-31 16:59:21 +05:45
Palash Gupta
037559537b feat: keys for the service map is updated (#2525) 2023-03-31 12:59:57 +05:30
Nityananda Gohain
31a89bfdb3 fix: case sensitive selected field search fixed (#2529) 2023-03-31 11:58:58 +05:30
Prashant Shahi
36610c809e CI: deployment workflow changes (#2527)
* chore: 📌 bump up appleboy/ssh-action to v0.1.8

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* ci(deployments): 🔧 use SSH_KEY secret

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* ci(staging-deployment): 👷 use main tag of OtelCollectors in Staging

Signed-off-by: Prashant Shahi <prashant@signoz.io>

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-03-30 23:57:29 +05:30
Palash Gupta
1a0c76a43b fix: min and max time is removed from the dependency list (#2522) 2023-03-30 16:16:46 +05:30
Palash Gupta
c1d00c1155 fix: dependencies are updated (#2510) 2023-03-29 18:55:30 +05:30
Palash Gupta
3f96325ad8 ability to filter by deployment environment service map (#2506) 2023-03-29 18:31:59 +05:30
Palash Gupta
99ed314fc9 feat: resource attribute is added in the exception (#2491)
* feat: resource attribute is added in the exception

* fix: build is fixed

* chore: methods is updated to post

* fix: build is fixed

* fix: listErrors, countErrors API request body

* chore: type of the function is updated

* chore: convertRawQueriesToTraceSelectedTags is updated

* fix: resource attribute is updated

* chore: selected tags is updated

* feat: key is updated

---------

Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-03-29 14:45:58 +05:30
Vishal Sharma
12e56932ee fix: exception detail broken APIs due to resourceTagsMap (#2514) 2023-03-29 07:32:47 +05:30
Srikanth Chekuri
c4944370ce feat: support environment filtering in service map (#2481) 2023-03-28 22:15:46 +05:30
Chintan Sudani
192d3881a1 fix: commented unwanted sidebar menu option (#2513)
* fix: Removed Strict mode to stop render twice

* fix: commented unwanted sidebar menu option
2023-03-28 21:22:06 +05:30
Vishal Sharma
9d20c2f787 feat: add resource tags to ListErrors API (#2487) 2023-03-28 00:15:15 +05:30
Yevhen Shevchenko
8ea0f72178 feat(UI): add new query label (#2488) 2023-03-27 16:49:49 +05:30
Yevhen Shevchenko
167050b4b5 feat(provider): add base query types (#2501)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-03-27 14:04:06 +05:30
Palash Gupta
fe640aae39 fix: label is added in the tabs (#2507) 2023-03-27 12:11:35 +05:30
Yevhen Shevchenko
6c2faa21f4 fix(query): change correct position of provider (#2498) 2023-03-24 18:16:28 +05:30
Yevhen Shevchenko
c617784d7c feat(provider): add query builder provider (#2496)
Co-authored-by: Yevhen Shevchenko <yevhen@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-03-24 17:09:31 +05:30
GitStart
1e7280136a fix: view traces button (#2458) 2023-03-24 15:08:35 +05:30
Srikanth Chekuri
17a5bc8cc3 feat: metrics query range v3 (#2265) 2023-03-23 19:45:15 +05:30
Chintan Sudani
c3763032df feat: added submenu system at sidebar (#2486)
* fix: Removed Strict mode to stop render twice

* feat: added submenu system at sidebar
2023-03-23 14:50:17 +05:30
GitStart
da4cbf6c2f fix: tabs deprecation warning from antd (#2479)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: Chintan Sudani <46838508+techchintan@users.noreply.github.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-03-22 12:01:37 +05:30
GitStart
97bfee48e1 fix: slider deprecation warning from antd (#2478)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: Chintan Sudani <46838508+techchintan@users.noreply.github.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-03-22 11:33:36 +05:30
Ankit Anand
da23d9e087 Update README.md (#2480)
Added pic for exceptions monitoring, added shadow on app metrics image.

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2023-03-22 10:23:21 +05:30
Nityananda Gohain
27db1b9080 Merge pull request #2456 from SigNoz/feat/opamp-logparing
feat: logs parsing pipeline support in opamp
2023-03-22 09:55:53 +05:30
Nityananda Gohain
55d7285c9a Merge branch 'develop' into feat/opamp-logparing 2023-03-22 09:47:03 +05:30
Vishal Sharma
27c48674d4 fix: update query range params (#2453) 2023-03-21 22:53:56 +05:30
Ankit Anand
d29dfa0751 Update README.md (#2475)
Updated product screenshots, bullet points for features
2023-03-21 16:21:49 +05:30
Srikanth Chekuri
d951483597 fix: substitute nan negative rate from counter resets (#2449) 2023-03-21 11:47:04 +05:30
Chintan Sudani
481792d4ca fix: create/edit panel shows a blank page (#2473) 2023-03-20 18:46:20 +05:30
Vishal Sharma
0fa20445d8 Merge branch 'develop' into feat/opamp-logparing 2023-03-20 17:40:09 +05:30
nityanandagohain
eb4ac18162 feat: processor builder updated with new logic and tests 2023-03-17 17:39:28 +05:30
Palash Gupta
1ddda19c8e feat: table view is updated for body field (#2465) 2023-03-17 15:21:02 +05:30
Palash Gupta
91c3abae37 feat: editor is updated (#2464) 2023-03-17 15:12:31 +05:30
nityanandagohain
b5debe6ea2 Merge remote-tracking branch 'upstream/feat/opamp-logparing' into feat/opamp-logparing 2023-03-16 16:39:11 +05:30
nityanandagohain
7367f8dd4b fix: tests fixed 2023-03-16 10:24:20 +05:30
nityanandagohain
bac717e9e6 fix: use structs instead of interface 2023-03-16 10:24:08 +05:30
nityanandagohain
e1219ea942 fix: use structs instead of interface 2023-03-16 10:20:57 +05:30
nityanandagohain
1c867d3b4c Merge remote-tracking branch 'upstream/develop' into feat/opamp-logparing 2023-03-15 20:36:01 +05:30
Nityananda Gohain
65c2a0bf6a Merge pull request #2455 from SigNoz/feat/last10versions
fix: get last n versions in getConfigHistory
2023-03-15 20:27:07 +05:30
nityanandagohain
755d64061e fix: minor spelling fixes 2023-03-15 17:55:02 +05:30
nityanandagohain
500ab02c47 chore: logs parsing pipeline support in opamp 2023-03-15 17:42:24 +05:30
nityanandagohain
dfef41913f fix: get last 10 versions in getConfigHistory 2023-03-15 16:26:46 +05:30
Srikanth Chekuri
210c5fd7f2 feat: opamp server application (#1787)
* feat: opamp server application

* chore: opamp

* chore: refactor server implementation

* chore: add Stop

* chore: merged opamp updates

* chore: removed all errorf

* chore: added a comment about zero version

* feat: added user context for created by

* chore: changed debugf to debug

* chore: removed lb from opamp + added config parser

* fix: added userid to ConfigNewVersion()

* chore: removed user id from contxt and added config parser

* fix: removed lock inside re-deploy

* chore: added config db fix

* fix: merged app/server.go from develop

* fix: restored extract jwt

* Update pkg/query-service/app/server.go

Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>

* fix: dependency version fix and import added

---------

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2023-03-15 15:09:15 +05:30
Rajat Dwivedi
c4b052c51e upgraded some deprecated packages (#2424)
* fix: upgrade deprecated pkg

* fix: reverted linebreak rules

* chore: some of the refactoring is done regarding the performance

---------

Co-authored-by: Chintan Sudani <46838508+techchintan@users.noreply.github.com>
Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-03-14 16:55:15 +05:30
Ankit Nayan
da79f93495 Merge pull request #2442 from SigNoz/release/v0.17.0
Release/v0.17.0
2023-03-11 19:51:41 +05:30
Srikanth Chekuri
83e3e3c3ed Merge branch 'develop' into release/v0.17.0 2023-03-11 16:47:38 +05:30
Srikanth Chekuri
7508c9148f fix: address legend formatting for external call error % (#2443) 2023-03-11 16:44:58 +05:30
Srikanth Chekuri
b15463fd38 chore: pin versions - SigNoz 0.17.0, SigNoz OtelCollector 0.66.6 2023-03-10 21:52:42 +05:30
palashgdev
66b2e17bba feat: color coding is added in the table view (#2437) 2023-03-10 13:55:42 +05:30
Srikanth Chekuri
9af991e424 feat: add attrs filters autocomplete endpoints (#2264) 2023-03-10 11:22:34 +05:30
Maciej Wakuła
59497ed53c Pop!OS support (same as ubuntu) #2417 (#2420) 2023-03-10 03:49:02 +05:30
palashgdev
7f04a4407b feat: color coding is added in the list view (#2432) 2023-03-07 18:15:54 +05:30
palashgdev
2a03291171 feat: body is added in the log (#2431) 2023-03-07 18:07:23 +05:30
palashgdev
53bfc33075 chore: panel Type is disabled for now (#2434) 2023-03-07 17:49:18 +05:30
GitStart
c821e8bb75 feat: add ability to change panel type (#2383)
* feat: add ability to change panel type

* feat: add ability to change panel type

---------

Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-03-07 16:55:59 +05:30
GitStart
eff87f2666 feat: move form into useForm from antd (#2403)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: palashgdev <palashgdev@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-03-07 14:56:56 +05:30
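
The commit above moves antd form state into Form.useForm. A minimal sketch of that pattern; the component and field names are illustrative, not taken from the PR:

```tsx
import { Button, Form, Input } from 'antd';

interface OrgNameValues {
  name: string;
}

// Form.useForm() hands antd a form instance that owns the field state,
// replacing manually controlled inputs and onChange plumbing.
function OrgNameForm({ onSave }: { onSave: (name: string) => void }): JSX.Element {
  const [form] = Form.useForm<OrgNameValues>();

  return (
    <Form form={form} onFinish={(values): void => onSave(values.name)}>
      <Form.Item name="name" rules={[{ required: true }]}>
        <Input placeholder="Display name" />
      </Form.Item>
      <Button htmlType="submit">Save</Button>
    </Form>
  );
}
```
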
Srikanth Chekuri
3f5171dc69 chore: bump SigNoz/prometheus (#2094) 2023-03-07 13:37:31 +05:30
Srikanth Chekuri
c5d7d9d134 feat: ability to save and retrieve the explorer queries (#2284) 2023-03-07 00:26:25 +05:30
Srikanth Chekuri
6defa0ac8b feat: metric attribute autocomplete for the aggregation type (#2263) 2023-03-04 00:05:16 +05:30
Srikanth Chekuri
e3fee332c7 chore: update CODEOWNERS for */query-service/ (#2421) 2023-03-03 23:45:04 +05:30
Srikanth Chekuri
2c7cefcc74 fix: add the missing /health route removed in #2261 (#2419) 2023-03-03 18:07:24 +05:30
GitStart
080a53a9b4 fix: menu antd deprecation warning (#2416)
* fix: menu antd deprecation warning

* chore: some of the refactoring is updated

---------

Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-03-03 16:39:24 +05:30
Vishal Sharma
2a5cb78964 feat: add span links support (#2415)
* feat: add span links support

* fix: handle an edge case

* chore: test is fixed

* chore: some of the refactoring is updated

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-03-03 14:35:11 +05:30
Ankit Nayan
b99d7009a1 Merge pull request #2261 from ahsanbarkati/ahsan/pat
feat(PAT): Add personal access token for programmatic access
2023-03-02 11:01:14 +05:30
palashgdev
e46b7e41e5 feat: restricted_SELECTED_FIELDS is filtered from the selected list (#2401)
* feat: restricted_SELECTED_FIELDS is filtered from the selected list

* chore: selected id fields is removed from the rendering part
2023-03-01 17:26:37 +05:30
palashgdev
50270281e3 fix: Logs Live Tail is fixed (#2380)
* logs is updated

* fix: log live tail is updated

* fix: live tail is fixed

* chore: build is fixed

* chore: useEffect is removed

* chore: getLogsAggregate callback is added in the useEffect
2023-03-01 17:18:02 +05:30
Srikanth Chekuri
5e5e81d81d chore: add payload types for autocomplete requests (#2244) 2023-03-01 10:55:07 +05:30
Ahsan Barkati
eb2fe20025 Address review comments 2023-03-01 00:11:44 +05:30
Ahsan Barkati
df7f276f03 Change header name 2023-03-01 00:11:44 +05:30
Ahsan Barkati
b0f62daa24 Cleanup rbac.go 2023-03-01 00:11:44 +05:30
Ahsan Barkati
797352583a Create PAT supporting auth middleware 2023-03-01 00:11:44 +05:30
Ahsan Barkati
96267e2e3a Add GetPAT function 2023-03-01 00:06:33 +05:30
Ahsan Barkati
388ef9453c Add APIs for PAT 2023-03-01 00:06:33 +05:30
Prashant Shahi
995e45713c chore: health endpoint related changes (#2275) 2023-02-28 23:42:21 +05:30
Mary Ojo
b0d5b15330 style: corrected the positioning of the charts tooltip (#2402)
* style: corrected the positioning of the charts tooltip

* style: stored value for pixel in variable

* chore: logic is shifted to plugin

---------

Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-02-28 14:16:31 +05:30
palashgdev
80cd317b3b feat: color encoding is added in the logs raw view (#2398)
* chore: some of the changes are updated

* feat: ansi-to-html is added

* feat: color is added in the raw view
2023-02-28 11:03:02 +05:30
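
The ansi-to-html dependency added above converts terminal escape codes in raw log lines into styled HTML. A minimal usage sketch (the output shown is approximate):

```ts
import Convert from 'ansi-to-html';

const convert = new Convert();

// "\x1b[31m ... \x1b[0m" is a red ANSI escape sequence; toHtml() turns it
// into an inline-styled <span> that the raw log view can render.
const html = convert.toHtml('\x1b[31merror\x1b[0m connecting to db');
// roughly: '<span style="color:#A00">error</span> connecting to db'
```
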
Ankit Nayan
51721f97c7 Merge pull request #2379 from SigNoz/release/v0.16.2
Release/v0.16.2
2023-02-24 19:31:06 +05:30
Srikanth Chekuri
e7e0f5b96a chore: pin version: SigNoz 0.16.2 2023-02-24 18:14:25 +05:30
Srikanth Chekuri
a26ebb742a chore: bump signoz/signoz-otel-collector version (#2378)
Merged on recommendation of @srikanthccv 

* chore: bump signoz/signoz-otel-collector version

* chore: bump everywhere
2023-02-24 17:34:00 +05:30
Amol Umbark
9d1305f174 fix: resolves alert charts issue with 1 hr timeframe (#2377) 2023-02-24 15:09:30 +05:30
Amol Umbark
ab514cc0f2 fix: changed ask admin message (#2215) 2023-02-24 14:57:07 +05:30
palashgdev
1f44f089e0 feat: multiple values can be selected (#2365)
* feat: multiple values can be selected

* chore: tag value is updated

* fix: handle few edge cases

---------

Co-authored-by: makeavish <makeavish786@gmail.com>
2023-02-23 23:54:16 +05:30
Chintan Sudani
174fc107c2 fix: scrollbar issue on widget (#2359)
* fix: Removed Strict mode to stop render twice

* fix: scrollbar issue on widget
2023-02-23 17:06:57 +05:30
palashgdev
06a55ccdd6 chore: linebreak style is updated (#2277)
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-02-23 13:10:41 +05:30
Srikanth Chekuri
a3731e4c4e fix: error rate as a percentage of range 0-100% (#2311) 2023-02-23 11:15:14 +05:30
Chintan Sudani
e183cace75 fix: null value handle on create dashboard (#2320)
* fix: Removed Strict mode to stop render twice

* fix: null value handle on create dashboard
2023-02-22 16:22:02 +05:30
Srikanth Chekuri
23490ca7f8 fix: operator should be IN for top level operations (#2304) 2023-02-22 12:10:32 +05:30
Nityananda Gohain
9f71e732c7 Merge pull request #2301 from SigNoz/feat/attribute-fix
fix: attribute name corrected in logs database
2023-02-22 09:44:23 +05:30
nityanandagohain
23d6287594 fix: attribute name corrected in logs database 2023-02-21 10:52:03 +05:30
palashgdev
2624ce4007 feat(FE): span Kind is added in the trace filter page (#2281) 2023-02-20 19:12:54 +05:30
palashgdev
3d5134b43c fix: width is added for the max content (#2292) 2023-02-20 17:28:38 +05:30
GitStart
c657f96032 fix: overflowing last timestamp (#2271)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-02-17 11:28:09 +05:30
GitStart
c18fff6ae8 FE: remove @types/redux (#2274)
* FE: remove @types/redux

* Update package.json

---------

Co-authored-by: gitstart <gitstart@users.noreply.github.com>
2023-02-17 11:04:51 +05:30
palashgdev
28142764af chore: testMatch is updated (#2270) 2023-02-15 18:32:40 +05:30
palashgdev
2fa265ff2e fix: onDrag is updated (#2267) 2023-02-15 16:03:53 +05:30
palashgdev
dca0b11acd fix: onSearch callback is updated (#2266) 2023-02-15 15:49:24 +05:30
volodfast
bad80def90 feat: add list and table views for logs (#2163)
* feat: add list and table views for logs

* chore: some of the changes are updated

* chore: some of the refactoring is done

* chore: px to updated to rem

* chore: constant is moved to local storage

* refactor: some of the refactoring is updated

* chore: some of the changes are updated

* fix: resize log table issue

* chore: logs is updated

* chore: resize header is updated

* chore: font observer is added in package json and hook is added for same

* chore: no logs text is updated

* chore: no logs text is updated

* chore: updated some feedback in raw logs line

* chore: types is added

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Chintan Sudani <csudani7@gmail.com>
2023-02-15 14:55:15 +05:30
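
One item in the commit above moves a view constant into local storage. A sketch of that kind of persistence; the key name and view modes here are hypothetical, not from the PR:

```ts
type ViewMode = 'raw' | 'list' | 'table';

const LOGS_VIEW_KEY = 'LOGS_VIEW_MODE'; // hypothetical key, not from the PR

export function saveViewMode(mode: ViewMode): void {
  localStorage.setItem(LOGS_VIEW_KEY, mode);
}

export function loadViewMode(): ViewMode {
  const stored = localStorage.getItem(LOGS_VIEW_KEY);
  // Validate the stored string so a stale or tampered value cannot
  // put the UI into an unknown mode.
  return stored === 'raw' || stored === 'list' || stored === 'table'
    ? stored
    : 'list';
}
```
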
Ankit Nayan
8965b9b503 Merge branch 'main' into develop 2023-02-15 14:22:06 +05:30
Kolesnyk Anton
05076968c9 fix: difficulty clicking on metrics graph points is fixed (#2207)
* fix: difficulty clicking on metrics graph points is fixed

* fix: resolve conflict

* fix: changed hover point & memoized the passed props

* fix: memo from develop

* fix: add condition for end and start stamps

* chore: type position is updated

---------

Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-02-15 10:50:39 +05:30
Prashant Shahi
7c8afc2e1c Merge pull request #2258 from SigNoz/release/v0.16.1
Release/v0.16.1
2023-02-15 01:47:43 +05:30
Prashant Shahi
c8a1a8600e chore: 📌 pin version: SigNoz 0.16.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-02-15 00:41:23 +05:30
Prashant Shahi
45cb1eb38f feat: introduce health check endpoint (#2257)
* feat(query-service): Add health check route and handler

* chore(install-script): 🔧 use health endpoint instead of services list

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-02-15 00:37:57 +05:30
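
The health check endpoint above lets the install script poll readiness instead of parsing the services list. A sketch of such a probe; the exact route path is an assumption here:

```ts
// Poll the query-service until it reports healthy. The /api/v1/health
// path is assumed from the commit text, not verified against the code.
async function waitForHealthy(baseUrl: string, retries = 30): Promise<void> {
  for (let i = 0; i < retries; i += 1) {
    try {
      const res = await fetch(`${baseUrl}/api/v1/health`);
      if (res.ok) return; // service is up
    } catch {
      // connection refused while the service is still starting; retry
    }
    await new Promise((resolve) => setTimeout(resolve, 2000));
  }
  throw new Error('service did not become healthy in time');
}
```
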
Vishal Sharma
8ebb76bd0c fix: resource attribute tag key type is updated (#2231)
* fix: resource attribute tag key type is updated
from array to string

* chore: convert tag key from array to string
2023-02-14 10:48:47 +05:30
palashgdev
309ffa4989 chore: changes are updated for package.json (#2233) 2023-02-14 10:20:21 +05:30
Ankit Nayan
d787298600 Merge branch 'develop' of https://github.com/SigNoz/signoz into develop 2023-02-12 09:38:23 +05:30
GitStart
7998d474e2 FE: Create a single instance of notification in form of Context Provider and use it across whole app (#2196)
* feat: create notification context provider

* chore: import is updated to absolute import

---------

Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: Palash <palashgdev@gmail.com>
2023-02-12 08:53:00 +05:30
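
The PR above shares one antd notification instance across the app through a context provider. A minimal sketch of that pattern using antd's notification.useNotification:

```tsx
import React, { createContext, useContext } from 'react';
import { notification } from 'antd';

// Single notification instance shared via context, so components use the
// same API object instead of importing antd's global notification directly.
type NotificationApi = ReturnType<typeof notification.useNotification>[0];

const NotificationContext = createContext<NotificationApi | null>(null);

export function NotificationProvider({ children }: { children: React.ReactNode }): JSX.Element {
  const [api, contextHolder] = notification.useNotification();
  return (
    <NotificationContext.Provider value={api}>
      {contextHolder}
      {children}
    </NotificationContext.Provider>
  );
}

export function useNotifications(): NotificationApi {
  const api = useContext(NotificationContext);
  if (!api) throw new Error('useNotifications must be used inside NotificationProvider');
  return api;
}
```
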
Pranay Prateek
7b8ff5a285 Update README.md 2023-02-12 00:29:14 +05:30
Ankit Nayan
cb22aef36f Merge pull request #2225 from SigNoz/release/v0.16.0
Release/v0.16.0
2023-02-11 23:52:58 +05:30
Ankit Nayan
cf93712286 Merge branch 'develop' into release/v0.16.0 2023-02-11 23:15:47 +05:30
Ankit Nayan
a906f94b8a chore: reduce events 2023-02-11 23:15:07 +05:30
Kolesnyk Anton
93b6749920 fix: filters applied in the logs page (#2210)
* fix: filters applied in the logs page

* fix: remove console

* fix: adding of query params from query string to input

* fix: added parser

* chore: useSearch parser is updated with previous hooks

---------

Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-02-11 08:39:34 +05:30
Amol Umbark
8ab527b174 feat: support printing threshold in alert summary and description (#1827) 2023-02-10 23:53:45 +05:30
Prashant Shahi
ad163c2b61 chore: 📌 pin versions: SigNoz 0.16.0, SigNoz OtelCollector 0.66.4
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-02-10 23:50:45 +05:30
Prashant Shahi
21f909f4c0 chore: 🔧 Add low cardinal exception grouping configuration
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-02-10 23:50:15 +05:30
palashgdev
b67206dd65 fix: graph component is memoized (#2223) 2023-02-10 18:02:37 +05:30
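
"Memoized" here is the React sense: wrapping the graph in React.memo so referentially equal props skip a re-render. A generic sketch, not the actual component:

```tsx
import React, { memo } from 'react';

interface GraphProps {
  title: string;
  data: number[];
}

// React.memo skips re-renders when props are referentially equal, which
// matters for components that trigger expensive canvas redraws.
const Graph = memo(
  function Graph({ title, data }: GraphProps): JSX.Element {
    return <canvas aria-label={title} data-points={data.length} />;
  },
  // Optional custom comparison: only redraw when the series actually changes.
  (prev, next) => prev.title === next.title && prev.data === next.data,
);

export default Graph;
```
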
yun asny23
ce5afd31fd fix: indent spaces in yml (#1657) 2023-02-10 16:41:16 +05:30
palashgdev
9a184f5740 fix: dark mode is fixed (#2220) 2023-02-10 13:40:50 +05:30
Amol Umbark
be14f1c32c fix: removed direct ref to form item (#2221)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
2023-02-10 12:29:41 +05:30
GitStart
ae37a608f8 fix: queries B and C coupling in a dashboard panel (#2218)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
2023-02-10 11:00:38 +05:30
Vishal Sharma
aaeb579d0d chore: remove external metrics to trace nav (#2213) 2023-02-09 16:00:48 +05:30
palashgdev
1151e8521e test: traceGraphFilter/utils selectedGroupByValue is added (#2201)
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-02-08 12:58:53 +05:30
Vishal Sharma
d779b83715 feat: navigate to trace from metrics (#2191)
* feat: navigate to trace from metrics

* chore: add sonar back

* chore: refactor
2023-02-08 12:41:55 +05:30
Fernando Pimenta
47a41473df Added support for installing SigNoz on RockyLinux (#2203)
Co-authored-by: Fernando Pimenta <fernandopimenta@tecnosys.com.br>
2023-02-08 10:35:52 +05:30
volodfast
de370d7f0c feat: increase chart point visibility (#2185) 2023-02-07 20:59:11 +05:30
volodfast
8a5b26cefe feat: highlight nearest in chart on hover (#2184)
Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-02-07 16:55:33 +05:30
palashgdev
2a20b6fc86 fix: unit test is fixed with react 18 (#2199) 2023-02-07 16:42:49 +05:30
Vishal Sharma
02ef1744b4 feat: add autocomplete to groupBy filters (#2156)
* feat: add autocomplete to groupBy filters

* chore: handle none groupby in frontend

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-02-07 11:41:09 +05:30
palashgdev
2c973adf0b chore: removed react-vis dependencies (#2182) 2023-02-06 15:20:27 +05:30
Axay Sagathiya
f7ff491d35 Add error check in unit tests. (#1993) 2023-02-06 08:38:47 +05:30
Yash Joshi
6cd341a887 fix: redirect to latest tag release notes (#1970)
* fix: redirect to latest tag release notes

* chore: some refactoring is updated

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-02-03 21:51:18 +05:30
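
The redirect fix above points users at the notes for the latest tag. One way to resolve that tag is GitHub's public releases API; a sketch:

```ts
// Resolve the latest release tag and build a release-notes URL using
// GitHub's /releases/latest endpoint for the SigNoz/signoz repo.
async function latestReleaseNotesUrl(): Promise<string> {
  const res = await fetch('https://api.github.com/repos/SigNoz/signoz/releases/latest');
  if (!res.ok) {
    // Fall back to the releases index if the API call fails.
    return 'https://github.com/SigNoz/signoz/releases';
  }
  const release: { tag_name: string } = await res.json();
  return `https://github.com/SigNoz/signoz/releases/tag/${release.tag_name}`;
}
```
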
Palash Gupta
0832bce955 chore: some of the eslint rules disabling is removed and types are removed (#2152)
Co-authored-by: Chintan Sudani <46838508+csudani7@users.noreply.github.com>
2023-02-03 20:32:22 +05:30
Chintan Sudani
62b2462e03 feat: modified resize table component (#2175)
* fix: Removed Strict mode to stop render twice

* feat: modified resize table component
2023-02-03 18:06:26 +05:30
Chintan Sudani
152846f554 feat: Added Resizable Wrapper for Ant Design Table (#2014)
* feat: Added Resizable Wrapper for AntD Table

* chore: Merging upstream develop into fork

* chore: updated lock file

* fix: Lint issues resolved

* fix: Lint issues resolved

* fix: Types issues

* fix: linting issues

* fix: Types issues

* fix: POC of new resize lib

* fix: linting issues

* chore: resize is updated

* fix: added old lib logic

* fix: removed console.log

* chore: types are updated

* chore: removed un used style

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-02-02 16:53:15 +05:30
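
The resizable wrapper above follows the usual antd + react-resizable recipe: swap the table's header cell for a component that wraps the th element in Resizable. A sketch, assuming react-resizable and its type definitions:

```tsx
import React from 'react';
import { Resizable, ResizeCallbackData } from 'react-resizable';

interface ResizableTitleProps extends React.HTMLAttributes<HTMLElement> {
  width?: number;
  onResize: (e: React.SyntheticEvent, data: ResizeCallbackData) => void;
}

// Custom header cell: columns without a width render a plain <th>;
// the rest get a drag handle that reports width changes upward.
function ResizableTitle({ width, onResize, ...rest }: ResizableTitleProps): JSX.Element {
  if (!width) return <th {...rest} />;
  return (
    <Resizable width={width} height={0} onResize={onResize}>
      <th {...rest} />
    </Resizable>
  );
}

// Wire it into a Table via components={{ header: { cell: ResizableTitle } }}.
export default ResizableTitle;
```
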
GitStart
846da08cbd refactor: antdv5 notifications (#2161)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: Nitesh Singh <nitesh.singh@gitstart.dev>
Co-authored-by: gitstart-app[bot] <57568882+gitstart-app[bot]@users.noreply.github.com>
Co-authored-by: Rubens Rafael <70234898+RubensRafael@users.noreply.github.com>
Co-authored-by: RubensRafael <rubensrafael2@live.com>
Co-authored-by: niteshsingh1357 <niteshsingh1357@gmail.com>
Co-authored-by: gitstart_bot <gitstart_bot@users.noreply.github.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-02-02 11:38:32 +05:30
Palash Gupta
17f32e9765 feat: global time is updated (#2013) 2023-02-02 11:12:12 +05:30
Chintan Sudani
48659a2957 fix: resolved violating issue on change of layout API call (#2164)
* fix: Removed Strict mode to stop render twice

* fix: resolved issue on change of layout API call
2023-02-02 10:52:14 +05:30
GitStart
a2a8a32d1c fix: different time formats in hover legend and x-axis on charts (#2040)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: niteshsingh1357 <niteshsingh1357@gmail.com>
Co-authored-by: Nitesh Singh <nitesh.singh@gitstart.dev>
Co-authored-by: gitstart-app[bot] <57568882+gitstart-app[bot]@users.noreply.github.com>
Co-authored-by: Rafael <rafael.toledo@engenharia.ufjf.br>
Co-authored-by: gitstart_bot <gitstart_bot@users.noreply.github.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-02-01 15:04:17 +05:30
ezio ruan
28f2ee2627 Update README.md (#2139) 2023-01-31 19:38:51 +05:30
Ankit Nayan
3b01bb2614 Merge pull request #2147 from SigNoz/release/v0.15.0
Release/v0.15.0
2023-01-31 16:58:45 +05:30
Prashant Shahi
622e1765cf Merge branch 'develop' into release/v0.15.0 2023-01-31 16:21:38 +05:30
Amol Umbark
faaf0a6e73 fix: solved re-render issue when input fields were edited (#2149)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
2023-01-31 14:46:03 +05:30
Prashant Shahi
4542a51531 Merge branch 'main' into release/v0.15.0 2023-01-31 00:56:31 +05:30
Prashant Shahi
191a538430 chore: 📌 pin versions: SigNoz 0.15.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-31 00:22:47 +05:30
Prashant Shahi
e6ce80213b Merge branch 'develop' into release/v0.15.0 2023-01-31 00:20:08 +05:30
Palash Gupta
3115b32dcd fix: interval is blocked for custom time selection (#2146)
* fix: interval is blocked for custom time selection

* fix: custom is updated

* chore: selectedTime is updated in hidden logic
2023-01-30 19:27:13 +05:30
Chintan Sudani
af272a368b fix: added lazy loading on dashboard (#2133)
* fix: Removed Strict mode to stop render twice

* fix: added lazy loading on dashboard

* fix: suggested changes

* fix: added react-intersection-observer changes

* fix: resolved multiple time api call issue

* chore: variable name is updated

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-30 18:32:05 +05:30
Palash Gupta
b336a6cb45 fix: widget options are now opening (#2141) 2023-01-30 18:06:49 +05:30
Fellipe Montes
b72815ca2f FIX: Exported dashboard includes response of the queries #1981 (#2052)
* clear the queryData
* avoid creation of inline func and move logic to utils
* remove console.log
* fix
2023-01-30 16:07:23 +05:30
Amol Umbark
ed4a01dea6 fix: log issue remove field in query panel (#2130) 2023-01-27 13:27:59 +05:30
Vishal Sharma
1914c3b4a0 chore: update install message in install.sh script (#2131) 2023-01-27 12:53:23 +05:30
Prashant Shahi
3811e96e23 chore: 📌 pin versions: SigNoz OtelCollector 0.66.3 in standalone Docker
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-27 11:11:25 +05:30
Prashant Shahi
8d16493432 chore: 📌 pin versions: SigNoz OtelCollector 0.66.3
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-26 14:30:18 +05:30
Vishal Sharma
db2bfbb887 fix: tag filter query builder (#2125) 2023-01-26 01:18:19 +05:30
Chintan Sudani
213838a021 fix: Chart loaders on reload and change of time interval at dashboard (#2068)
* fix: Chart data logic on dashboard reloads

* fix: linting issues

* fix: added right side loader & css config

* fix: loader condition change

* fix: linting issues

* fix: error state of API

* fix: Resolved suggested changes

* fix: Error state for API Failed

* fix: Default loading state

* fix: linting issues

* fix: Suggested changes

* feat: Added common hook for previous value

* chore: usePrevious is made type safety

* chore: chart data set is updated

* chore: removed eslint rule

* fix: commitlint issue on commit

Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-25 20:31:42 +05:30
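
The "common hook for previous value" mentioned above is usually implemented with a ref that updates after render. A type-safe sketch of the standard shape:

```ts
import { useEffect, useRef } from 'react';

// Returns undefined on the first render, then the value from the render
// before the current one; the ref updates only after each commit.
export function usePrevious<T>(value: T): T | undefined {
  const ref = useRef<T | undefined>(undefined);
  useEffect(() => {
    ref.current = value;
  }, [value]);
  return ref.current;
}
```
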
Pranay Prateek
fd6f9a90e1 removing repostats workflow (#2053)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-01-25 20:13:57 +05:30
Prashant Shahi
13f9922c53 chore(frontend): 🔧 support ARM and copy yarnrc in Dockerfile (#2119)
Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-25 19:58:44 +05:30
Chintan Sudani
a654baaa5b fix: graph flickering issue on trace page (#2120)
* fix: Removed Strict mode to stop render twice

* fix: graph flickering issue on trace page
2023-01-25 19:55:33 +05:30
Palash Gupta
f766435acc feat: popover is added in the trace tag search (#2118)
* feat: popover is updated

* chore: arrow is removed and padding is removed

* chore: width is updated
2023-01-25 18:56:15 +05:30
Marius Kimmina
d7a65ba689 chore: remove not needed code comments (#2054)
Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-25 16:10:22 +05:30
Vishal Sharma
05ce03e67d feat: tag filtering frontend changes (#2116)
feat: tag filtering frontend changes
2023-01-25 15:20:27 +05:30
Palash Gupta
ba6818f487 fix: total count in usage explorer (#2117)
* fix: total count in usage explorer

* chore: no spans found is also wrapped under typography
2023-01-25 14:55:39 +05:30
Srikanth Chekuri
ca53136cbf feat(ui): dashboard variable chaining (#2037)
* feat: dashboard variable chaining

* feat(ui): dashboard variable chaining

* chore: update vars loading

* chore: fix lint

* chore: better dependent vars

* chore: multi dependent variables

* chore: add more user friendly error

* chore: review comments

* chore: address review comments

* chore: remove string assertion

* chore: fix build by updating types

* chore: fix the variable data auto loading

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-01-25 13:22:57 +05:30
Vishal Sharma
c46bef321c feat: tag filter backend changes (#2115) 2023-01-25 12:35:44 +05:30
Palash Gupta
ba8f804b26 fix: yarnrc is added in the root of the frontend (#2114)
Co-authored-by: Prashant Shahi <prashant@signoz.io>
2023-01-25 12:11:22 +05:30
Chintan Sudani
6cc7025e37 fix: Chart is not updating on change of variables (#2020)
* fix: Chart is not updating on change of variables

* fix: Added useLocation hook for pathname

* fix: Lint issues resolved

* fix: Updated logic behind change of variables

* fix: Suggested changes of variable

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-25 10:54:36 +05:30
Palash Gupta
e62e541fc4 FE: added more eslint rule (#2090)
* chore: arrow-body-style func-style is added in the rule

* fix: linting issues fixed

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-24 18:53:04 +05:30
Palash Gupta
2f1ca93eda fix: tags is grabbed from the local state (#2106) 2023-01-24 17:42:48 +05:30
Priyanka Chakraborty
f1c7d72fc5 1375 overview querybuilder (#1983) 2023-01-24 09:30:26 +05:30
Chintan Sudani
a405307c96 fix: Redundant call on resize or rearrange layout on dashboard (#2099)
* fix: Removed Strict mode to stop render twice

* fix: Redundant call on resize or rearrange layout on dashboard

* fix: Resolved suggested changes

* fix: Resolved suggested changes

* chore: some of the refactoring is updated

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-23 20:21:24 +05:30
Ram S Gupta
c85d48d7fa remove no-shadow:off rules from eslint rule list (#2093)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-23 17:15:18 +05:30
Chintan Sudani
75470f6bb9 fix: Removed Strict mode to stop render twice (#2097) 2023-01-23 17:01:45 +05:30
Palash Gupta
f75e688b32 feat: text is handled under light and dark mode (#2087) 2023-01-23 10:40:27 +05:30
Vishal Sharma
5f3ca045df fix: dockerfile clickhouse indentation issue (#2083) 2023-01-20 00:09:46 +05:30
Chintan Sudani
186632af69 fix: Changed Legends UI & Scrollable (#2078)
* fix: Changed Legends UI & Scrollable

* fix: Changed axis label color

* fix: Changed Legends UI & Scrollable

* chore: Removed other issues changes

* fix: linting issues

* fix: changed fontsize of legend

* fix: changed height of legend

* chore: px is updated to rem

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-18 19:53:45 +05:30
Chintan Sudani
fa652be926 fix: Changed axis label color (#2080)
* fix: Changed axis label color

* fix: linting issues

* chore: helpers is updated

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-18 19:40:15 +05:30
volodfast
1e39131c38 feat: drag select timeframe on charts (#2018)
* feat: add drag select functionality to chart

* fix: use redux stored values for time frame selection

* fix: ignore clicks on chart without dragging

* feat: add intersection cursor to chart

* refactor: update drag-select chart plugin

* fix: respond to drag-select mouseup outside of chart

* fix: remove unnecessary chart update

* feat: add drag-select to dashboard charts

* refactor: add util functions to create custom plugin options

* fix: enable custom chart plugins

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2023-01-17 17:00:34 +05:30
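
Drag-select in Chart.js v3 is normally built as a plugin that watches pointer events. A skeleton of that idea; the hook names follow the Chart.js plugin API, and the selection logic is simplified:

```ts
import { Chart, ChartEvent, Plugin } from 'chart.js';

// Record mousedown/mouseup positions and report the selected pixel range;
// a real plugin would also draw the selection overlay and map px to time.
export function createDragSelectPlugin(
  onSelect: (startPx: number, endPx: number) => void,
): Plugin {
  let dragStart: number | null = null;
  return {
    id: 'dragSelect',
    afterEvent(chart: Chart, args: { event: ChartEvent }): void {
      const { type, x } = args.event;
      if (x === null) return;
      if (type === 'mousedown') dragStart = x;
      if (type === 'mouseup' && dragStart !== null) {
        onSelect(Math.min(dragStart, x), Math.max(dragStart, x));
        dragStart = null;
      }
    },
  };
}

// Usage: pass plugins: [createDragSelectPlugin(handler)] in the chart config.
```
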
Ankit Nayan
153e859ac3 Fix/analytics (#2049)
* fix: incorrect calculation
* chore: adding nil check

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-17 15:28:58 +05:30
Fellipe Montes
d1cc29e118 Create: Widget Header in the Loading State #2042 (#2048)
* create a visual loading state with header

* updates loading with WidgetHeader component

* chore: onview and ondelete is updated

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-17 11:37:30 +05:30
Priyanka Chakraborty
972bf94dd0 refactor: tagFilteritems-refactored (#2056)
* refactor: tagFilteritems-refactored

* refactor: wrapper-over-getwidget

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-16 18:05:13 +05:30
Palash Gupta
3632208d45 Fix: live tail memory (#2033)
* feat: react is updated to v18

* feat: logs card is updated
2023-01-16 17:56:46 +05:30
Srikanth Chekuri
cd9768c738 feat: dashboard variable chaining (#2036) 2023-01-16 14:57:04 +05:30
Pranay Prateek
f01b9605db Update README.md 2023-01-16 13:03:15 +05:30
GitStart
eec236af50 Add visual feedback on Copy JSON in Log filter page (#2055)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: niteshsingh1357 <niteshsingh1357@gmail.com>
Co-authored-by: Nitesh Singh <nitesh.singh@gitstart.dev>
Co-authored-by: Thiago Nascimbeni <tnascimbeni@gmail.com>
Co-authored-by: gitstart_bot <gitstart_bot@users.noreply.github.com>
2023-01-16 12:03:35 +05:30
Fellipe Montes
bbff2b459e Fix: Invite links do not work if name is not given when creating the invite #2008 (#2026) 2023-01-13 21:37:36 +05:30
Chintan Sudani
d9535e7a8d fix: Trigger Save layout only on title (#2039)
* fix: Trigger Save layout only on title

* chore: code improvement

* fix: Lint issues resolved

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-13 17:29:51 +05:30
volodfast
a82bbe1a72 chore: update chartjs to version 3.9.1 (#2041) 2023-01-13 17:07:28 +05:30
Fellipe Montes
6812f55152 change CSS and isEllipsed variable (#2035)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-13 14:07:23 +05:30
Chintan Sudani
83163c17cd fix: Added Extra color code to stop repeat same color (#2015) 2023-01-13 13:50:11 +05:30
Palash Gupta
5ed7c9a46e feat: react is updated to v18 (#2030) 2023-01-13 12:01:46 +05:30
Ankit Nayan
2f323056d0 Merge pull request #2034 from SigNoz/release/v0.14.0
Release/v0.14.0
2023-01-12 18:55:14 +05:30
Prashant Shahi
51b583480b chore: 📌 pin versions: SigNoz 0.14.0, SigNoz OtelCollector 0.66.2
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-12 17:56:29 +05:30
Srikanth Chekuri
7b1e2c8b98 fix: use target arch amd64 (#2027) 2023-01-12 11:27:48 +05:30
Srikanth Chekuri
b87f3bdb50 fix: query builder formula fails to eval (#1999)
* fix: query builder formula fails to eval

* fix: result label set without reference

* chore: update tests

Co-authored-by: Prashant Shahi <prashant@signoz.io>
2023-01-11 16:12:47 +05:30
Palash Gupta
2f5908a3dd feat: antd is updated from v4 to v5 (#2012)
* feat: v5 is in progress

* feat: antdv5 is updated

* fix: build is fixed

* fix: default config is over written by custom one

* chore: onchange handler is updated

* chore: overflow is hidden in the layout

* Update index.tsx

* fix: import is fixed

* chore: un used import is fixed

* fix: dark mode is updated in service map

* fix: config dropdown is updated

* fix: logs types is updated

* fix: copy clipboard notification is updated

* chore: layout changes are updated

* chore: colors is updated

* chore: action width is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-01-11 14:39:06 +05:30
Yash Joshi
ca77820e9d refactor: use antd form in organization display name (#2006)
* refactor: use antd form in organization display name

* chore: interface is now named interface

Co-authored-by: Palash <palashgdev@gmail.com>
2023-01-11 00:59:45 +05:30
Marius Kimmina
a4346a2d93 fix(FE): show no No Data on default Dashboards (#2003)
* fix(FE): show no No Data on default Dashboards

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>

* chore: removed un used styles

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-10 23:54:14 +05:30
Srikanth Chekuri
44360ecacf Add support for histogram quantiles (#1533) 2023-01-10 21:42:44 +05:30
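
Histogram quantile support typically follows the Prometheus-style estimate: walk the cumulative bucket counts to the bucket that crosses the target rank, then interpolate linearly inside it. Assuming that approach, with total count N, requested quantile φ, and a crossing bucket (l, u]:

```latex
% c_prev and c are cumulative counts at the bucket's lower and upper bounds
q_{\varphi} \approx l + (u - l)\,\frac{\varphi N - c_{\mathrm{prev}}}{c - c_{\mathrm{prev}}}
```
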
Srikanth Chekuri
b675c3cfec fix: add signoz.collector.id to spanmetrics dimensions (#2001)
* fix: add service.instance.id to spanmetrics dimensions

* chore: update description

* chore: update the resource key
2023-01-10 19:21:17 +05:30
Marius Kimmina
b23d8da96c style: use 'no data' for empty graphs (#2002)
* style: use 'No Data' for empty graphs

* style: use 'No data' for empty graphs

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-01-10 10:44:59 +05:30
Ankit Nayan
215ea8d819 chore: different ticker interval for active user 2023-01-08 23:12:02 +05:30
Ankit Nayan
0c27d5acbc chore: better error handling 2023-01-08 22:49:11 +05:30
Ankit Nayan
435d74c37e Merge pull request #1996 from SigNoz/release/v0.13.1
Release/v0.13.1
2023-01-07 20:56:08 +05:30
Prashant Shahi
b35bdf01cc chore: 📌 pin versions: SigNoz 0.13.1 2023-01-07 18:22:02 +05:30
Ankit Nayan
9b654143bb chore: update latest loggedin user 2023-01-07 02:54:27 +05:30
Ankit Nayan
4841f150f4 fix: minor changes 2023-01-07 02:31:54 +05:30
Ankit Nayan
16a49a8b04 fix: minor changes 2023-01-07 02:21:44 +05:30
Ankit Nayan
1fd819b806 fix: added ratelimit to specific event 2023-01-07 00:16:57 +05:30
Ankit Nayan
cab9e04cdd fix: concurrent writes to map 2023-01-06 16:10:13 +05:30
Ankit Nayan
e8f341b850 Revert "feat: antdv5 is updated (#1880)" (#1991)
This reverts commit 7b86022280.
2023-01-06 13:40:31 +05:30
Ankit Nayan
1f6fcb9b8c Revert "feat: react is updated to v18 (#1948)" (#1990)
This reverts commit 1c7202b5bf.
2023-01-06 13:32:27 +05:30
Palash Gupta
1c7202b5bf feat: react is updated to v18 (#1948)
* feat: v5 is in progress

* feat: antdv5 is updated

* fix: build is fixed

* fix: default config is over written by custom one

* chore: onchange handler is updated

* chore: overflow is hidden in the layout

* feat: react is updated from v17 to v18

* feat: antdv5 is updated (#1880)

* feat: v5 is in progress

* feat: antdv5 is updated

* fix: build is fixed

* fix: default config is over written by custom one

* chore: onchange handler is updated

* chore: overflow is hidden in the layout

* Update index.tsx

* fix: import is fixed

* chore: un used import is fixed

* fix: dark mode is updated in service map

* fix: config dropdown is updated

* fix: logs types is updated

* fix: copy clipboard notification is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>

* chore: all channels are updated to move from useFetch to useQuery

* fix: typescript is fixed

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2023-01-04 22:58:05 +05:30
Ankit Nayan
24ac062bf5 Fix/analytics (#1987)
* fix: added server code to ee
2023-01-04 22:48:38 +05:30
Axay Sagathiya
b776bf5b09 Add Docs to install SQLite3 (#1924)
* add commands to install sqlite3 in Makefile.

* Add code to check if it's running on Linux System.

* Revert "Add code to check if its running on Linux"

This reverts commit 552cfb08c9.

* Revert "add commands to install sqlite3 in Makefi"

This reverts commit 781c23d12d.

* Add Documentation to install SQLite3.

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-04 22:02:48 +05:30
Yash Joshi
144076e029 fix: disable button unless org name is different (#1984) 2023-01-04 18:20:02 +05:30
Vishal Sharma
835251b342 fix: use rpc method and responseStatusCode (#1971)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-04 16:15:08 +05:30
Prashant Shahi
ebbad5812f ci: 👷 fix testing and staging deployments (#1980)
Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-04 14:33:52 +05:30
Palash Gupta
7b86022280 feat: antdv5 is updated (#1880)
* feat: v5 is in progress

* feat: antdv5 is updated

* fix: build is fixed

* fix: default config is over written by custom one

* chore: onchange handler is updated

* chore: overflow is hidden in the layout

* Update index.tsx

* fix: import is fixed

* chore: un used import is fixed

* fix: dark mode is updated in service map

* fix: config dropdown is updated

* fix: logs types is updated

* fix: copy clipboard notification is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2023-01-04 12:48:12 +05:30
Prashant Shahi
da1fd4b0cd ci(deployments): workflows for staging and testing deployments and related changes (#1933)
* chore(Makefile): remove no-cache from all docker build commands
* chore(Makefile): 🔧 update target name
* feat(docker-standalone): introduce tag environment variables for easy custom deployments
* ci(deployments): 👷 workflows for staging and testing deployments
* ci(deployments): 👷 pass DEV_BUILD env to remote host
2023-01-03 22:28:48 +05:30
Prashant Shahi
57d28be9f5 fix: 🐛 resolve redundant metrics issue (#1946)
Signed-off-by: Prashant Shahi <prashant@signoz.io>

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-03 10:55:58 +05:30
Palash Gupta
126c9238ba feat: loading is added in the button (#1927)
* feat: loading is added in the button

* chore: disable condition is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2023-01-02 12:08:35 +05:30
Pranay Prateek
31a3bc09c8 Removing Beta tag from Logs (#1952) 2022-12-31 11:11:48 +05:30
Vishal Sharma
6ba5c0ecad fix: apply filters on count of exceptions (#1945) 2022-12-30 16:46:13 +05:30
Palash Gupta
27cd514fa5 fix: Logs double api is called (#1947) 2022-12-30 13:59:02 +05:30
Yash Joshi
f0e13784e5 fix(sidebar): highlight active feature in nested route (#1929)
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-12-30 01:10:02 +05:30
Yash Joshi
742ceac32c fix(logs): prevent duplicate logs dispatch (#1934)
* fix(logs): prevent duplicate logs dispatch

* refactor: use useMountedState hook

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-12-30 00:51:53 +05:30
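
The duplicate-dispatch fix above leans on a mounted-state hook: async callbacks check it before dispatching, so a response that lands after unmount (or a stale duplicate) does not update state twice. A minimal sketch of the standard shape:

```ts
import { useCallback, useEffect, useRef } from 'react';

// Returns a stable function that reports whether the component is still
// mounted; async handlers call it before dispatching results.
export function useMountedState(): () => boolean {
  const mounted = useRef(false);
  useEffect(() => {
    mounted.current = true;
    return (): void => {
      mounted.current = false;
    };
  }, []);
  return useCallback(() => mounted.current, []);
}
```
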
Ankit Nayan
545d46c39c Merge pull request #1943 from SigNoz/release/v0.13.0
Release/v0.13.0
2022-12-29 17:32:15 +05:30
Prashant Shahi
d134e4f4d9 chore: 📌 pin versions: SigNoz 0.13.0, SigNoz OtelCollector 0.66.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-12-29 14:27:24 +05:30
Ankit Nayan
e03b0aa45f chore/analytics (#1939)
* fix: not capturing empty filters

* feat: removing signoz_ metrics using grep

* fix: initialise companyDomain

* feat: added ttl status
2022-12-29 01:14:57 +05:30
Vishal Sharma
46e131698e fix: exception filter clear (#1936) 2022-12-28 17:48:39 +05:30
Ankit Nayan
d1ee15c372 fix: nil pointer 2022-12-28 15:30:24 +05:30
Ankit Nayan
1e035be978 Merge branch 'develop' into chore/analytics 2022-12-28 15:26:59 +05:30
Vishal Sharma
88a97fc4b8 add exception page filters support (#1919)
* feat: backend changes for supporting exception filters

* feat: frontend changes for exception page filter support

* chore: extractSingleFilterValue is updated

* fix: handle frontend edge case

Co-authored-by: Ankit Nayan <ankit@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-12-28 14:54:15 +05:30
Nityananda Gohain
2e58f6db7a fix: error handling for index removal from selected field (#1935)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-28 14:31:57 +05:30
Amol Umbark
1916fc87b0 fix: added clear filters button (#1920)
* fix: added clear filters button

* fix: removed console log

Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-28 14:30:37 +05:30
Ankit Nayan
d8882acdd7 fix: changed or to and 2022-12-28 02:34:07 +05:30
Ankit Nayan
7f42b39684 fix: changed or to and 2022-12-28 02:33:21 +05:30
Ankit Nayan
b11f79b4c7 Chore/analytics (#1922)
* fix: reduced rate limit to 2 of each events in 1 min

* feat: added new event for length of filters in logs search page

* feat: added distributed cluster info

* fix: length of filters in logs

* feat: dashboard metadata with no rateLimit

* feat: active user

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-12-28 02:16:46 +05:30
Ankit Nayan
c717e39a1a Merge branch 'chore/analytics' of https://github.com/SigNoz/signoz into chore/analytics 2022-12-28 02:10:36 +05:30
Ankit Nayan
c3253687d0 feat: active user 2022-12-28 02:09:44 +05:30
Yash Joshi
895c721b37 fix(version): use link instead of click handler (#1931)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-12-27 23:13:13 +05:30
Vishal Sharma
35f5fb6957 fix: respect durationSort feature flag on getSpanFilters API (#1900)
* fix: respect durationSort feature flag on getSpanFilters API

* chore: update DB query
2022-12-27 21:09:36 +05:30
Palash Gupta
40ec4517c2 fix: per page is added in the dependancy (#1926) 2022-12-27 19:01:56 +05:30
Srikanth Chekuri
48a6f536fa chore: increase dimensions_cache_size for signozspanmetrics processor (#1925) 2022-12-27 15:44:39 +05:30
Palash Gupta
13a6d7f7c6 fix: live tail time out is updated (#1899)
* fix: live tail time out is updated
* Update livetail.ts

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-27 13:36:37 +05:30
Srikanth Chekuri
8b6ed0f951 Merge branch 'develop' into chore/analytics 2022-12-27 12:21:51 +05:30
Srikanth Chekuri
eef48c54f8 fix(query_range): invalid memory address or nil pointer dereference (#1875)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-27 11:28:15 +05:30
Ankit Nayan
aad962d07d feat: dashboard metadata with no rateLimit 2022-12-27 01:10:01 +05:30
Ankit Nayan
18bbb3cf36 fix: length of filters in logs 2022-12-26 23:10:55 +05:30
Ankit Nayan
a3455fb553 feat: added distributed cluster info 2022-12-26 23:01:54 +05:30
Ankit Nayan
ece2988d0d feat: added new event for length of filters in logs search page 2022-12-26 22:11:23 +05:30
Ankit Nayan
db704b212d fix: reduced rate limit to 2 of each events in 1 min 2022-12-26 21:52:54 +05:30
Amol Umbark
4b13b0a8a4 fix: resolves issue related ops not flowing from search box to panel (#1918) 2022-12-26 20:31:50 +05:30
Palash Gupta
6f6499c267 fix: flush logs before starting (#1912)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:25:55 +05:30
Prashant Shahi
3dcb44a758 fix docker-compose for swarm and related changes for distributed clickhouse (#1863)
* chore: 🔧 fix docker-compose.yaml for swarm

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore: 🔧 add .gitkeep files for docker and swarm

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:16:47 +05:30
Palash Gupta
0595cdc7af fix: scroll is added (#1873)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:14:54 +05:30
Palash Gupta
092c02762f feat: add not found when no events are present (#1874)
* chore: not found component is updated
* feat: no events handling is updated

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:14:17 +05:30
Palash Gupta
d1d2829d2b fix: logs issues (#1889)
* changed debounce interval to 600ms

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 16:45:28 +05:30
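
The 600 ms figure above is a debounce interval: the search handler fires only after the user has been idle for that long. A generic sketch:

```ts
// Plain debounce: each call resets the timer, so fn runs once after the
// caller has been quiet for `wait` ms. The 600 comes from the commit text.
function debounce<A extends unknown[]>(fn: (...args: A) => void, wait: number) {
  let timer: ReturnType<typeof setTimeout> | undefined;
  return (...args: A): void => {
    if (timer) clearTimeout(timer);
    timer = setTimeout(() => fn(...args), wait);
  };
}

const onSearch = debounce((query: string) => {
  console.log('fetch logs for', query);
}, 600);
```
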
Palash Gupta
ac446294e7 fix: logs selection of filter is fixed (#1910)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 16:20:34 +05:30
Marius Kimmina
1cceab4d5e fix(FE): remove unnecessary complexity from password check (#1904)
Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>
2022-12-26 16:02:18 +05:30
Ankit Nayan
02898d14f9 fix: removes password validations other than length (#1909) 2022-12-26 15:42:08 +05:30
Nityananda Gohain
09af6c262c fix: proxy_read_timeout updated in nginx conf (#1885)
* fix: proxy_read_timeout updated in nginx conf
* fix: live tail endpoint-flush the headers first

Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 15:29:49 +05:30
Amol Umbark
faeaeb61a0 fix: added validations on query builder (#1906)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 15:10:01 +05:30
Nityananda Gohain
9c80ba6b78 fix: allow multiple spaces between a filter expression (#1897)
* fix: allow multiple spaces between a filter expression

* fix: regex updated to respect spaces between a search string

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-12-26 15:08:43 +05:30
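
The whitespace fix above is the classic move from single-space literals to \s+ in the filter grammar. An illustrative pattern only; the real expression grammar lives in the logs parser:

```ts
// Match `key operator value` while tolerating any run of whitespace
// between tokens, via \s+ instead of a single literal space.
const FILTER_EXPR = /^(\w+)\s+(=|!=|IN|CONTAINS)\s+(.+)$/i;

const match = 'status   IN   (200, 201)'.match(FILTER_EXPR);
// match -> ["status   IN   (200, 201)", "status", "IN", "(200, 201)"]
```
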
Palash Gupta
dbba8b5b55 feat: event time is updated when root span is missing 2022-12-22 17:35:20 +05:30
Pranay Prateek
58ce838023 chore: Updating stale edition message (#1896) 2022-12-22 11:44:28 +05:30
Srikanth Chekuri
5260b152f5 fix: do not show result of sub queries in external calls (#1858) 2022-12-20 19:54:27 +05:30
Ankit Nayan
f2dd254d83 Merge pull request #1849 from SigNoz/release/v0.12.0
Release/v0.12.0
2022-12-11 00:14:59 +05:30
Prashant Shahi
82d53fa45c chore: 📌 pin versions: SigNoz 0.12.0, SigNoz OtelCollector 0.66.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-12-10 20:17:49 +05:30
Zsombor
c38d1c150d Fix case sensitivity in query parsing (#1670)
* Fix case sensitivity in query parsing - now the parser correctly recognizes fields which contain uppercase letters

* fix: logs parser respects the case of fields

Co-authored-by: nityanandagohain <nityanandagohain@gmail.com>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-10 19:27:57 +05:30
Srikanth Chekuri
16170eacc0 Revert "chore: use local table for inner query (#1815)" (#1847)
* Revert "chore: use local table for inner query (#1815)"

This reverts commit 1b52edb056.

* chore: use localhost
2022-12-10 19:25:44 +05:30
Amol Umbark
66ddbfc085 fix: solves issue legend update causing null ch query (#1845)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-10 12:21:20 +05:30
Vishal Sharma
2715ab61a4 chore: introduce docker_multi_node_cluster and by default set to false (#1839)
* chore: introduce docker_multi_node_cluster and by default set to false

* chore(query-service): 🔧 include docker_multi_node_cluster for tests

Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Prashant Shahi <me@prashantshahi.dev>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-09 21:57:25 +05:30
Amol Umbark
4d291e92b9 fix: changed table names in default alert queries (#1843)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
2022-12-09 21:54:51 +05:30
Nityananda Gohain
1b73649f8e fix: add default value for materialized column in distributed logs table (#1835)
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-09 20:18:58 +05:30
Amol Umbark
0abae1c09c feat: show release note in alerts dashboards and services pages (#1840)
* feat: show release note in alerts dashboards and services pages

* fix: made code changes as per review and changed message in release note

* fix: solved build pipeline issue

* fix: solved lint issue

Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-09 20:16:09 +05:30
Pranay Prateek
4d02603aed Update README.md 2022-12-09 14:01:05 +05:30
Pranay Prateek
c58e43a678 Update README.md 2022-12-09 12:54:34 +05:30
Pranay Prateek
b77bbe1e4f Update README.md 2022-12-09 12:50:41 +05:30
Pranay Prateek
d4eb241c04 Update README.md 2022-12-09 12:48:57 +05:30
Pranay Prateek
98e1a77a43 Update README.md 2022-12-09 12:48:30 +05:30
Pranay Prateek
498b04491b updated logs image 2022-12-09 12:42:25 +05:30
Pranay Prateek
4e58414cc2 Update README.md 2022-12-09 12:36:05 +05:30
Pranay Prateek
67943cfec0 Update README.md 2022-12-09 12:32:03 +05:30
Palash Gupta
f170eb1b23 fix: scroll is added in case of extra space (#1838) 2022-12-09 10:00:55 +05:30
Vishal Sharma
6931b18382 fix: remove shared variable in TTL and async TTL queries (#1821)
* fix: remove shared variable in TTL

* fix: set distributed_ddl_task_timeout to 0 for async TTL

* chore: updated distributed_ddl_task_timeout to sync TTL queries
2022-12-07 18:23:01 +05:30
Prashant Shahi
8a9d6f664a fix: 🐛 log parsing issue (#1824)
Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2022-12-07 17:57:55 +05:30
Amol Umbark
8affe8df31 fix: solved issue with google help link (#1826) 2022-12-07 16:10:17 +05:30
Nityananda Gohain
1c8626e933 feat: usage collection updated for ee (#1654)
* feat: usage collection updated with new schema and logic

* fix: added exporter id and common collector id

* fix: upload usage only when license is present

* fix: handle if db doesn't exists

* fix: select query updated for usage collection to support distributed table

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 22:52:39 +05:30
Amol Umbark
87932de668 [feat] ee/google auth implementation (#1775)
* [feat] initial version for google oauth

* chore: arranged the sso packages and added prepare request for google auth

* feat: added google auth config page and backend to handle the request

* chore: code cleanup for domain SSO parsing

* Update constants.go

* chore: moved redirect sso error

* chore: lint issue fixed with domain

* chore: added tooltip for enforce sso and few changes to auth domain

* chore: moved question mark in enforce sso

* fix: resolved pr review comments

* chore: fixed type check for saml config

* fix: fixed saml config form

* chore: added util for transformed form values to samlconfig

Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 22:32:59 +05:30
Srikanth Chekuri
1b52edb056 chore: use local table for inner query (#1816)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 22:31:38 +05:30
Priyanka Chakraborty
5a81557df7 1374 dbcalls querybuilder (#1608)
* refactor: dbcalls-fromPromql-querybuilder


Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 16:52:20 +05:30
Prashant Shahi
8bb3eefeb5 chore(clickhouse): 🔧 include cluster.xml for distributed set up (#1810)
* chore(clickhouse): 🔧 include cluster.xml for distributed set up

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-12-05 17:26:13 +05:30
Amol Umbark
a46f074e22 fix: resolves empty variables issue for imported dashboards (#1808) 2022-12-05 16:48:11 +05:30
Prashant Shahi
88fa3b7699 feat(distributed): create single docker-compose.yaml and CH configuration (#1803)
* feat: setup for distributed clickhouse

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-05 16:24:01 +05:30
Ankit Nayan
7f77bcca2b Feat/distributed ch (#1701)
* feat: support for distributed table

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-12-02 12:30:28 +05:30
Palash Gupta
ab5311caac feat: events is updated by adding the timestamp (#1802)
* feat: events is updated
* chore: title is updated
* feat: trace detail event timestamp is updated
2022-12-02 10:34:06 +05:30
Palash Gupta
8aae9f53a9 feat: search in tags is updated (#1788)
* feat: search in tags is updated

* chore: placeholder is updated
2022-12-01 14:19:12 +05:30
Ankit Nayan
18d80d47e5 Merge pull request #1776 from SigNoz/release/v0.11.4
Release/v0.11.4
2022-11-29 17:36:31 +05:30
Prashant Shahi
8e5522820c chore: 📌 pin versions: SigNoz 0.11.4
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-11-29 17:12:17 +05:30
Palash Gupta
5ae9557293 fix: logs time is fixed (#1772)
* fix: logs parsing is fixed

* fix: start and end time is updated
2022-11-29 14:41:36 +05:30
Palash Gupta
7e590f4bfb feat: meta description and image is updated (#1764)
* feat: meta description is updated

* chore: image is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-28 18:50:17 +05:30
Palash Gupta
ce072bdc3f fix: trace event is now not decoding the events (#1766)
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-28 18:27:09 +05:30
Nityananda Gohain
67c0c9032f fix: logs aggregate endpoint updated to differentiate between params and query string (#1768) 2022-11-28 18:16:21 +05:30
Palash Gupta
6c9036fbf4 fix[logs][FE]: live tail is fixed (#1759)
* fix: live tail is fixed

* fix: graph state is updated

* chore: step size is updated

* chore: xaxis config is updated

* chore: isDisabled state is updated for top navigation

* chore: selected interval is updated in the reducer

* fix: build is fixed

* chore: xAxis config is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-11-28 15:44:33 +05:30
Nityananda Gohain
d06d41af87 fix: parser updated to differentiate between params and query string (#1763) 2022-11-28 14:18:43 +05:30
Amol Umbark
2771d2e774 fix: [alerts] [ch-query] added aliases in metric query result (#1760)
* fix: [alerts] [ch-query] added aliases in metric query result

* fix: added more column type support for target in ch query

* fix: added error handling when data type is unexpected in metric result

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-27 14:29:09 +05:30
Amol Umbark
0cbba071ea fix: [alerts] fixed selected interval for chart preview in ch use case (#1761) 2022-11-25 16:04:09 +05:30
Amol Umbark
7cec2db503 fix: [alerts] solved legend not updating issue in ch query editor (#1757)
* fix: [alerts] solved legend not updating issue in ch query editor

* fix: [alerts]removed console.log

* fix: added jsdoc description tag
2022-11-25 12:16:47 +05:30
Amol Umbark
4b3829fd5b fix: fixed date condition (start and end) while preparing ch query (#1751)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-11-24 18:19:07 +05:30
Vishal Sharma
983ca1ec6a feat: introduce getSubTreeSpans function in clickhouse query builder & introduce smart trace detail algorithm (#1648)
* perf: introduce smart trace detail algorithm
* fix: remove hardcoded levels and handle null levels
* feat: add support for broken trees
* feat: use spanLimit env variable
* fix: handle missing root span
* add root spanId and root name
* use permanent table
* add kind, events and tagmap support
* fix query formation
* increase context timeout to 600s
* perf improvement
* handle error
* return tableName as response to query
* support multiple queries tableName
* perf: improve memory and latency
* feat: add getSubTree custom func and smart trace detail algo to ee
* fix: create new functions for ee
* chore: refactor codebase
* chore: refactor frontend code


Co-authored-by: Ankit Nayan <ankit@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-11-24 18:18:19 +05:30
Amol Umbark
33d34af2a6 feat: added exception based alerts (#1752) 2022-11-24 18:00:02 +05:30
Vishal Sharma
b0ec619881 fix: trace table pagination (#1749)
* fix: trace table pagination

* chore: refactor

* chore: refactor

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-11-24 16:25:26 +05:30
Amol Umbark
220f848b04 feat: [UI] clickhouse queries in alert builder (#1706)
* feat: added ui changes to support clickhouse queries in alert builder

* chore: minor fix to alert rules ui

* feat: alert form changes: ch query support, alert type selection

* chore: resolved review comments

* chore: added list for alert type selection instead

* chore: removed hard coded color and added antd/colors

* fix: resolved some issues found during testing alerts

* fix: moved alert defaults and added default queries for logs and traces

* feat: added default queries for logs and traces to reflect ts vars

* chore: fixed px to rem

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-24 13:21:46 +05:30
Palash Gupta
4727dbc9f0 fix: if invalid switch is disabled (#1656)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-11-24 00:08:56 +05:30
Amol Umbark
00863e54de feat: added ch query support (#1735)
* feat: added ch query support
* fix: added new vars to resolve alert query format issue
* fix: replaced timestamp vars in metric query range

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-11-23 18:49:03 +05:30
Ankit Nayan
e9c47a6a73 Merge branch 'develop' of https://github.com/SigNoz/signoz into develop 2022-11-23 16:58:05 +05:30
Ankit Nayan
88af456915 chore: detect first registration 2022-11-23 16:57:49 +05:30
Ankit Nayan
7ebc94c273 display message updated (#1744)
* display message updated

* chore: display message changed
2022-11-23 16:44:47 +05:30
Palash Gupta
d5bd991417 fix: onApply data is updated (#1655) 2022-11-23 16:25:02 +05:30
Palash Gupta
4c0d573760 fix: Logs issues are fixed (#1727)
* feat: logs is updated
* chore: width:100% is removed
* chore: position of filter is updated
* chore: min time and max time are now tracked from global state


Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-23 13:42:36 +05:30
Vishal Sharma
1273bb5865 fix: getNanoTimestamp function and cache fix (#1737) 2022-11-22 13:13:10 +05:30
Palash Gupta
87502baabf feat: filter is added in exceptions page (#1731)
* feat: filter is added

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2022-11-22 12:08:51 +05:30
Palash Gupta
90a6313423 feat: value graph is updated (#1733) 2022-11-21 21:03:33 +05:30
Palash Gupta
4a244ad7b2 feat: onClick is updated (#1732)
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-21 16:44:41 +05:30
Palash Gupta
db105af89f refactor: some of the styles are removed and used native antd components (#1730) 2022-11-21 13:39:54 +05:30
Palash Gupta
b8c58a9812 chore: removed unnecessary eslint check (#1668)
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-11-18 19:04:40 +05:30
Ankit Nayan
78d2377520 Merge pull request #1722 from SigNoz/release/v0.11.3
Release/v0.11.3
2022-11-16 19:50:55 +05:30
Ankit Nayan
549535d09e Update README.md
Added Ruby

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-11-16 19:12:36 +05:30
Palash Gupta
ac4d35c6c0 chore: alignment is fixed in header (#1723)
* chore: alignment is fixed
2022-11-16 19:08:09 +05:30
Prashant Shahi
ad34c6e25f Merge branch 'develop' into release/v0.11.3 2022-11-16 17:37:56 +05:30
Prashant Shahi
c306701bab chore: 📌 pin versions: SigNoz 0.11.3, SigNoz OtelCollector 0.63.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-11-16 17:33:27 +05:30
Pranay Prateek
fcc725c6e6 Update README.md 2022-11-16 17:17:08 +05:30
Prashant Shahi
d615d7a9e3 Updating collection interval in otelcol configuration files (#1720)
* chore: 🔧 set collection interval of hostmetrics to 30s while others to 60s
2022-11-15 20:33:56 +05:30
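The receiver block after this change, as it appears in the otel-collector-config.yaml hunk later in this compare (the memory scraper is assumed to sit alongside the ones visible in that hunk):

```yaml
receivers:
  hostmetrics:
    collection_interval: 30s  # host metrics every 30s; other intervals stay at 60s
    scrapers:
      cpu: {}
      load: {}
      memory: {}  # assumed; not visible in the hunk below
      disk: {}
      filesystem: {}
      network: {}
```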
Prashant Shahi
622943645f Bump version of clickhouse to 22.8.8 LTS and deploy file changes (#1711)
* chore: 🔥 remove docker-compose-prod.yaml as redundant and update Makefile
* chore: 🔧 scrape otel-collector internal metrics in same container and related changes
* chore: 📌 Bump version of clickhouse to 22.8.8 LTS

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-11-15 20:07:09 +05:30
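The "scrape otel-collector internal metrics in same container" part of this change boils down to a prometheus receiver pointed at the collector's own telemetry port, per the config hunk further down:

```yaml
receivers:
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        # otel-collector internal metrics, scraped from the same container
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector
```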
Srikanth Chekuri
355264a43e chore: bump SigNoz/prometheus to v1.9.76 (#1719) 2022-11-15 18:45:47 +05:30
Srikanth Chekuri
2c7deca2ec fix: include inner panels support and map job,instance correctly (#1718)
* fix: include inner panels support and map job,instance correctly

* chore: remove debug and tidy up bit
2022-11-15 18:23:20 +05:30
Vishal Sharma
e558dcae3a fix: update trace URI when coming from metrics (#1715) 2022-11-15 13:08:48 +05:30
Srikanth Chekuri
4cf3dc2ec3 fix: remove usage of labels object (#1710)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-11-14 22:51:23 +05:30
Palash Gupta
2e124da366 feat: refresh interval is added (#1712)
* feat: refresh interval is added
2022-11-14 22:32:19 +05:30
Vishal Sharma
a50d7f227c Feat: dynamic tooltip (#1705)
* feat: integrate config service with query service
* feat: add tooltip checkpoint
* feat: add support for dark and light mode icons

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-11-14 14:29:13 +05:30
Ankit Nayan
73706d872f Update telemetry.go 2022-11-12 17:19:34 +05:30
Palash Gupta
0480197914 fix Logs contains issue (#1708)
* chore: logs is updated
* chore: contains is updated
2022-11-12 11:37:52 +05:30
Palash Gupta
65af8c1b98 801 dropdown is added in the dashboard page (#1669)
* chore: update the import from constant rather than static string

* chore: removed redundant div

* feat: added auto refresh component

* refactor: top nav is refactored
2022-11-10 20:48:40 +05:30
Nityananda Gohain
a3b03ef0ca fix: parser updated to support escaped quotes in search (#1704)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-11-10 18:24:20 +05:30
Srikanth Chekuri
9735a6e5ce feat: add ability to import Grafana dashboards (#1700)
* feat: add ability to import Grafana dashboards

* chore: remove unnecessary file

* chore: more 9XX support

* chore: some more hacks

* chore: update deps

* chore: arrange equal spaced widgets instead of inheriting from grafana
2022-11-10 16:49:54 +05:30
Vishal Sharma
674883cd18 Feature flagging (#1674)
* feat: introduce feature flagging via env variables
* refactor: enable sorting by default for users
2022-11-09 08:30:00 +05:30
Pang
36315fcf9c fix: make README.zh-cn.md more readable (#1647)
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-03 06:09:15 +05:30
Palash Gupta
46050a217c feat: all traces now open in new tab (#1662) 2022-10-26 12:53:47 +05:30
Ankit Nayan
c9363586e1 Merge branch 'main' into develop 2022-10-17 14:36:38 +05:30
Ankit Nayan
5eed384ffe Merge pull request #1637 from SigNoz/release/v0.11.2
Release/v0.11.2
2022-10-13 16:39:48 +05:30
Prashant Shahi
1b152c19ec ci(e2e): 👷 enable DEV_BUILD flag for query-service (#1636)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-10-13 16:10:36 +05:30
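The DEV_BUILD flag is a single env entry on the workflow's build step; the Makefile hunk later in this compare branches on it to append the dev `LD_FLAGS`. A sketch of the step as it appears in the e2e workflow hunk below (the surrounding workflow file name is not visible in this compare):

```yaml
- name: Build query-service image
  env:
    DEV_BUILD: 1
  run: make build-ee-query-service-amd64
```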
679 changed files with 27531 additions and 22845 deletions

2
.github/CODEOWNERS vendored
View File

@@ -4,4 +4,4 @@
* @ankitnayan
/frontend/ @palashgdev @pranshuchittora
/deploy/ @prashant-shahi
/pkg/query-service/ @srikanthccv
**/query-service/ @srikanthccv

2
.github/config.yml vendored
View File

@@ -17,7 +17,7 @@ newPRWelcomeComment: >
# Comment to be posted on pull requests merged by a first-time user
firstPRMergeComment: >
Congrats on merging your first pull request!
![minion-party](https://i.imgur.com/Xlg59lP.gif)
We here at SigNoz are proud of you! 🥳

View File

@@ -32,6 +32,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Run tests
shell: bash
run: |
make test
- name: Build query-service image
shell: bash
run: |

View File

@@ -16,6 +16,8 @@ jobs:
uses: actions/checkout@v2
- name: Build query-service image
env:
DEV_BUILD: 1
run: make build-ee-query-service-amd64
- name: Build frontend image
@@ -55,7 +57,7 @@ jobs:
--set frontend.service.type=LoadBalancer \
--set queryService.image.tag=$DOCKER_TAG \
--set frontend.image.tag=$DOCKER_TAG
# get pods, services and the container images
kubectl get pods -n platform
kubectl get svc -n platform

View File

@@ -17,4 +17,3 @@ jobs:
uses: hattan/verify-linked-issue-action@v1.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -11,6 +11,6 @@ jobs:
- name: Remove label
uses: buildsville/add-remove-label@v1
with:
label: ok-to-test
label: ok-to-test,testing-deploy
type: remove
token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,25 +0,0 @@
on:
schedule:
# Run this once per day, towards the end of the day for keeping the most
# recent data point most meaningful (hours are interpreted in UTC).
- cron: "0 8 * * *"
workflow_dispatch: # Allow for running this manually.
jobs:
j1:
name: repostats
runs-on: ubuntu-latest
steps:
- name: run-ghrs
uses: jgehrcke/github-repo-stats@v1.1.0
with:
# Define the stats repository (the repo to fetch
# stats for and to generate the report for).
# Remove the parameter when the stats repository
# and the data repository are the same.
repository: signoz/signoz
# Set a GitHub API token that can read the stats
# repository, and that can push to the data
# repository (which this workflow file lives in),
# to store data and the report files.
ghtoken: ${{ github.token }}

View File

@@ -24,4 +24,3 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}

View File

@@ -0,0 +1,40 @@
name: staging-deployment
# Trigger deployment only on push to develop branch
on:
push:
branches:
- develop
jobs:
deploy:
name: Deploy latest develop branch to staging
runs-on: ubuntu-latest
environment: staging
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.8
env:
GITHUB_BRANCH: develop
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export OTELCOL_TAG="main"
docker system prune --force
docker pull signoz/signoz-otel-collector:main
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make run-signoz

View File

@@ -0,0 +1,39 @@
name: testing-deployment
# Trigger deployment only on testing-deploy label on pull request
on:
pull_request:
types: [labeled]
jobs:
deploy:
name: Deploy PR branch to testing
runs-on: ubuntu-latest
environment: testing
if: ${{ github.event.label.name == 'testing-deploy' }}
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.8
env:
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export DEV_BUILD="1"
docker system prune --force
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make run-signoz

4
.gitignore vendored
View File

@@ -52,4 +52,6 @@ ee/query-service/tests/test-deploy/data/
*.db
/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
bin/
bin/
*/query-service/queries.active

View File

@@ -215,9 +215,26 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
# 4. Contribute to Backend (Query-Service) 🌑
[**https://github.com/SigNoz/signoz/tree/develop/pkg/query-service**](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)
**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
## 4.1 To run ClickHouse setup (recommended for local development)
## 4.1 Prerequisites
### 4.1.1 Install SQLite3
- Run `sqlite3` command to check if you already have SQLite3 installed on your machine.
- If not already installed, install it using the command below
- on Linux
- on Debian / Ubuntu
```
sudo apt install sqlite3
```
- on CentOS / Fedora / RedHat
```
sudo yum install sqlite3
```
## 4.2 To run ClickHouse setup (recommended for local development)
- Clone the SigNoz repository and cd into signoz directory,
```

View File

@@ -45,7 +45,7 @@ build-frontend-amd64:
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build --file Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend(used in push pipeline)
@@ -54,7 +54,7 @@ build-push-frontend:
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 \
docker buildx build --file Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of query service
@@ -65,7 +65,7 @@ build-query-service-amd64:
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
@@ -73,7 +73,7 @@ build-push-query-service:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane --no-cache \
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane \
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
@@ -84,11 +84,11 @@ build-ee-query-service-amd64:
@echo "------------------"
@if [ $(DEV_BUILD) != "" ]; then \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
else \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
fi
@@ -98,7 +98,7 @@ build-push-ee-query-service:
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--progress plane --no-cache --push --platform linux/arm64,linux/amd64 \
--progress plane --push --platform linux/arm64,linux/amd64 \
--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
dev-setup:
@@ -119,20 +119,23 @@ down-local:
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
down -v
run-x86:
@docker-compose -f \
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-prod.yaml \
up --build -d
pull-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
down-x86:
@docker-compose -f \
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-prod.yaml \
down -v
run-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
down-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
clear-standalone-data:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-swarm-data:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
test:
go test ./pkg/query-service/app/metrics/...
go test ./pkg/query-service/app/...

View File

@@ -23,7 +23,9 @@
##
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can:
👉 Visualise Metrics, Traces and Logs in a single pane of glass
👉 You can see metrics like p99 latency, error rates for your services, external API calls, and individual endpoints.
@@ -31,11 +33,40 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
👉 Run aggregates on trace data to get business relevant metrics
![screenzy-1644432902955](https://user-images.githubusercontent.com/504541/153270713-1b2156e6-ec03-42de-975b-3c02b8ec1836.png)
<br />
![screenzy-1644432986784](https://user-images.githubusercontent.com/504541/153270725-0efb73b3-06ed-4207-bf13-9b7e2e17c4b8.png)
<br />
![screenzy-1647005040573](https://user-images.githubusercontent.com/504541/157875938-a3d57904-ea6d-4278-b929-bd1408d7f94c.png)
👉 Filter and query logs, build dashboards and alerts based on attributes in logs
👉 Record exceptions automatically in Python, Java, Ruby, and Javascript
👉 Easy to set alerts with DIY query builder
### Application Metrics
![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png)
### Distributed Tracing
<img width="2068" alt="distributed_tracing_2 2" src="https://user-images.githubusercontent.com/83692067/226536447-bae58321-6a22-4ed3-af80-e3e964cb3489.png">
<img width="2068" alt="distributed_tracing_1" src="https://user-images.githubusercontent.com/83692067/226536462-939745b6-4f9d-45a6-8016-814837e7f7b4.png">
### Logs Management
<img width="2068" alt="logs_management" src="https://user-images.githubusercontent.com/83692067/226536482-b8a5c4af-b69c-43d5-969c-338bd5eaf1a5.png">
### Infrastructure Monitoring
<img width="2068" alt="infrastructure_monitoring" src="https://user-images.githubusercontent.com/83692067/226536496-f38c4dbf-e03c-4158-8be0-32d4a61158c7.png">
### Exceptions Monitoring
![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-4188d024-3ac9-4799-be95-f5ea9c45436f.png)
### Alerts
<img width="2068" alt="alerts_management" src="https://user-images.githubusercontent.com/83692067/226536548-2c81e2e8-c12d-47e8-bad7-c6be79055def.png">
<br /><br />
@@ -51,12 +82,16 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
## Features:
- Unified UI for metrics, traces and logs. No need to switch from Prometheus to Jaeger to debug issues, or use a logs tool like Elastic separate from your metrics and traces stack.
- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
- Slowest endpoints in your application
- See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
- Filter traces by service name, operation, latency, error, tags/annotations.
- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.
- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s cluster
- Lightning-quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/))
- End-to-End visibility into infrastructure performance, ingest metrics from all kinds of host environments
- Easy to set alerts with DIY query builder
<br /><br />
@@ -78,6 +113,12 @@ We support [OpenTelemetry](https://opentelemetry.io) as the library which you ca
- Python
- NodeJS
- Go
- PHP
- .NET
- Ruby
- Elixir
- Rust
You can find the complete list of languages here - https://opentelemetry.io/docs/
@@ -123,6 +164,25 @@ Moreover, SigNoz has a few more advanced features compared to Jaeger:
- Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't get aggregates on filtered traces. For example, the p99 latency of requests which have the tag customer_type='premium'. This can be done easily in SigNoz
<p>&nbsp </p>
### SigNoz vs Elastic
- SigNoz logs management is based on ClickHouse, a columnar OLAP datastore, which makes aggregate log analytics queries much more efficient
- 50% lower resource requirement compared to Elastic during ingestion
We have published benchmarks comparing Elastic with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
<p>&nbsp </p>
### SigNoz vs Loki
- SigNoz supports aggregations on high-cardinality data at huge volumes, while Loki doesn't.
- SigNoz supports indexes over high-cardinality data with no limit on the number of indexes, while Loki hits its maximum streams once a few indexes are added.
- Searching over huge volumes of data is difficult and slow in Loki compared to SigNoz
We have published benchmarks comparing Loki with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

View File

@@ -13,14 +13,19 @@
##
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
👉 You can see performance matrices for services, external API calls, and the p99 latency and error rate of each endpoint.
👉 You can see performance metrics for services, external API calls, and the p99 latency and error rate of each endpoint.
👉 Use exact traces to identify what is causing the problem, and view flame graphs (framegraph) of each individual request to find the root cause.
👉 Aggregate trace data to derive business-relevant metrics.
![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
![screenzy-1644432902955](https://user-images.githubusercontent.com/504541/153270713-1b2156e6-ec03-42de-975b-3c02b8ec1836.png)
<br />
![screenzy-1644432986784](https://user-images.githubusercontent.com/504541/153270725-0efb73b3-06ed-4207-bf13-9b7e2e17c4b8.png)
<br />
![screenzy-1647005040573](https://user-images.githubusercontent.com/504541/157875938-a3d57904-ea6d-4278-b929-bd1408d7f94c.png)
<br /><br />
@@ -36,12 +41,12 @@ SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications…
## Features:
- Application overview matrices (matrix) such as RPS, 50/90/99 percentile latencies, and error rate
- Application overview metrics such as RPS, p50/p90/p99 latency quantiles, error rate, etc.
- Slowest endpoints in your application
- View exact request traces to analyze issues in downstream services, slow database queries, and calls to third-party services such as payment gateways
- Filter traces by service name, operation, latency, error, and tags
- Run matrix aggregations on filtered trace data. For example, get the error rate and p99 latency for `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Integrated matrix-and-trace UI. No need to switch from Prometheus to Jaeger to debug issues
- View trace data of specific requests to analyze issues in downstream services, slow database queries, and calls to third-party services such as payment gateways
- Filter traces by service name, operation, latency, error, and tags.
- Aggregate trace data (events/spans) to derive business-relevant metrics. For example, get the error rate and p99 latency of a given segment via filters such as `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch between Prometheus and Jaeger to troubleshoot issues.
<br /><br />
@@ -53,7 +58,7 @@ SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications…
We want to build a self-serve, open-source version of tools like DataDog and NewRelic for companies that have privacy and security concerns about customer data flowing to third parties.
Open source also gives you complete control over configuration, sampling, and launch rates; you can build modules on top of SigNoz to meet specific business needs.
Open source also gives you complete control over configuration, sampling, and uptime; you can build modules on top of SigNoz to meet specific business needs.
### Language support
@@ -71,8 +76,8 @@ SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications…
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
## Getting started
### Deploy with Docker
Follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using Docker
@@ -80,35 +85,34 @@ SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications…
If you run into any issues, this [troubleshooting guide](https://signoz.io/docs/deployment/troubleshooting) will help.
<p>&nbsp </p>
### Deploy on Kubernetes with Helm
Follow the steps [here](https://signoz.io/docs/deployment/helm_chart) to install using Helm charts
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
## Comparisons to Familiar Tools
## Comparison with other solutions
### SigNoz vs Prometheus
Prometheus is fine if you only need matrices, but the current experience of stitching Prometheus & Jaeger together is not good if you want to switch seamlessly between matrices and traces.
Prometheus is fine if you only need to monitor metrics, but the current experience of stitching Prometheus & Jaeger together is not good if you want to switch seamlessly between metrics and traces.
Our goal is to provide an integrated UI between matrices and traces - similar to what SaaS vendors like Datadog offer - with the ability to filter and aggregate traces, which Jaeger currently lacks.
Our goal is to provide a unified UI for metrics and traces - similar to what SaaS vendors like Datadog offer - along with the ability to filter and aggregate traces, which Jaeger currently lacks.
<p>&nbsp </p>
### SigNoz vs Jaeger
Jaeger only does distributed tracing; SigNoz covers both matrices and traces, and log management is also on our roadmap.
Jaeger only does distributed tracing, while SigNoz supports metrics, traces, and logs - the three pillars of observability.
Moreover, SigNoz has some advanced features that Jaeger lacks:
- Jaeger UI cannot show matrices on traces or filtered traces
- Jaeger cannot run aggregations on filtered traces. For example, the p99 latency of all requests with the tag customer_type='premium'; this is easy to do in SigNoz.
- Jaeger UI cannot show metrics on traces or filtered traces
- Jaeger cannot run aggregations on filtered traces, e.g. the p99 latency of all requests with the tag customer_type='premium'. This is easy to do in SigNoz.
<br /><br />
@@ -121,6 +125,23 @@ Jaeger only does distributed tracing; SigNoz covers both matrices and traces, and we…
Not sure how to get started? Just ping us in the `#contributing` channel of our [Slack community](https://signoz.io/slack).
### Project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

View File

@@ -27,12 +27,6 @@ For x86 chip (amd):
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```
For Mac with Apple chip (arm):
```sh
docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
```
Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see
the data generated from hotrod in the SigNoz UI.

View File

@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<clickhouse>
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<zookeeper>
<node index="1">
<host>zookeeper-1</host>
<port>2181</port>
</node>
<!-- <node index="2">
<host>zookeeper-2</host>
<port>2181</port>
</node>
<node index="3">
<host>zookeeper-3</host>
<port>2181</port>
</node> -->
</zookeeper>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<cluster>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
Also it will be nice if the following will be implemented:
- source hostname (see interserver_http_host), but then it will depends from DNS,
it can use IP address instead, but then the you need to get correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>clickhouse</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
<!-- <shard>
<replica>
<host>clickhouse-2</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>clickhouse-3</host>
<port>9000</port>
</replica>
</shard> -->
</cluster>
</remote_servers>
</clickhouse>
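This cluster definition is not baked into the image; the compose files later in the diff mount it into ClickHouse's config.d directory, which is how the `cluster` entry under remote_servers takes effect:

```yaml
# docker-compose: cluster.xml is mounted alongside the main config and users files.
services:
  clickhouse:
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
```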

View File

@@ -236,8 +236,8 @@
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
<!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile> -->
<!-- <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
<!-- dhparams are optional. You can delete the <dhParamsFile> element.
To generate dhparams, use the following command:
openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
@@ -618,148 +618,6 @@
</jdbc_bridge>
-->
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<!-- Test only shard config for testing distributed storage -->
<test_shard_localhost>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
Also it will be nice if the following will be implemented:
- source hostname (see interserver_http_host), but then it will depends from DNS,
it can use IP address instead, but then the you need to get correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>localhost</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
</test_shard_localhost>
<test_cluster_one_shard_three_replicas_localhost>
<shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.3</host>
<port>9000</port>
</replica>
</shard>
<!--shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.3</host>
<port>9000</port>
</replica>
</shard-->
</test_cluster_one_shard_three_replicas_localhost>
<test_cluster_two_shards_localhost>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_localhost>
<test_cluster_two_shards>
<shard>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards>
<test_cluster_two_shards_internal_replication>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_internal_replication>
<test_shard_localhost_secure>
<shard>
<replica>
<host>localhost</host>
<port>9440</port>
<secure>1</secure>
</replica>
</shard>
</test_shard_localhost_secure>
<test_unavailable_shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>1</port>
</replica>
</shard>
</test_unavailable_shard>
</remote_servers>
<!-- The list of hosts allowed to use in URL-related storage engines and table functions.
If this section is not present in configuration, all hosts are allowed.
-->
@@ -786,29 +644,6 @@
Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
-->
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<!--
<zookeeper>
<node>
<host>example1</host>
<port>2181</port>
</node>
<node>
<host>example2</host>
<port>2181</port>
</node>
<node>
<host>example3</host>
<port>2181</port>
</node>
</zookeeper>
-->
<!-- Substitutions for parameters of replicated tables.
Optional. If you don't use replicated tables, you could omit that.

View File

@@ -1,30 +1,127 @@
version: "3.9"
x-clickhouse-defaults: &clickhouse-defaults
image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true
deploy:
restart_policy:
condition: on-failure
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-clickhouse-depend: &clickhouse-depend
depends_on:
- clickhouse
# - clickhouse-2
# - clickhouse-3
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.0
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
image: clickhouse/clickhouse-server:22.4.5-alpine
<<: *clickhouse-defaults
hostname: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
tty: true
# - "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
deploy:
restart_policy:
condition: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
# clickhouse-2:
# <<: *clickhouse-defaults
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# clickhouse-3:
# <<: *clickhouse-defaults
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.23.0-0.2
@@ -40,7 +137,7 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.11.2
image: signoz/query-service:0.18.3
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
@@ -59,18 +156,17 @@ services:
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
deploy:
restart_policy:
condition: on-failure
depends_on:
- clickhouse
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.11.2
image: signoz/frontend:0.18.3
deploy:
restart_policy:
condition: on-failure
@@ -83,7 +179,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.55.3
image: signoz/signoz-otel-collector:0.66.7
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
@@ -91,6 +187,8 @@ services:
- /var/lib/docker/containers:/var/lib/docker/containers:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
@@ -107,11 +205,10 @@ services:
mode: global
restart_policy:
condition: on-failure
depends_on:
- clickhouse
<<: *clickhouse-depend
otel-collector-metrics:
image: signoz/signoz-otel-collector:0.55.3
image: signoz/signoz-otel-collector:0.66.7
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
@@ -123,8 +220,7 @@ services:
deploy:
restart_policy:
condition: on-failure
depends_on:
- clickhouse
<<: *clickhouse-depend
hotrod:
image: jaegertracing/example-hotrod:1.30
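A note on the pattern introduced in this file: top-level keys prefixed with `x-` are ignored by compose, so they can hold reusable blocks, while `&name` defines a YAML anchor and `<<: *name` merges it into a service. A minimal illustration of the same mechanics:

```yaml
# Reusable block, invisible to compose itself because of the x- prefix.
x-clickhouse-depend: &clickhouse-depend
  depends_on:
    - clickhouse

services:
  query-service:
    # Merge key: expands to the depends_on block defined above.
    <<: *clickhouse-depend
    image: signoz/query-service:0.18.3
```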

View File

@@ -47,7 +47,7 @@ receivers:
# thrift_binary:
# endpoint: 0.0.0.0:6832
hostmetrics:
collection_interval: 60s
collection_interval: 30s
scrapers:
cpu: {}
load: {}
@@ -55,6 +55,18 @@ receivers:
disk: {}
filesystem: {}
network: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
# otel-collector internal metrics
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
@@ -65,16 +77,19 @@ processors:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
timeout: 2s
override: false
signozspanmetrics/prometheus:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 10000
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: 'signoz.collector.id'
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -94,15 +109,20 @@ processors:
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
prometheus:
endpoint: 0.0.0.0:8889
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 5s
sending_queue:
queue_size: 100
@@ -134,10 +154,14 @@ service:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/hostmetrics:
metrics/generic:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]

View File

@@ -2,27 +2,21 @@ receivers:
prometheus:
config:
scrape_configs:
# otel-collector internal metrics
- job_name: "otel-collector"
scrape_interval: 60s
dns_sd_configs:
- names:
- 'tasks.otel-collector'
type: 'A'
port: 8888
# otel-collector-metrics internal metrics
- job_name: "otel-collector-metrics"
- job_name: otel-collector-metrics
scrape_interval: 60s
static_configs:
- targets:
- localhost:8888
- localhost:8888
labels:
job_name: otel-collector-metrics
# SigNoz span metrics
- job_name: "signozspanmetrics-collector"
- job_name: signozspanmetrics-collector
scrape_interval: 60s
dns_sd_configs:
- names:
- 'tasks.otel-collector'
type: 'A'
- tasks.otel-collector
type: A
port: 8889
processors:
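The move from static_configs to dns_sd_configs matters in swarm mode: Docker resolves `tasks.<service>` to one A record per running task, so every collector replica gets scraped individually instead of a single load-balanced address. The span-metrics job from the hunk above, restated on its own:

```yaml
scrape_configs:
  # SigNoz span metrics: one scrape target per otel-collector task/replica
  - job_name: signozspanmetrics-collector
    scrape_interval: 60s
    dns_sd_configs:
      - names:
          - tasks.otel-collector
        type: A
        port: 8889
```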

View File

@@ -30,6 +30,8 @@ server {
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
# redirect server error pages to the static page /50x.html

View File

@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<clickhouse>
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<zookeeper>
<node index="1">
<host>zookeeper-1</host>
<port>2181</port>
</node>
<!-- <node index="2">
<host>zookeeper-2</host>
<port>2181</port>
</node>
<node index="3">
<host>zookeeper-3</host>
<port>2181</port>
</node> -->
</zookeeper>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<cluster>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
Also it will be nice if the following will be implemented:
- source hostname (see interserver_http_host), but then it will depends from DNS,
it can use IP address instead, but then the you need to get correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>clickhouse</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
<!-- <shard>
<replica>
<host>clickhouse-2</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>clickhouse-3</host>
<port>9000</port>
</replica>
</shard> -->
</cluster>
</remote_servers>
</clickhouse>

View File

@@ -236,8 +236,8 @@
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
<!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile> -->
<!-- <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
<!-- dhparams are optional. You can delete the <dhParamsFile> element.
To generate dhparams, use the following command:
openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
@@ -618,148 +618,6 @@
</jdbc_bridge>
-->
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<!-- Test only shard config for testing distributed storage -->
<test_shard_localhost>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
Also it will be nice if the following will be implemented:
- source hostname (see interserver_http_host), but then it will depends from DNS,
it can use IP address instead, but then the you need to get correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>localhost</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
</test_shard_localhost>
<test_cluster_one_shard_three_replicas_localhost>
<shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.3</host>
<port>9000</port>
</replica>
</shard>
<!--shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.3</host>
<port>9000</port>
</replica>
</shard-->
</test_cluster_one_shard_three_replicas_localhost>
<test_cluster_two_shards_localhost>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_localhost>
<test_cluster_two_shards>
<shard>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards>
<test_cluster_two_shards_internal_replication>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_internal_replication>
<test_shard_localhost_secure>
<shard>
<replica>
<host>localhost</host>
<port>9440</port>
<secure>1</secure>
</replica>
</shard>
</test_shard_localhost_secure>
<test_unavailable_shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>1</port>
</replica>
</shard>
</test_unavailable_shard>
</remote_servers>
<!-- The list of hosts allowed to use in URL-related storage engines and table functions.
If this section is not present in configuration, all hosts are allowed.
-->
@@ -786,29 +644,6 @@
Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
-->
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<!--
<zookeeper>
<node>
<host>example1</host>
<port>2181</port>
</node>
<node>
<host>example2</host>
<port>2181</port>
</node>
<node>
<host>example3</host>
<port>2181</port>
</node>
</zookeeper>
-->
<!-- Substitutions for parameters of replicated tables.
Optional. If you don't use replicated tables, you could omit that.
@@ -1070,7 +905,8 @@
<dictionaries_config>*_dictionary.xml</dictionaries_config>
<!-- Configuration of user defined executable functions -->
<user_defined_executable_functions_config>*_function.xml</user_defined_executable_functions_config>
<user_defined_executable_functions_config>*function.xml</user_defined_executable_functions_config>
<user_scripts_path>/var/lib/clickhouse/user_scripts/</user_scripts_path>
<!-- Uncomment if you want data to be compressed 30-100% better.
Don't do that if you just started using ClickHouse.

View File

@@ -0,0 +1,21 @@
<functions>
<function>
<type>executable</type>
<name>histogramQuantile</name>
<return_type>Float64</return_type>
<argument>
<type>Array(Float64)</type>
<name>buckets</name>
</argument>
<argument>
<type>Array(Float64)</type>
<name>counts</name>
</argument>
<argument>
<type>Float64</type>
<name>quantile</name>
</argument>
<format>CSV</format>
<command>./histogramQuantile</command>
</function>
</functions>
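The argument shapes mirror Prometheus-style histograms: `buckets` carries the bucket upper bounds and `counts` the corresponding cumulative counts, with the quantile read off by linear interpolation inside the bucket where the target rank falls. Assuming that standard interpolation (the `./histogramQuantile` executable itself is not part of this diff), the value returned for a quantile $q$ would be:

$$
Q(q) = b_{i-1} + \left(b_i - b_{i-1}\right)\,\frac{q\,c_n - c_{i-1}}{c_i - c_{i-1}}
$$

where $b_i$ are the bucket upper bounds, $c_i$ the cumulative counts, $c_n$ the total count, and $i$ the first bucket with $c_i \ge q\,c_n$.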

View File

@@ -2,7 +2,7 @@ version: "2.4"
services:
clickhouse:
image: clickhouse/clickhouse-server:22.4.5-alpine
image: clickhouse/clickhouse-server:22.8.8-alpine
container_name: clickhouse
# ports:
# - "9000:9000"
@@ -41,7 +41,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: otel-collector
image: signoz/signoz-otel-collector:0.55.3
image: signoz/signoz-otel-collector:0.66.7
command: ["--config=/etc/otel-collector-config.yaml"]
# user: root # required for reading docker container logs
volumes:
@@ -67,7 +67,7 @@ services:
otel-collector-metrics:
container_name: otel-collector-metrics
image: signoz/signoz-otel-collector:0.55.3
image: signoz/signoz-otel-collector:0.66.7
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml

View File

@@ -28,7 +28,7 @@ services:
- "8080:8080"
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3

View File

@@ -1,44 +0,0 @@
version: "2.4"
services:
query-service:
image: signoz/query-service:0.11.2
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
timeout: 5s
retries: 3
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.11.2
container_name: frontend
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@@ -1,31 +1,145 @@
version: "2.4"
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-clickhouse-depend: &clickhouse-depend
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.0
container_name: zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
image: clickhouse/clickhouse-server:22.4.5-alpine
# ports:
# - "9000:9000"
# - "8123:8123"
tty: true
<<: *clickhouse-defaults
container_name: clickhouse
hostname: clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
- ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:0.23.0-0.2
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.0-0.2}
volumes:
- ./data/alertmanager:/data
depends_on:
@@ -39,7 +153,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:0.11.2
image: signoz/query-service:${DOCKER_TAG:-0.18.3}
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
@@ -60,16 +174,14 @@ services:
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
interval: 30s
timeout: 5s
retries: 3
depends_on:
clickhouse:
condition: service_healthy
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.11.2
image: signoz/frontend:${DOCKER_TAG:-0.18.3}
container_name: frontend
restart: on-failure
depends_on:
@@ -81,7 +193,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.55.3
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.7}
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
@@ -89,6 +201,8 @@ services:
- /var/lib/docker/containers:/var/lib/docker/containers:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
@@ -102,12 +216,10 @@ services:
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
<<: *clickhouse-depend
otel-collector-metrics:
image: signoz/signoz-otel-collector:0.55.3
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.7}
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
@@ -117,20 +229,18 @@ services:
# - "13133:13133" # Health check extension
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
<<: *clickhouse-depend
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"

View File

@@ -47,7 +47,7 @@ receivers:
# thrift_binary:
# endpoint: 0.0.0.0:6832
hostmetrics:
collection_interval: 60s
collection_interval: 30s
scrapers:
cpu: {}
load: {}
@@ -55,6 +55,19 @@ receivers:
disk: {}
filesystem: {}
network: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
# otel-collector internal metrics
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
@@ -64,12 +77,16 @@ processors:
signozspanmetrics/prometheus:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 10000
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: 'signoz.collector.id'
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -89,7 +106,6 @@ processors:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
timeout: 2s
override: false
extensions:
health_check:
@@ -102,16 +118,21 @@ extensions:
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
prometheus:
endpoint: 0.0.0.0:8889
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 5s
sending_queue:
queue_size: 100
@@ -138,10 +159,14 @@ service:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/hostmetrics:
metrics/generic:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]

View File

@@ -6,20 +6,16 @@ receivers:
prometheus:
config:
scrape_configs:
# otel-collector internal metrics
- job_name: "otel-collector"
scrape_interval: 60s
static_configs:
- targets:
- otel-collector:8888
# otel-collector-metrics internal metrics
- job_name: "otel-collector-metrics"
- job_name: otel-collector-metrics
scrape_interval: 60s
static_configs:
- targets:
- localhost:8888
- localhost:8888
labels:
job_name: otel-collector-metrics
# SigNoz span metrics
- job_name: "signozspanmetrics-collector"
- job_name: signozspanmetrics-collector
scrape_interval: 60s
static_configs:
- targets:

View File

@@ -0,0 +1,237 @@
package main
import (
"bufio"
"fmt"
"math"
"os"
"sort"
"strconv"
"strings"
)
// NOTE: executable must be built with target OS and architecture set to linux/amd64
// env GOOS=linux GOARCH=amd64 go build -o histogramQuantile histogramQuantile.go
// The following code is adapted from the following source:
// https://github.com/prometheus/prometheus/blob/main/promql/quantile.go
type bucket struct {
upperBound float64
count float64
}
// buckets implements sort.Interface.
type buckets []bucket
func (b buckets) Len() int { return len(b) }
func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }
// bucketQuantile calculates the quantile 'q' based on the given buckets. The
// buckets will be sorted by upperBound by this function (i.e. no sorting
// needed before calling this function). The quantile value is interpolated
// assuming a linear distribution within a bucket. However, if the quantile
// falls into the highest bucket, the upper bound of the 2nd highest bucket is
// returned. A natural lower bound of 0 is assumed if the upper bound of the
// lowest bucket is greater than 0. In that case, interpolation in the lowest bucket
// happens linearly between 0 and the upper bound of the lowest bucket.
// However, if the lowest bucket has an upper bound less than or equal to 0, this upper
// bound is returned if the quantile falls into the lowest bucket.
//
// There are a number of special cases (once we have a way to report errors
// happening during evaluations of AST functions, we should report those
// explicitly):
//
// If 'buckets' has 0 observations, NaN is returned.
//
// If 'buckets' has fewer than 2 elements, NaN is returned.
//
// If the highest bucket is not +Inf, NaN is returned.
//
// If q==NaN, NaN is returned.
//
// If q<0, -Inf is returned.
//
// If q>1, +Inf is returned.
func bucketQuantile(q float64, buckets buckets) float64 {
if math.IsNaN(q) {
return math.NaN()
}
if q < 0 {
return math.Inf(-1)
}
if q > 1 {
return math.Inf(+1)
}
sort.Sort(buckets)
if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
return math.NaN()
}
buckets = coalesceBuckets(buckets)
ensureMonotonic(buckets)
if len(buckets) < 2 {
return math.NaN()
}
observations := buckets[len(buckets)-1].count
if observations == 0 {
return math.NaN()
}
rank := q * observations
b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })
if b == len(buckets)-1 {
return buckets[len(buckets)-2].upperBound
}
if b == 0 && buckets[0].upperBound <= 0 {
return buckets[0].upperBound
}
var (
bucketStart float64
bucketEnd = buckets[b].upperBound
count = buckets[b].count
)
if b > 0 {
bucketStart = buckets[b-1].upperBound
count -= buckets[b-1].count
rank -= buckets[b-1].count
}
return bucketStart + (bucketEnd-bucketStart)*(rank/count)
}
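// Worked example (illustrative, not part of the original source): with
// cumulative buckets {0.1:10, 0.5:60, 1:90, +Inf:100} and q=0.9,
// rank = 0.9*100 = 90. The search finds b=2 (the first count >= 90), so
// bucketStart=0.5, bucketEnd=1, count=90-60=30, rank=90-60=30, and the
// result is 0.5 + (1-0.5)*(30/30) = 1.0.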
// coalesceBuckets merges buckets with the same upper bound.
//
// The input buckets must be sorted.
func coalesceBuckets(buckets buckets) buckets {
last := buckets[0]
i := 0
for _, b := range buckets[1:] {
if b.upperBound == last.upperBound {
last.count += b.count
} else {
buckets[i] = last
last = b
i++
}
}
buckets[i] = last
return buckets[:i+1]
}
// The assumption that bucket counts increase monotonically with increasing
// upperBound may be violated during:
//
// * Recording rule evaluation of histogram_quantile, especially when rate()
// has been applied to the underlying bucket timeseries.
// * Evaluation of histogram_quantile computed over federated bucket
// timeseries, especially when rate() has been applied.
//
// This is because scraped data is not made available to rule evaluation or
// federation atomically, so some buckets are computed with data from the
// most recent scrapes, but the other buckets are missing data from the most
// recent scrape.
//
// Monotonicity is usually guaranteed because if a bucket with upper bound
// u1 has count c1, then any bucket with a higher upper bound u > u1 must
// have counted all c1 observations and perhaps more, so that c >= c1.
//
// Randomly interspersed partial sampling breaks that guarantee, and rate()
// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
// monotonicity is broken. It is exacerbated by rate() because under normal
// operation, cumulative counting of buckets will cause the bucket counts to
// diverge such that small differences from missing samples are not a problem.
// rate() removes this divergence.
//
// bucketQuantile depends on that monotonicity to do a binary search for the
// bucket with the φ-quantile count, so breaking the monotonicity
// guarantee causes bucketQuantile() to return undefined (nonsense) results.
//
// As a somewhat hacky solution until ingestion is atomic per scrape, we
// calculate the "envelope" of the histogram buckets, essentially removing
// any decreases in the count between successive buckets.
func ensureMonotonic(buckets buckets) {
max := buckets[0].count
for i := 1; i < len(buckets); i++ {
switch {
case buckets[i].count > max:
max = buckets[i].count
case buckets[i].count < max:
buckets[i].count = max
}
}
}
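// Example (illustrative): counts [10, 7, 12] violate monotonicity; after
// ensureMonotonic they become [10, 10, 12], restoring the invariant that
// the binary search in bucketQuantile relies on.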
// End of copied code.
func readLines() []string {
r := bufio.NewReader(os.Stdin)
bytes := []byte{}
lines := []string{}
for {
line, isPrefix, err := r.ReadLine()
if err != nil {
break
}
bytes = append(bytes, line...)
if !isPrefix {
str := strings.TrimSpace(string(bytes))
if len(str) > 0 {
lines = append(lines, str)
bytes = []byte{}
}
}
}
if len(bytes) > 0 {
lines = append(lines, string(bytes))
}
return lines
}
func main() {
lines := readLines()
for _, text := range lines {
// Example input (bucket bounds, cumulative counts, quantile):
// "[1,2,4,8,16]","[1,5,8,10,14]",0.9
parts := strings.Split(text, "\",")
var bucketNumbers []float64
// Strip the ends with square brackets
text = parts[0][2 : len(parts[0])-1]
// Parse the bucket bounds
for _, num := range strings.Split(text, ",") {
num = strings.TrimSpace(num)
number, err := strconv.ParseFloat(num, 64)
if err == nil {
bucketNumbers = append(bucketNumbers, number)
}
}
var bucketCounts []float64
// Strip the ends with square brackets
text = parts[1][2 : len(parts[1])-1]
// Parse the bucket counts
for _, num := range strings.Split(text, ",") {
num = strings.TrimSpace(num)
number, err := strconv.ParseFloat(num, 64)
if err == nil {
bucketCounts = append(bucketCounts, number)
}
}
// Parse the quantile
q, err := strconv.ParseFloat(parts[2], 64)
var b buckets
if err == nil {
for i := 0; i < len(bucketNumbers); i++ {
b = append(b, bucket{upperBound: bucketNumbers[i], count: bucketCounts[i]})
}
}
fmt.Println(bucketQuantile(q, b))
}
}

View File

@@ -30,6 +30,8 @@ server {
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
# redirect server error pages to the static page /50x.html

View File

@@ -51,7 +51,7 @@ check_os() {
os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"
case "$os_name" in
Ubuntu*)
Ubuntu*|Pop!_OS)
desired_os=1
os="ubuntu"
package_manager="apt-get"
@@ -81,6 +81,11 @@ check_os() {
os="centos"
package_manager="yum"
;;
Rocky*)
desired_os=1
os="centos"
package_manager="yum"
;;
SLES*)
desired_os=1
os="sles"
@@ -223,7 +228,7 @@ wait_for_containers_start() {
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
if [[ status_code -eq 200 ]]; then
break
else
@@ -511,13 +516,15 @@ else
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
echo " By default, retention period is set to 7 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
echo " To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "👉 Need help Getting Started?"
echo "👉 Need help in Getting Started?"
echo -e "Join us on Slack https://signoz.io/slack"
echo ""
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"

View File

@@ -1,4 +1,4 @@
FROM golang:1.17-buster AS builder
FROM golang:1.18-buster AS builder
# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
ARG LD_FLAGS

View File

@@ -9,6 +9,7 @@ import (
"go.signoz.io/signoz/ee/query-service/license"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/version"
)
@@ -68,57 +69,75 @@ func (ah *APIHandler) CheckFeature(f string) bool {
}
// RegisterRoutes registers routes for this handler on the given router
func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
// note: add ee override methods first
// routes available only in ee version
router.HandleFunc("/api/v1/licenses",
baseapp.AdminAccess(ah.listLicenses)).
am.AdminAccess(ah.listLicenses)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/licenses",
baseapp.AdminAccess(ah.applyLicense)).
am.AdminAccess(ah.applyLicense)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/featureFlags",
baseapp.OpenAccess(ah.getFeatureFlags)).
am.OpenAccess(ah.getFeatureFlags)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/loginPrecheck",
baseapp.OpenAccess(ah.precheckLogin)).
am.OpenAccess(ah.precheckLogin)).
Methods(http.MethodGet)
// paid plans specific routes
router.HandleFunc("/api/v1/complete/saml",
baseapp.OpenAccess(ah.receiveSAML)).
am.OpenAccess(ah.receiveSAML)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/complete/google",
am.OpenAccess(ah.receiveGoogleAuth)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/orgs/{orgId}/domains",
baseapp.AdminAccess(ah.listDomainsByOrg)).
am.AdminAccess(ah.listDomainsByOrg)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/domains",
baseapp.AdminAccess(ah.postDomain)).
am.AdminAccess(ah.postDomain)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/domains/{id}",
baseapp.AdminAccess(ah.putDomain)).
am.AdminAccess(ah.putDomain)).
Methods(http.MethodPut)
router.HandleFunc("/api/v1/domains/{id}",
baseapp.AdminAccess(ah.deleteDomain)).
am.AdminAccess(ah.deleteDomain)).
Methods(http.MethodDelete)
// base overrides
router.HandleFunc("/api/v1/version", baseapp.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/invite/{token}", baseapp.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/register", baseapp.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/login", baseapp.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
ah.APIHandler.RegisterRoutes(router)
router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
router.HandleFunc("/api/v2/metrics/query_range", am.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)
// PAT APIs
router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.createPAT)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/pat", am.OpenAccess(ah.getPATs)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/pat/{id}", am.OpenAccess(ah.deletePAT)).Methods(http.MethodDelete)
ah.APIHandler.RegisterRoutes(router, am)
}
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
version := version.GetVersion()
ah.WriteJSON(w, r, map[string]string{"version": version, "ee": "Y"})
versionResponse := basemodel.GetVersionResponse{
Version: version,
EE: "Y",
SetupCompleted: ah.SetupCompleted,
}
ah.WriteJSON(w, r, versionResponse)
}

View File

@@ -8,9 +8,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
@@ -90,9 +88,16 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
// get invite object
invite, err := baseauth.ValidateInvite(ctx, req)
if err != nil || invite == nil {
if err != nil {
zap.S().Errorf("failed to validate invite token", err)
RespondError(w, model.BadRequest(err), nil)
return
}
if invite == nil {
zap.S().Errorf("failed to validate invite token: it is either empty or invalid", err)
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
return
}
// get auth domain from email domain
@@ -184,114 +189,149 @@ func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, resp)
}
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// this is the source url that initiated the login request
func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
ssoError := []byte("Login failed. Please contact your system administrator")
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
base64.StdEncoding.Encode(dst, ssoError)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
}
// receiveGoogleAuth completes google OAuth response and forwards a request
// to front-end to sign user in
func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()
var apierr basemodel.BaseApiError
redirectOnError := func() {
ssoError := []byte("Login failed. Please contact your system administrator")
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
base64.StdEncoding.Encode(dst, ssoError)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, string(dst)), http.StatusMovedPermanently)
}
if !ah.CheckFeature(model.SSO) {
zap.S().Errorf("[ReceiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
err := r.ParseForm()
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
redirectOnError()
q := r.URL.Query()
if errType := q.Get("error"); errType != "" {
zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
return
}
// the relay state is sent when a login request is submitted to
// Idp.
relayState := r.FormValue("RelayState")
zap.S().Debug("[ReceiveML] relay state", zap.String("relayState", relayState))
relayState := q.Get("state")
zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
redirectOnError()
zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}
// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// derive domain id from relay state now
var domainIdStr string
for k, v := range parsedState.Query() {
if k == "domainId" && len(v) > 0 {
domainIdStr = strings.Replace(v[0], ":", "-", -1)
}
}
domainId, err := uuid.Parse(domainIdStr)
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to process request- failed to parse domain id ifrom relay", zap.Error(err))
redirectOnError()
handleSsoError(w, r, redirectUri)
return
}
domain, apierr := ah.AppDao().GetDomain(ctx, domainId)
if (apierr != nil) || domain == nil {
zap.S().Errorf("[ReceiveSAML] failed to process request- invalid domain", domainIdStr, zap.Error(apierr))
redirectOnError()
// now that we have domain, use domain to fetch sso settings.
// prepare google callback handler using parsedState -
// which contains redirect URL (front-end endpoint)
callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
identity, err := callbackHandler.HandleCallback(r)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
http.Redirect(w, r, nextPage, http.StatusSeeOther)
}
// receiveSAML completes a SAML request and gets user logged in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// this is the source url that initiated the login request
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()
if !ah.CheckFeature(model.SSO) {
zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
err := r.ParseForm()
if err != nil {
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}
// the relay state is sent when a login request is submitted to
// Idp.
relayState := r.FormValue("RelayState")
zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}
// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}
sp, err := domain.PrepareSamlRequest(parsedState)
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to prepare saml request for domain (%s): %v", domainId, err)
redirectOnError()
zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domainId, err)
redirectOnError()
zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
if assertionInfo.WarningInfo.InvalidTime {
zap.S().Errorf("[ReceiveSAML] expired saml response for organization (%s): %v", domainId, err)
redirectOnError()
zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
email := assertionInfo.NameID
// user email found, now start preparing jwt response
userPayload, baseapierr := ah.AppDao().GetUserByEmail(ctx, email)
if baseapierr != nil {
zap.S().Errorf("[ReceiveSAML] failed to find or register a new user for email %s and org %s", email, domainId, zap.Error(baseapierr.Err))
redirectOnError()
if email == "" {
zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
handleSsoError(w, r, redirectUri)
return
}
tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to generate access token for email %s and org %s", email, domainId, zap.Error(err))
redirectOnError()
zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
userID := userPayload.User.Id
nextPage := fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
userID,
tokenStore.RefreshJwt)
http.Redirect(w, r, nextPage, http.StatusMovedPermanently)
http.Redirect(w, r, nextPage, http.StatusSeeOther)
}

View File

@@ -0,0 +1,236 @@
package api
import (
"bytes"
"fmt"
"net/http"
"sync"
"text/template"
"time"
"go.signoz.io/signoz/pkg/query-service/app/metrics"
"go.signoz.io/signoz/pkg/query-service/app/parser"
"go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
querytemplate "go.signoz.io/signoz/pkg/query-service/utils/queryTemplate"
"go.uber.org/zap"
)
func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.CustomMetricsFunction) {
zap.S().Info("CustomMetricsFunction feature is not enabled in this plan")
ah.APIHandler.QueryRangeMetricsV2(w, r)
return
}
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
if apiErrorObj != nil {
zap.S().Errorf(apiErrorObj.Err.Error())
RespondError(w, apiErrorObj, nil)
return
}
// prometheus instant query needs same timestamp
if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.PROM {
metricsQueryRangeParams.Start = metricsQueryRangeParams.End
}
// round down the end to the nearest multiple of the step
if metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER {
end := (metricsQueryRangeParams.End) / 1000
step := metricsQueryRangeParams.Step
metricsQueryRangeParams.End = (end / step * step) * 1000
}
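// e.g. (illustrative): End=1681900123456 ms with Step=60 s gives
// end=1681900123 s, and 1681900123/60*60 = 1681900080, so End becomes
// 1681900080000 ms, aligned to the step boundary.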
type channelResult struct {
Series []*basemodel.Series
TableName string
Err error
Name string
Query string
}
execClickHouseQueries := func(queries map[string]string) ([]*basemodel.Series, []string, error, map[string]string) {
var seriesList []*basemodel.Series
var tableName []string
ch := make(chan channelResult, len(queries))
var wg sync.WaitGroup
for name, query := range queries {
wg.Add(1)
go func(name, query string) {
defer wg.Done()
seriesList, tableName, err := ah.opts.DataConnector.GetMetricResultEE(r.Context(), query)
for _, series := range seriesList {
series.QueryName = name
}
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
return
}
ch <- channelResult{Series: seriesList, TableName: tableName}
}(name, query)
}
wg.Wait()
close(ch)
var errs []error
errQuriesByName := make(map[string]string)
// read values from the channel
for r := range ch {
if r.Err != nil {
errs = append(errs, r.Err)
errQuriesByName[r.Name] = r.Query
continue
}
seriesList = append(seriesList, r.Series...)
tableName = append(tableName, r.TableName)
}
if len(errs) != 0 {
return nil, nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
}
return seriesList, tableName, nil, nil
}
execPromQueries := func(metricsQueryRangeParams *basemodel.QueryRangeParamsV2) ([]*basemodel.Series, error, map[string]string) {
var seriesList []*basemodel.Series
ch := make(chan channelResult, len(metricsQueryRangeParams.CompositeMetricQuery.PromQueries))
var wg sync.WaitGroup
for name, query := range metricsQueryRangeParams.CompositeMetricQuery.PromQueries {
if query.Disabled {
continue
}
wg.Add(1)
go func(name string, query *basemodel.PromQuery) {
var seriesList []*basemodel.Series
defer wg.Done()
tmpl := template.New("promql-query")
tmpl, tmplErr := tmpl.Parse(query.Query)
if tmplErr != nil {
ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
return
}
var queryBuf bytes.Buffer
tmplErr = tmpl.Execute(&queryBuf, metricsQueryRangeParams.Variables)
if tmplErr != nil {
ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
return
}
query.Query = queryBuf.String()
queryModel := basemodel.QueryRangeParams{
Start: time.UnixMilli(metricsQueryRangeParams.Start),
End: time.UnixMilli(metricsQueryRangeParams.End),
Step: time.Duration(metricsQueryRangeParams.Step * int64(time.Second)),
Query: query.Query,
}
promResult, _, err := ah.opts.DataConnector.GetQueryRangeResult(r.Context(), &queryModel)
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query.Query}
return
}
matrix, _ := promResult.Matrix()
for _, v := range matrix {
var s basemodel.Series
s.QueryName = name
s.Labels = v.Metric.Copy().Map()
for _, p := range v.Points {
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.V})
}
seriesList = append(seriesList, &s)
}
ch <- channelResult{Series: seriesList}
}(name, query)
}
wg.Wait()
close(ch)
var errs []error
errQuriesByName := make(map[string]string)
// read values from the channel
for r := range ch {
if r.Err != nil {
errs = append(errs, r.Err)
errQuriesByName[r.Name] = r.Query
continue
}
seriesList = append(seriesList, r.Series...)
}
if len(errs) != 0 {
return nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
}
return seriesList, nil, nil
}
var seriesList []*basemodel.Series
var tableName []string
var err error
var errQuriesByName map[string]string
switch metricsQueryRangeParams.CompositeMetricQuery.QueryType {
case basemodel.QUERY_BUILDER:
runQueries := metrics.PrepareBuilderMetricQueries(metricsQueryRangeParams, constants.SIGNOZ_TIMESERIES_TABLENAME)
if runQueries.Err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: runQueries.Err}, nil)
return
}
seriesList, tableName, err, errQuriesByName = execClickHouseQueries(runQueries.Queries)
case basemodel.CLICKHOUSE:
queries := make(map[string]string)
for name, chQuery := range metricsQueryRangeParams.CompositeMetricQuery.ClickHouseQueries {
if chQuery.Disabled {
continue
}
tmpl := template.New("clickhouse-query")
tmpl, err := tmpl.Parse(chQuery.Query)
if err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
return
}
var query bytes.Buffer
// replace go template variables
querytemplate.AssignReservedVars(metricsQueryRangeParams)
err = tmpl.Execute(&query, metricsQueryRangeParams.Variables)
if err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
return
}
queries[name] = query.String()
}
seriesList, tableName, err, errQuriesByName = execClickHouseQueries(queries)
case basemodel.PROM:
seriesList, err, errQuriesByName = execPromQueries(metricsQueryRangeParams)
default:
err = fmt.Errorf("invalid query type")
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, errQuriesByName)
return
}
if err != nil {
apiErrObj := &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
return
}
if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
len(seriesList) > 1 &&
(metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER ||
metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.CLICKHOUSE) {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: fmt.Errorf("invalid: query resulted in more than one series for value type")}, nil)
return
}
type ResponseFormat struct {
ResultType string `json:"resultType"`
Result []*basemodel.Series `json:"result"`
TableName []string `json:"tableName"`
}
resp := ResponseFormat{ResultType: "matrix", Result: seriesList, TableName: tableName}
ah.Respond(w, resp)
}

View File

@@ -0,0 +1,107 @@
package api
import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.uber.org/zap"
)
func generatePATToken() string {
// Generate a 32-byte random token.
token := make([]byte, 32)
// NOTE: the error from crypto/rand.Read is ignored here; it can only fail
// if the OS entropy source is unavailable.
rand.Read(token)
// Encode the token in base64.
encodedToken := base64.StdEncoding.EncodeToString(token)
return encodedToken
}
func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
req := model.PAT{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
// All the PATs are associated with the user creating the PAT. Hence, the permissions
// associated with the PAT are also equivalent to those of the user.
req.UserID = user.Id
req.CreatedAt = time.Now().Unix()
req.Token = generatePATToken()
zap.S().Debugf("Got PAT request: %+v", req)
if apierr := ah.AppDao().CreatePAT(ctx, &req); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, &req)
}
func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
zap.S().Infof("Get PATs for user: %+v", user.Id)
pats, apierr := ah.AppDao().ListPATs(ctx, user.Id)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, pats)
}
func (ah *APIHandler) deletePAT(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
id := mux.Vars(r)["id"]
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
pat, apierr := ah.AppDao().GetPATByID(ctx, id)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
if pat.UserID != user.Id {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: fmt.Errorf("unauthorized PAT delete request"),
}, nil)
return
}
zap.S().Debugf("Delete PAT with id: %+v", id)
if apierr := ah.AppDao().DeletePAT(ctx, id); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, map[string]string{"data": "pat deleted successfully"})
}
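For reference, a minimal client-side sketch of exercising the PAT routes registered above. Only the paths and HTTP methods come from this diff; the base URL, the bearer-JWT Authorization header, and the "name" field in the request body are assumptions for illustration.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	base := "http://localhost:8080" // assumed query-service address
	// assumed request body; the server decodes it into model.PAT
	body := bytes.NewBufferString(`{"name":"ci-token"}`)
	req, _ := http.NewRequest(http.MethodPost, base+"/api/v1/pat", body)
	req.Header.Set("Authorization", "Bearer <jwt>") // assumed auth scheme
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // echoes the PAT, including the generated token
}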

View File

@@ -0,0 +1,39 @@
package api
import (
"net/http"
"strconv"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.SmartTraceDetail) {
zap.S().Info("SmartTraceDetail feature is not enabled in this plan")
ah.APIHandler.SearchTraces(w, r)
return
}
traceId, spanId, levelUpInt, levelDownInt, err := baseapp.ParseSearchTracesParams(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
return
}
spanLimit, err := strconv.Atoi(constants.SpanLimitStr)
if err != nil {
zap.S().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable: ", err)
return
}
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm)
if ah.HandleError(w, err, http.StatusBadRequest) {
return
}
ah.WriteJSON(w, r, result)
}

View File

@@ -0,0 +1,401 @@
package db
import (
"context"
"crypto/md5"
"encoding/json"
"fmt"
"reflect"
"regexp"
"sort"
"strings"
"time"
"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
// GetMetricResultEE runs the query and returns list of time series
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
defer utils.Elapsed("GetMetricResult")()
zap.S().Infof("Executing metric result query: %s", query)
var hash string
// If the getSubTreeSpans function is used in the clickhouse query
if strings.Contains(query, "getSubTreeSpans(") {
var err error
query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
// errors built with fmt.Errorf are never equal under ==; compare messages instead
if err != nil && err.Error() == "No spans found for the given query" {
return nil, "", nil
}
if err != nil {
return nil, "", err
}
}
rows, err := r.conn.Query(ctx, query)
zap.S().Debug(query)
if err != nil {
zap.S().Debug("Error in processing query: ", err)
return nil, "", fmt.Errorf("error in processing query")
}
var (
columnTypes = rows.ColumnTypes()
columnNames = rows.Columns()
vars = make([]interface{}, len(columnTypes))
)
for i := range columnTypes {
vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
}
// when group by is applied, each combination of the cartesian product
// of attributes is a separate series. each item in metricPointsMap
// represents a unique series.
metricPointsMap := make(map[string][]basemodel.MetricPoint)
// attribute key-value pairs for each group selection
attributesMap := make(map[string]map[string]string)
defer rows.Close()
for rows.Next() {
if err := rows.Scan(vars...); err != nil {
return nil, "", err
}
var groupBy []string
var metricPoint basemodel.MetricPoint
groupAttributes := make(map[string]string)
// Assuming that the end result row contains a timestamp, a value and optional labels
// Label key and value are both strings.
for idx, v := range vars {
colName := columnNames[idx]
switch v := v.(type) {
case *string:
// special case for returning all labels
if colName == "fullLabels" {
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
return nil, "", err
}
for key, val := range metric {
groupBy = append(groupBy, val)
groupAttributes[key] = val
}
} else {
groupBy = append(groupBy, *v)
groupAttributes[colName] = *v
}
case *time.Time:
metricPoint.Timestamp = v.UnixMilli()
case *float64:
metricPoint.Value = *v
case **float64:
// clickhouse seems to return this type when the column is derived from
// a ratio of aggregates, e.g. SELECT count(*) / count(*)
floatVal := *v
if floatVal != nil {
metricPoint.Value = *floatVal
}
case *float32:
float32Val := float32(*v)
metricPoint.Value = float64(float32Val)
case *uint8, *uint64, *uint16, *uint32:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
}
case *int8, *int16, *int32, *int64:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
zap.S().Errorf("invalid var found in metric builder query result", v, colName)
}
}
sort.Strings(groupBy)
key := strings.Join(groupBy, "")
attributesMap[key] = groupAttributes
metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
}
var seriesList []*basemodel.Series
for key := range metricPointsMap {
points := metricPointsMap[key]
// the first point in each series could be invalid since the
// aggregations are applied with a point from the previous series
if len(points) > 1 {
points = points[1:]
}
attributes := attributesMap[key]
series := basemodel.Series{Labels: attributes, Points: points}
seriesList = append(seriesList, &series)
}
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
// if err != nil {
// zap.S().Error("Error in dropping temporary table: ", err)
// return nil, err
// }
if hash == "" {
return seriesList, hash, nil
} else {
return seriesList, "getSubTreeSpans" + hash, nil
}
}
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
zap.S().Debugf("Executing getSubTreeSpans function")
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
// process the query to fetch subTree query
var subtreeInput string
query, subtreeInput, hash = processQuery(query, hash)
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
if err != nil {
zap.S().Error("Error in dropping temporary table: ", err)
return query, hash, err
}
// Create temporary table to store the getSubTreeSpans() results
zap.S().Debugf("Creating temporary table getSubTreeSpans%s", hash)
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
if err != nil {
zap.S().Error("Error in creating temporary table: ", err)
return query, hash, err
}
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
getSpansSubQuery := subtreeInput
// Execute the subTree query
zap.S().Debugf("Executing subTree query: %s", getSpansSubQuery)
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
// zap.S().Info(getSpansSubQuery)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return query, hash, fmt.Errorf("Error in processing sql query")
}
var searchScanResponses []basemodel.SearchSpanDBResponseItem
// TODO : @ankit: I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this as an improvement later.
// Fetch all the spans of the same TraceID so that we can build the subtree
modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
if len(getSpansSubQueryDBResponses) == 0 {
return query, hash, fmt.Errorf("No spans found for the given query")
}
zap.S().Debugf("Executing query to fetch all the spans from the same TraceID: %s", modelQuery)
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return query, hash, fmt.Errorf("Error in processing sql query")
}
// Process model to fetch the spans
zap.S().Debugf("Processing model to fetch the spans")
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
for _, item := range searchScanResponses {
var jsonItem basemodel.SearchSpanResponseItem
json.Unmarshal([]byte(item.Model), &jsonItem)
jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano())
if jsonItem.Events == nil {
jsonItem.Events = []string{}
}
searchSpanResponses = append(searchSpanResponses, jsonItem)
}
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
// Use a map of pointers to the spans to avoid duplicates and save memory
zap.S().Debugf("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans%s", hash)
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
if err != nil {
zap.S().Error("Error in getSubTreeAlgorithm function: ", err)
return query, hash, err
}
zap.S().Debugf("Preparing batch to store subtree spans in temporary table getSubTreeSpans%s", hash)
statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
if err != nil {
zap.S().Error("Error in preparing batch statement: ", err)
return query, hash, err
}
for _, span := range treeSearchResponse {
var parentID string
if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" {
parentID = span.References[0].SpanId
}
err = statement.Append(
time.Unix(0, int64(span.TimeUnixNano)),
span.TraceID,
span.SpanID,
parentID,
span.RootSpanID,
span.ServiceName,
span.Name,
span.RootName,
uint64(span.DurationNano),
int8(span.Kind),
span.TagMap,
span.Events,
)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return query, hash, err
}
}
zap.S().Debugf("Inserting the subtree spans in temporary table getSubTreeSpans%s", hash)
err = statement.Send()
if err != nil {
zap.S().Error("Error in sending statement: ", err)
return query, hash, err
}
return query, hash, nil
}
func processQuery(query string, hash string) (string, string, string) {
re3 := regexp.MustCompile(`getSubTreeSpans`)
submatchall3 := re3.FindAllStringIndex(query, -1)
getSubtreeSpansMatchIndex := submatchall3[0][1]
query2countParenthesis := query[getSubtreeSpansMatchIndex:]
sqlCompleteIndex := 0
countParenthesisImbalance := 0
for i, char := range query2countParenthesis {
if string(char) == "(" {
countParenthesisImbalance += 1
}
if string(char) == ")" {
countParenthesisImbalance -= 1
}
if countParenthesisImbalance == 0 {
sqlCompleteIndex = i
break
}
}
subtreeInput := query2countParenthesis[1:sqlCompleteIndex]
// hash the subtreeInput
hmd5 := md5.Sum([]byte(subtreeInput))
hash = fmt.Sprintf("%x", hmd5)
// Reformat the query to use the getSubTreeSpans function
query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:]
return query, subtreeInput, hash
}
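// Illustrative walk-through (not from the original source): for
//   select count() from getSubTreeSpans(select * from t where x=1) where name='a'
// the parenthesis balancing extracts subtreeInput = "select * from t where x=1",
// hash becomes its md5 hex digest, and the query is rewritten to read from the
// temporary table, roughly: "select count() from getSubTreeSpans<hash>  where name='a'".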
// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans
func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) {
var spans []*model.SpanForTraceDetails
for _, spanItem := range payload {
var parentID string
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
parentID = spanItem.References[0].SpanId
}
span := &model.SpanForTraceDetails{
TimeUnixNano: spanItem.TimeUnixNano,
SpanID: spanItem.SpanID,
TraceID: spanItem.TraceID,
ServiceName: spanItem.ServiceName,
Name: spanItem.Name,
Kind: spanItem.Kind,
DurationNano: spanItem.DurationNano,
TagMap: spanItem.TagMap,
ParentID: parentID,
Events: spanItem.Events,
HasError: spanItem.HasError,
}
spans = append(spans, span)
}
zap.S().Debug("Building Tree")
roots, err := buildSpanTrees(&spans)
if err != nil {
return nil, err
}
searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem)
// Every span fetched by the getSubTreeSpans input SQL query is considered a root
// For each root, get the subtree spans
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
targetSpan := &model.SpanForTraceDetails{}
// zap.S().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
// Search target span object in the tree
for _, root := range roots {
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
if targetSpan != nil {
break
}
if err != nil {
zap.S().Error("Error during BreadthFirstSearch(): ", err)
return nil, err
}
}
if targetSpan == nil {
return nil, nil
}
// Build subtree for the target span
// Mark the target span as root by setting parent ID as empty string
targetSpan.ParentID = ""
preParents := []*model.SpanForTraceDetails{targetSpan}
children := []*model.SpanForTraceDetails{}
// Get the subtree child spans
for i := 0; len(preParents) != 0; i++ {
parents := []*model.SpanForTraceDetails{}
for _, parent := range preParents {
children = append(children, parent.Children...)
parents = append(parents, parent.Children...)
}
preParents = parents
}
resultSpans := children
// Add the target span to the result spans
resultSpans = append(resultSpans, targetSpan)
for _, item := range resultSpans {
references := []basemodel.OtelSpanRef{
{
TraceId: item.TraceID,
SpanId: item.ParentID,
RefType: "CHILD_OF",
},
}
if item.Events == nil {
item.Events = []string{}
}
searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{
TimeUnixNano: item.TimeUnixNano,
SpanID: item.SpanID,
TraceID: item.TraceID,
ServiceName: item.ServiceName,
Name: item.Name,
Kind: item.Kind,
References: references,
DurationNano: item.DurationNano,
TagMap: item.TagMap,
Events: item.Events,
HasError: item.HasError,
RootSpanID: getSpansSubQueryDBResponse.SpanID,
RootName: targetSpan.Name,
}
}
}
return searchSpansResult, nil
}
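// Editor's sketch (not part of this diff): the child-collection loop above is
// a plain frontier expansion that descends one tree level per iteration. The
// same pattern on a hypothetical Node type:
package main
import "fmt"
type Node struct {
	ID       string
	Children []*Node
}
func descendants(root *Node) []*Node {
	frontier := []*Node{root}
	var out []*Node
	for len(frontier) != 0 {
		next := []*Node{}
		for _, n := range frontier {
			out = append(out, n.Children...)
			next = append(next, n.Children...)
		}
		frontier = next
	}
	return out
}
func main() {
	c := &Node{ID: "c"}
	b := &Node{ID: "b", Children: []*Node{c}}
	a := &Node{ID: "a", Children: []*Node{b}}
	fmt.Println(len(descendants(a))) // 2: b and c
}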

View File

@@ -6,6 +6,7 @@ import (
"github.com/jmoiron/sqlx"
basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/interfaces"
)
type ClickhouseReader struct {
@@ -14,8 +15,8 @@ type ClickhouseReader struct {
*basechr.ClickHouseReader
}
func NewDataConnector(localDB *sqlx.DB, promConfigPath string) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath)
func NewDataConnector(localDB *sqlx.DB, promConfigPath string, lm interfaces.FeatureLookup) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm)
return &ClickhouseReader{
conn: ch.GetConn(),
appdb: localDB,

View File

@@ -0,0 +1,222 @@
package db
import (
"errors"
"strconv"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
// SmartTraceAlgorithm finds the target span and builds a tree of spans around it,
// bounded by the given levelUp, levelDown and spanLimit parameters
func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
var spans []*model.SpanForTraceDetails
// Build a slice of spans from the payload
for _, spanItem := range payload {
var parentID string
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
parentID = spanItem.References[0].SpanId
}
span := &model.SpanForTraceDetails{
TimeUnixNano: spanItem.TimeUnixNano,
SpanID: spanItem.SpanID,
TraceID: spanItem.TraceID,
ServiceName: spanItem.ServiceName,
Name: spanItem.Name,
Kind: spanItem.Kind,
DurationNano: spanItem.DurationNano,
TagMap: spanItem.TagMap,
ParentID: parentID,
Events: spanItem.Events,
HasError: spanItem.HasError,
}
spans = append(spans, span)
}
// Build span trees from the spans
roots, err := buildSpanTrees(&spans)
if err != nil {
return nil, err
}
targetSpan := &model.SpanForTraceDetails{}
// Find the target span in the span trees
for _, root := range roots {
targetSpan, err = breadthFirstSearch(root, targetSpanId)
if targetSpan != nil {
break
}
if err != nil {
zap.S().Error("Error during BreadthFirstSearch(): ", err)
return nil, err
}
}
// If the target span is not found, return span not found error
if targetSpan == nil {
return nil, errors.New("Span not found")
}
// Build the final result
parents := []*model.SpanForTraceDetails{}
// Get the parent spans of the target span up to the given levelUp parameter and spanLimit
preParent := targetSpan
for i := 0; i < levelUp+1; i++ {
if i == levelUp {
preParent.ParentID = ""
}
if spanLimit-len(preParent.Children) <= 0 {
parents = append(parents, preParent)
parents = append(parents, preParent.Children[:spanLimit]...)
spanLimit -= (len(preParent.Children[:spanLimit]) + 1)
preParent.ParentID = ""
break
}
parents = append(parents, preParent)
parents = append(parents, preParent.Children...)
spanLimit -= (len(preParent.Children) + 1)
preParent = preParent.ParentSpan
if preParent == nil {
break
}
}
// Get the child spans of the target span until the given levelDown and spanLimit
preParents := []*model.SpanForTraceDetails{targetSpan}
children := []*model.SpanForTraceDetails{}
for i := 0; i < levelDown && len(preParents) != 0 && spanLimit > 0; i++ {
parents := []*model.SpanForTraceDetails{}
for _, parent := range preParents {
if spanLimit-len(parent.Children) <= 0 {
children = append(children, parent.Children[:spanLimit]...)
spanLimit -= len(parent.Children[:spanLimit])
break
}
children = append(children, parent.Children...)
parents = append(parents, parent.Children...)
}
preParents = parents
}
// Store the final list of spans in the resultSpanSet map to avoid duplicates
resultSpansSet := make(map[*model.SpanForTraceDetails]struct{})
resultSpansSet[targetSpan] = struct{}{}
for _, parent := range parents {
resultSpansSet[parent] = struct{}{}
}
for _, child := range children {
resultSpansSet[child] = struct{}{}
}
searchSpansResult := []basemodel.SearchSpansResult{{
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
Events: make([][]interface{}, len(resultSpansSet)),
},
}
// Convert the resultSpansSet map to searchSpansResult
i := 0 // index for spans
for item := range resultSpansSet {
references := []basemodel.OtelSpanRef{
{
TraceId: item.TraceID,
SpanId: item.ParentID,
RefType: "CHILD_OF",
},
}
referencesStringArray := []string{}
for _, item := range references {
referencesStringArray = append(referencesStringArray, item.ToString())
}
keys := make([]string, 0, len(item.TagMap))
values := make([]string, 0, len(item.TagMap))
for k, v := range item.TagMap {
keys = append(keys, k)
values = append(values, v)
}
if item.Events == nil {
item.Events = []string{}
}
searchSpansResult[0].Events[i] = []interface{}{
item.TimeUnixNano,
item.SpanID,
item.TraceID,
item.ServiceName,
item.Name,
strconv.Itoa(int(item.Kind)),
strconv.FormatInt(item.DurationNano, 10),
keys,
values,
referencesStringArray,
item.Events,
item.HasError,
}
i++ // increment index
}
return searchSpansResult, nil
}
// buildSpanTrees builds trees of spans from a list of spans.
func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTraceDetails, error) {
// Build a map of spanID to span for fast lookup
var roots []*model.SpanForTraceDetails
spans := *spansPtr
mapOfSpans := make(map[string]*model.SpanForTraceDetails, len(spans))
for _, span := range spans {
if span.ParentID == "" {
roots = append(roots, span)
}
mapOfSpans[span.SpanID] = span
}
// Build the span tree by adding children to the parent spans
for _, span := range spans {
if span.ParentID == "" {
continue
}
parent := mapOfSpans[span.ParentID]
// If the parent span is not found, add current span to list of roots
if parent == nil {
// zap.S().Debug("Parent Span not found parent_id: ", span.ParentID)
roots = append(roots, span)
span.ParentID = ""
continue
}
span.ParentSpan = parent
parent.Children = append(parent.Children, span)
}
return roots, nil
}
// breadthFirstSearch performs a breadth-first search on the span tree to find the target span.
func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*model.SpanForTraceDetails, error) {
queue := []*model.SpanForTraceDetails{spansPtr}
visited := make(map[string]bool)
for len(queue) > 0 {
current := queue[0]
visited[current.SpanID] = true
queue = queue[1:]
if current.SpanID == targetId {
return current, nil
}
for _, child := range current.Children {
if ok, _ := visited[child.SpanID]; !ok {
queue = append(queue, child)
}
}
}
return nil, nil
}
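// Editor's sketch (not part of this diff): minimal usage of the two helpers
// above within this package, with illustrative span IDs:
func findSpanExample() (*model.SpanForTraceDetails, error) {
	spans := []*model.SpanForTraceDetails{
		{SpanID: "a"}, // empty ParentID marks a root
		{SpanID: "b", ParentID: "a"},
		{SpanID: "c", ParentID: "b"},
	}
	roots, err := buildSpanTrees(&spans)
	if err != nil {
		return nil, err
	}
	for _, root := range roots {
		target, err := breadthFirstSearch(root, "c")
		if err != nil {
			return nil, err
		}
		if target != nil {
			return target, nil // "c" is found two levels below the root
		}
	}
	return nil, errors.New("span not found")
}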

View File

@@ -1,8 +1,11 @@
package app
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // http profiler
@@ -20,12 +23,20 @@ import (
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/interfaces"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
"go.signoz.io/signoz/pkg/query-service/agentConf"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/opamp"
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/telemetry"
@@ -33,6 +44,8 @@ import (
"go.uber.org/zap"
)
const AppDbEngine = "sqlite"
type ServerOptions struct {
PromConfigPath string
HTTPHostPort string
@@ -76,6 +89,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)
localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
if err != nil {
@@ -98,7 +113,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
zap.S().Info("Using ClickHouse as datastore ...")
qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath)
qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath, lm)
go qb.Start(readerReady)
reader = qb
} else {
@@ -117,6 +132,27 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
// initiate opamp
_, err = opAmpModel.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
if err != nil {
return nil, err
}
// initiate agent config handler
if err := agentConf.Initiate(localDB, AppDbEngine); err != nil {
return nil, err
}
// start the usagemanager
usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn())
if err != nil {
return nil, err
}
err = usageManager.Start()
if err != nil {
return nil, err
}
telemetry.GetInstance().SetReader(reader)
apiOpts := api.APIHandlerOptions{
@@ -173,7 +209,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
// ip here for alert manager
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY"},
})
handler := c.Handler(r)
@@ -188,13 +224,33 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
r := mux.NewRouter()
getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
patToken := r.Header.Get("SIGNOZ-API-KEY")
if len(patToken) > 0 {
zap.S().Debugf("Received a non-zero length PAT token")
ctx := context.Background()
dao := apiHandler.AppDao()
user, err := dao.GetUserByPAT(ctx, patToken)
if err == nil && user != nil {
zap.S().Debugf("Found valid PAT user: %+v", user)
return user, nil
}
if err != nil {
zap.S().Debugf("Error while getting user for PAT: %+v", err)
}
}
return baseauth.GetUserFromRequest(r)
}
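// Editor's note (not part of this diff): with this middleware in place, any
// HTTP client can authenticate by sending a PAT in the SIGNOZ-API-KEY header;
// requests without the header fall back to the JWT flow above. Illustrative
// client-side sketch:
//
//	req, _ := http.NewRequest("GET", "https://signoz.example.com/api/v1/version", nil)
//	req.Header.Set("SIGNOZ-API-KEY", patToken)
//	resp, err := http.DefaultClient.Do(req)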
am := baseapp.NewAuthMiddleware(getUserFromRequest)
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)
apiHandler.RegisterRoutes(r)
apiHandler.RegisterMetricsRoutes(r)
apiHandler.RegisterLogsRoutes(r)
apiHandler.RegisterRoutes(r, am)
apiHandler.RegisterMetricsRoutes(r, am)
apiHandler.RegisterLogsRoutes(r, am)
apiHandler.RegisterQueryRangeV3Routes(r, am)
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
@@ -255,15 +311,82 @@ func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}
func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFrom := "/api/v2/metrics/query_range"
data := map[string]interface{}{}
var postData *basemodel.QueryRangeParamsV2
if path == pathToExtractBodyFrom && (r.Method == "POST") {
if r.Body != nil {
bodyBytes, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, false
}
r.Body.Close() // must close
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &postData)
} else {
return nil, false
}
} else {
return nil, false
}
signozMetricNotFound := false
if postData != nil {
signozMetricNotFound = telemetry.GetInstance().CheckSigNozMetricsV2(postData.CompositeMetricQuery)
if postData.CompositeMetricQuery != nil {
data["queryType"] = postData.CompositeMetricQuery.QueryType
data["panelType"] = postData.CompositeMetricQuery.PanelType
}
data["datasource"] = postData.DataSource
}
if signozMetricNotFound {
telemetry.GetInstance().AddActiveMetricsUser()
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
}
return data, true
}
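// Editor's sketch (not part of this diff): the read-then-restore pattern above
// is the standard way to inspect a request body in middleware without
// consuming it for downstream handlers. Minimal standalone form, using the
// imports already in this file:
func peekBody(r *http.Request) ([]byte, error) {
	if r.Body == nil {
		return nil, nil
	}
	bodyBytes, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, err
	}
	r.Body.Close()
	// restore the body so the next handler can read it again
	r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
	return bodyBytes, nil
}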
func getActiveLogs(path string, r *http.Request) {
// if path == "/api/v1/dashboards/{uuid}" {
// telemetry.GetInstance().AddActiveMetricsUser()
// }
if path == "/api/v1/logs" {
hasFilters := len(r.URL.Query().Get("q"))
if hasFilters > 0 {
telemetry.GetInstance().AddActiveLogsUser()
}
}
}
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
getActiveLogs(path, r)
lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if metadataExists {
for key, value := range dashboardMetadata {
data[key] = value
}
}
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
@@ -365,7 +488,7 @@ func (s *Server) Start() error {
if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
privatePort = port
}
fmt.Println("starting private http")
go func() {
zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
@@ -381,6 +504,37 @@ func (s *Server) Start() error {
}()
go func() {
zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
err := opamp.InitalizeServer(baseconst.OpAmpWsEndpoint, &opAmpModel.AllAgents)
if err != nil {
zap.S().Info("opamp ws server failed to start", err)
s.unavailableChannel <- healthcheck.Unavailable
}
}()
return nil
}
func (s *Server) Stop() error {
if s.httpServer != nil {
if err := s.httpServer.Shutdown(context.Background()); err != nil {
return err
}
}
if s.privateHTTP != nil {
if err := s.privateHTTP.Shutdown(context.Background()); err != nil {
return err
}
}
opamp.StopServer()
if s.ruleManager != nil {
s.ruleManager.Stop()
}
return nil
}

View File

@@ -10,6 +10,8 @@ const (
var LicenseSignozIo = "https://license.signoz.io/api/v1"
var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
func GetOrDefaultEnv(key string, fallback string) string {
v := os.Getenv(key)
if len(v) == 0 {

View File

@@ -2,6 +2,7 @@ package dao
import (
"context"
"net/url"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
@@ -22,6 +23,8 @@ type ModelDao interface {
// auth methods
PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
// org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
@@ -30,4 +33,11 @@ type ModelDao interface {
UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError)
DeletePAT(ctx context.Context, id string) basemodel.BaseApiError
}

View File

@@ -10,9 +10,33 @@ import (
"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.uber.org/zap"
)
// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error())
return "", model.BadRequestStr("invalid user email received from the auth provider")
}
tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
if err != nil {
zap.S().Errorf("failed to generate token for SSO login user", err)
return "", model.InternalErrorStr("failed to generate token for the user")
}
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
userPayload.User.Id,
tokenStore.RefreshJwt), nil
}
func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {

View File

@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"encoding/json"
"net/url"
"fmt"
"strings"
"time"
@@ -25,6 +26,34 @@ type StoredDomain struct {
UpdatedAt int64 `db:"updated_at"`
}
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
// user domain. The domain is further used to process validity of the response.
// when sending login request to IdP we send relay state as URL (site url)
// with domainId as query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
domainIdStr = strings.Replace(v[0], ":", "-", -1)
}
}
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
zap.S().Errorf("failed to parse domain id from relay state", err)
return nil, fmt.Errorf("failed to parse response from IdP response")
}
domain, err := m.GetDomain(ctx, domainId)
if (err != nil) || domain == nil {
zap.S().Errorf("failed to find domain received in IdP response", err.Error())
return nil, fmt.Errorf("invalid credentials")
}
return domain, nil
}
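// Editor's note (not part of this diff): the ':' -> '-' substitution above
// reverses BuildSsoUrl, which encodes the domain id with ':' in place of '-'
// before embedding it in the relay state. Round trip with an illustrative id:
//
//	domainId := "2f47c80b-7b52-4c9e-a78f-0f0d8e4b1d11"
//	relay := "https://signoz.example.com/?domainId=" + strings.Replace(domainId, "-", ":", -1)
//	u, _ := url.Parse(relay)
//	got := strings.Replace(u.Query().Get("domainId"), ":", "-", -1)
//	// got == domainId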
// GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {

View File

@@ -48,7 +48,17 @@ func InitDB(dataSourceName string) (*modelDao, error) {
updated_at INTEGER,
data TEXT NOT NULL,
FOREIGN KEY(org_id) REFERENCES organizations(id)
);`
);
CREATE TABLE IF NOT EXISTS personal_access_tokens (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id TEXT NOT NULL,
token TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
created_at INTEGER NOT NULL,
expires_at INTEGER NOT NULL,
FOREIGN KEY(user_id) REFERENCES users(id)
);
`
_, err = m.DB().Exec(table_schema)
if err != nil {

View File

@@ -0,0 +1,106 @@
package sqlite
import (
"context"
"fmt"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func (m *modelDao) CreatePAT(ctx context.Context, p *model.PAT) basemodel.BaseApiError {
_, err := m.DB().ExecContext(ctx,
"INSERT INTO personal_access_tokens (user_id, token, name, created_at, expires_at) VALUES ($1, $2, $3, $4, $5)",
p.UserID,
p.Token,
p.Name,
p.CreatedAt,
p.ExpiresAt)
if err != nil {
zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT insertion failed"))
}
return nil
}
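// Editor's sketch (not part of this diff): creating a PAT through the DAO
// above; all field values are illustrative, and real tokens are generated by
// the API layer.
func createPATExample(ctx context.Context, m *modelDao) basemodel.BaseApiError {
	return m.CreatePAT(ctx, &model.PAT{
		UserID:    "user-uuid",
		Token:     "generated-token",
		Name:      "ci-token",
		CreatedAt: 1680000000, // unix seconds
		ExpiresAt: 0,          // unused as of now, per the model comment
	})
}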
func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
pats := []model.PAT{}
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE user_id=?;`, userID); err != nil {
zap.S().Errorf("Failed to fetch PATs for user: %s, err: %v", userID, zap.Error(err))
return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
}
return pats, nil
}
func (m *modelDao) DeletePAT(ctx context.Context, id string) basemodel.BaseApiError {
_, err := m.DB().ExecContext(ctx, `DELETE from personal_access_tokens where id=?;`, id)
if err != nil {
zap.S().Errorf("Failed to delete PAT, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to delete PAT"))
}
return nil
}
func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
pats := []model.PAT{}
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=?;`, token); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
}
if len(pats) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token, %s", token),
}
}
return &pats[0], nil
}
func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
pats := []model.PAT{}
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=?;`, id); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
}
if len(pats) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
Err: fmt.Errorf("found zero or multiple PATs with same token"),
}
}
return &pats[0], nil
}
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) {
users := []basemodel.UserPayload{}
query := `SELECT
u.id,
u.name,
u.email,
u.password,
u.created_at,
u.profile_picture_url,
u.org_id,
u.group_id
FROM users u, personal_access_tokens p
WHERE u.id = p.user_id and p.token=?;`
if err := m.DB().Select(&users, query, token); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
}
if len(users) != 1 {
return nil, &model.ApiError{
Typ: model.ErrorInternal,
Err: fmt.Errorf("found zero or multiple users with same PAT token"),
}
}
return &users[0], nil
}

View File

@@ -127,7 +127,7 @@ func NewPostRequestWithCtx(ctx context.Context, url string, contentType string,
}
// SendUsage reports the usage of signoz to license server
func SendUsage(ctx context.Context, usage *model.UsagePayload) *model.ApiError {
func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
reqString, _ := json.Marshal(usage)
req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {

View File

@@ -10,6 +10,8 @@ import (
"sync"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
@@ -92,6 +94,8 @@ func (lm *Manager) SetActive(l *model.License) {
lm.activeLicense = l
lm.activeFeatures = l.FeatureSet
// set default features
setDefaultFeatures(lm)
if !lm.validatorRunning {
// we want to make sure only one validator runs,
// we already have lock() so good to go
@@ -101,7 +105,13 @@ func (lm *Manager) SetActive(l *model.License) {
}
// LoadActiveLicense loads the most recent active licenseex
func setDefaultFeatures(lm *Manager) {
for k, v := range baseconstants.DEFAULT_FEATURE_SET {
lm.activeFeatures[k] = v
}
}
// LoadActiveLicense loads the most recent active license
func (lm *Manager) LoadActiveLicense() error {
var err error
active, err := lm.repo.GetActiveLicense(context.Background())
@@ -111,7 +121,10 @@ func (lm *Manager) LoadActiveLicense() error {
if active != nil {
lm.SetActive(active)
} else {
zap.S().Info("No active license found.")
zap.S().Info("No active license found, defaulting to basic plan")
// if no active license is found, we default to basic(free) plan with all default features
lm.activeFeatures = basemodel.BasicPlan
setDefaultFeatures(lm)
}
return nil
@@ -278,8 +291,11 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
// CheckFeature will be internally used by backend routines
// for feature gating
func (lm *Manager) CheckFeature(featureKey string) error {
if _, ok := lm.activeFeatures[featureKey]; ok {
return nil
if value, ok := lm.activeFeatures[featureKey]; ok {
if value {
return nil
}
return basemodel.ErrFeatureUnavailable{Key: featureKey}
}
return basemodel.ErrFeatureUnavailable{Key: featureKey}
}
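// Editor's sketch (not part of this diff): typical feature gating against the
// manager above; callers treat a nil error as "feature enabled", e.g.
// featureEnabled(lm, basemodel.SmartTraceDetail).
func featureEnabled(lm *Manager, key string) bool {
	return lm.CheckFeature(key) == nil
}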

View File

@@ -9,8 +9,10 @@ import (
"github.com/google/uuid"
"github.com/pkg/errors"
saml2 "github.com/russellhaering/gosaml2"
"go.signoz.io/signoz/ee/query-service/saml"
"go.signoz.io/signoz/ee/query-service/sso/saml"
"go.signoz.io/signoz/ee/query-service/sso"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
type SSOType string
@@ -20,12 +22,6 @@ const (
GoogleAuth SSOType = "GOOGLE_AUTH"
)
type SamlConfig struct {
SamlEntity string `json:"samlEntity"`
SamlIdp string `json:"samlIdp"`
SamlCert string `json:"samlCert"`
}
// OrgDomain identify org owned web domains for auth and other purposes
type OrgDomain struct {
Id uuid.UUID `json:"id"`
@@ -33,10 +29,17 @@ type OrgDomain struct {
OrgId string `json:"orgId"`
SsoEnabled bool `json:"ssoEnabled"`
SsoType SSOType `json:"ssoType"`
SamlConfig *SamlConfig `json:"samlConfig"`
GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`
Org *basemodel.Organization
}
func (od *OrgDomain) String() string {
return fmt.Sprintf("[%s]%s-%s ", od.Name, od.Id.String(), od.SsoType)
}
// Valid is used a pipeline function to check if org domain
// loaded from db is valid
func (od *OrgDomain) Valid(err error) error {
@@ -97,6 +100,16 @@ func (od *OrgDomain) GetSAMLCert() string {
return ""
}
// PrepareGoogleOAuthProvider creates GoogleProvider that is used in
// requesting OAuth and also used in processing response from google
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
if od.GoogleAuthConfig == nil {
return nil, fmt.Errorf("Google auth is not setup correctly for this domain")
}
return od.GoogleAuthConfig.GetProvider(od.Name, siteUrl)
}
// PrepareSamlRequest creates a SAML request according to gosaml2
func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServiceProvider, error) {
@@ -124,19 +137,48 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro
}
func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
sp, err := od.PrepareSamlRequest(siteUrl)
if err != nil {
return "", err
}
fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)
// build redirect url from window.location sent by frontend
redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
// prepare state that gets relayed back when the auth provider
// calls back our url. here we pass the app url (where signoz runs)
// and the domain Id. The domain Id helps in identifying sso config
// when the call back occurs and the app url is useful in redirecting user
// back to the right path.
// why do we need to pass the app url? the callback is typically handled by the backend,
// and sometimes the backend might run at a different port or be unaware of the frontend
// endpoint (unless the SITE_URL param is set). hence, we receive this build-sso request
// along with the frontend window.location and use it to relay the information through
// the auth provider to the backend (HandleCallback or HandleSSO method).
relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)
switch (od.SsoType) {
case SAML:
sp, err := od.PrepareSamlRequest(siteUrl)
if err != nil {
return "", err
}
return sp.BuildAuthURL(relayState)
case GoogleAuth:
googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl)
if err != nil {
return "", err
}
return googleProvider.BuildAuthURL(relayState)
default:
zap.S().Errorf("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
return "", fmt.Errorf("unsupported SSO config for the domain")
}
relayState := fmt.Sprintf("%s://%s%s?domainId=%s",
siteUrl.Scheme,
siteUrl.Host,
siteUrl.Path,
fmtDomainId)
return sp.BuildAuthURL(relayState)
}

View File

@@ -1,6 +1,7 @@
package model
import (
"fmt"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
@@ -44,6 +45,14 @@ func BadRequest(err error) *ApiError {
}
}
// BadRequestStr returns a ApiError object of bad request for string input
func BadRequestStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorBadData,
Err: fmt.Errorf(s),
}
}
// InternalError returns a ApiError object of internal type
func InternalError(err error) *ApiError {
return &ApiError{
@@ -52,6 +61,14 @@ func InternalError(err error) *ApiError {
}
}
// InternalErrorStr returns a ApiError object of internal type for string input
func InternalErrorStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf(s),
}
}
var (
ErrorNone basemodel.ErrorType = ""
ErrorTimeout basemodel.ErrorType = "timeout"

View File

@@ -0,0 +1,10 @@
package model
type PAT struct {
Id string `json:"id" db:"id"`
UserID string `json:"userId" db:"user_id"`
Token string `json:"token" db:"token"`
Name string `json:"name" db:"name"`
CreatedAt int64 `json:"createdAt" db:"created_at"`
ExpiresAt int64 `json:"expiresAt" db:"expires_at"` // unused as of now
}

View File

@@ -17,11 +17,15 @@ var BasicPlan = basemodel.FeatureSet{
}
var ProPlan = basemodel.FeatureSet{
Pro: true,
SSO: true,
Pro: true,
SSO: true,
basemodel.SmartTraceDetail: true,
basemodel.CustomMetricsFunction: true,
}
var EnterprisePlan = basemodel.FeatureSet{
Enterprise: true,
SSO: true,
Enterprise: true,
SSO: true,
basemodel.SmartTraceDetail: true,
basemodel.CustomMetricsFunction: true,
}

View File

@@ -0,0 +1,68 @@
package model
import (
"fmt"
"context"
"net/url"
"golang.org/x/oauth2"
"github.com/coreos/go-oidc/v3/oidc"
"go.signoz.io/signoz/ee/query-service/sso"
)
// SamlConfig contains SAML params to generate and respond to the requests
// from SAML provider
type SamlConfig struct {
SamlEntity string `json:"samlEntity"`
SamlIdp string `json:"samlIdp"`
SamlCert string `json:"samlCert"`
}
// GoogleOauthConfig contains a generic config to support oauth
type GoogleOAuthConfig struct {
ClientID string `json:"clientId"`
ClientSecret string `json:"clientSecret"`
RedirectURI string `json:"redirectURI"`
}
const (
googleIssuerURL = "https://accounts.google.com"
)
func (g *GoogleOAuthConfig) GetProvider(domain string, siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
ctx, cancel := context.WithCancel(context.Background())
provider, err := oidc.NewProvider(ctx, googleIssuerURL)
if err != nil {
cancel()
return nil, fmt.Errorf("failed to get provider: %v", err)
}
// default to email and profile scope as we just use google auth
// to verify identity and start a session.
scopes := []string{"email"}
// this is the url google will call after login completion
redirectURL := fmt.Sprintf("%s://%s/%s",
siteUrl.Scheme,
siteUrl.Host,
"api/v1/complete/google")
return &sso.GoogleOAuthProvider{
RedirectURI: g.RedirectURI,
OAuth2Config: &oauth2.Config{
ClientID: g.ClientID,
ClientSecret: g.ClientSecret,
Endpoint: provider.Endpoint(),
Scopes: scopes,
RedirectURL: redirectURL,
},
Verifier: provider.Verifier(
&oidc.Config{ClientID: g.ClientID},
),
Cancel: cancel,
HostedDomain: domain,
}, nil
}
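// Editor's sketch (not part of this diff): wiring the config above into an
// auth URL; credentials, domain, and URLs are illustrative.
func buildGoogleAuthURLExample() (string, error) {
	cfg := &GoogleOAuthConfig{
		ClientID:     "client-id.apps.googleusercontent.com",
		ClientSecret: "client-secret",
	}
	siteUrl, err := url.Parse("https://signoz.example.com/login")
	if err != nil {
		return "", err
	}
	provider, err := cfg.GetProvider("example.com", siteUrl)
	if err != nil {
		return "", err
	}
	// the state carries the relay URL prepared in BuildSsoUrl
	return provider.BuildAuthURL("https://signoz.example.com/?domainId=2f47c80b:7b52:4c9e:a78f:0f0d8e4b1d11")
}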

View File

@@ -0,0 +1,22 @@
package model
type SpanForTraceDetails struct {
TimeUnixNano uint64 `json:"timestamp"`
SpanID string `json:"spanID"`
TraceID string `json:"traceID"`
ParentID string `json:"parentID"`
ParentSpan *SpanForTraceDetails `json:"parentSpan"`
ServiceName string `json:"serviceName"`
Name string `json:"name"`
Kind int32 `json:"kind"`
DurationNano int64 `json:"durationNano"`
TagMap map[string]string `json:"tagMap"`
Events []string `json:"event"`
HasError bool `json:"hasError"`
Children []*SpanForTraceDetails `json:"children"`
}
type GetSpansSubQueryDBResponse struct {
SpanID string `ch:"spanID"`
TraceID string `ch:"traceID"`
}

View File

@@ -6,30 +6,27 @@ import (
"github.com/google/uuid"
)
type UsageSnapshot struct {
CurrentLogSizeBytes uint64 `json:"currentLogSizeBytes"`
CurrentLogSizeBytesColdStorage uint64 `json:"currentLogSizeBytesColdStorage"`
CurrentSpansCount uint64 `json:"currentSpansCount"`
CurrentSpansCountColdStorage uint64 `json:"currentSpansCountColdStorage"`
CurrentSamplesCount uint64 `json:"currentSamplesCount"`
CurrentSamplesCountColdStorage uint64 `json:"currentSamplesCountColdStorage"`
}
type UsageBase struct {
Id uuid.UUID `json:"id" db:"id"`
InstallationId uuid.UUID `json:"installationId" db:"installation_id"`
ActivationId uuid.UUID `json:"activationId" db:"activation_id"`
CreatedAt time.Time `json:"createdAt" db:"created_at"`
FailedSyncRequest int `json:"failedSyncRequest" db:"failed_sync_request_count"`
}
type UsagePayload struct {
UsageBase
Metrics UsageSnapshot `json:"metrics"`
SnapshotDate time.Time `json:"snapshotDate"`
InstallationId uuid.UUID `json:"installationId"`
LicenseKey uuid.UUID `json:"licenseKey"`
Usage []Usage `json:"usage"`
}
type Usage struct {
UsageBase
Snapshot string `db:"snapshot"`
CollectorID string `json:"collectorId"`
ExporterID string `json:"exporterId"`
Type string `json:"type"`
Tenant string `json:"tenant"`
TimeStamp time.Time `json:"timestamp"`
Count int64 `json:"count"`
Size int64 `json:"size"`
}
type UsageDB struct {
CollectorID string `ch:"collector_id" json:"collectorId"`
ExporterID string `ch:"exporter_id" json:"exporterId"`
Type string `ch:"-" json:"type"`
TimeStamp time.Time `ch:"timestamp" json:"timestamp"`
Tenant string `ch:"tenant" json:"tenant"`
Data string `ch:"data" json:"data"`
}

View File

@@ -0,0 +1,92 @@
package sso
import (
"fmt"
"errors"
"context"
"net/http"
"github.com/coreos/go-oidc/v3/oidc"
"golang.org/x/oauth2"
)
type GoogleOAuthProvider struct {
RedirectURI string
OAuth2Config *oauth2.Config
Verifier *oidc.IDTokenVerifier
Cancel context.CancelFunc
HostedDomain string
}
func (g *GoogleOAuthProvider) BuildAuthURL(state string) (string, error) {
var opts []oauth2.AuthCodeOption
// set hosted domain. google supports multiple hosted domains but in our case
// we have one config per host domain.
opts = append(opts, oauth2.SetAuthURLParam("hd", g.HostedDomain))
return g.OAuth2Config.AuthCodeURL(state, opts...), nil
}
type oauth2Error struct{
error string
errorDescription string
}
func (e *oauth2Error) Error() string {
if e.errorDescription == "" {
return e.error
}
return e.error + ": " + e.errorDescription
}
func (g *GoogleOAuthProvider) HandleCallback(r *http.Request) (identity *SSOIdentity, err error) {
q := r.URL.Query()
if errType := q.Get("error"); errType != "" {
return identity, &oauth2Error{errType, q.Get("error_description")}
}
token, err := g.OAuth2Config.Exchange(r.Context(), q.Get("code"))
if err != nil {
return identity, fmt.Errorf("google: failed to get token: %v", err)
}
return g.createIdentity(r.Context(), token)
}
func (g *GoogleOAuthProvider) createIdentity(ctx context.Context, token *oauth2.Token) (identity *SSOIdentity, err error) {
rawIDToken, ok := token.Extra("id_token").(string)
if !ok {
return identity, errors.New("google: no id_token in token response")
}
idToken, err := g.Verifier.Verify(ctx, rawIDToken)
if err != nil {
return identity, fmt.Errorf("google: failed to verify ID Token: %v", err)
}
var claims struct {
Username string `json:"name"`
Email string `json:"email"`
EmailVerified bool `json:"email_verified"`
HostedDomain string `json:"hd"`
}
if err := idToken.Claims(&claims); err != nil {
return identity, fmt.Errorf("oidc: failed to decode claims: %v", err)
}
if claims.HostedDomain != g.HostedDomain {
return identity, fmt.Errorf("oidc: unexpected hd claim %v", claims.HostedDomain)
}
identity = &SSOIdentity{
UserID: idToken.Subject,
Username: claims.Username,
Email: claims.Email,
EmailVerified: claims.EmailVerified,
ConnectorData: []byte(token.RefreshToken),
}
return identity, nil
}

View File

@@ -0,0 +1,31 @@
package sso
import (
"net/http"
)
// SSOIdentity contains details of user received from SSO provider
type SSOIdentity struct {
UserID string
Username string
PreferredUsername string
Email string
EmailVerified bool
ConnectorData []byte
}
// OAuthCallbackProvider is an interface implemented by connectors which use an OAuth
// style redirect flow to determine user information.
type OAuthCallbackProvider interface {
// The initial URL the user would be redirected to.
// OAuth2 implementations support various scopes but we only need profile and email, as
// the roles are still being managed in SigNoz.
BuildAuthURL(state string) (string, error)
// Handle the callback to the server (after login at the oauth provider site)
// and return an email identity.
// At the moment we don't support an auto signup flow (based on domain), so
// the full identity (including name, group etc.) is not required outside of the
// connector.
HandleCallback(r *http.Request) (identity *SSOIdentity, err error)
}
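// Editor's sketch (not part of this diff): a minimal HTTP handler built on the
// interface above; the error handling and redirect target are illustrative.
func handleSSOCallback(p OAuthCallbackProvider) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		identity, err := p.HandleCallback(r)
		if err != nil || identity == nil || identity.Email == "" {
			http.Error(w, "sso login failed", http.StatusUnauthorized)
			return
		}
		// look up or create a session for identity.Email, then redirect
		http.Redirect(w, r, "/", http.StatusFound)
	}
}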

View File

@@ -4,18 +4,19 @@ import (
"context"
"encoding/json"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.uber.org/zap"
licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
"go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/ee/query-service/usage/repository"
"go.signoz.io/signoz/pkg/query-service/utils/encryption"
)
@@ -27,9 +28,6 @@ const (
)
var (
// collect usage every hour
collectionFrequency = 1 * time.Hour
// send usage every 24 hour
uploadFrequency = 24 * time.Hour
@@ -37,8 +35,6 @@ var (
)
type Manager struct {
repository *repository.Repository
clickhouseConn clickhouse.Conn
licenseRepo *license.Repo
@@ -52,15 +48,9 @@ type Manager struct {
}
func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
repo := repository.New(db)
err := repo.Init(dbType)
if err != nil {
return nil, fmt.Errorf("failed to initiate usage repo: %v", err)
}
m := &Manager{
repository: repo,
// repository: repo,
clickhouseConn: clickhouseConn,
licenseRepo: licenseRepo,
}
@@ -74,6 +64,28 @@ func (lm *Manager) Start() error {
return fmt.Errorf("usage exporter is locked")
}
go lm.UsageExporter(context.Background())
return nil
}
func (lm *Manager) UsageExporter(ctx context.Context) {
defer close(lm.terminated)
uploadTicker := time.NewTicker(uploadFrequency)
defer uploadTicker.Stop()
for {
select {
case <-lm.done:
return
case <-uploadTicker.C:
lm.UploadUsage(ctx)
}
}
}
func (lm *Manager) UploadUsage(ctx context.Context) error {
// check if license is present or not
license, err := lm.licenseRepo.GetActiveLicense(context.Background())
if err != nil {
@@ -85,203 +97,81 @@ func (lm *Manager) Start() error {
return nil
}
// upload previous snapshots if any
err = lm.UploadUsage(context.Background())
if err != nil {
return err
}
// collect a snapshot in case it wasn't collected in (t - collectionFrequency)
err = lm.CollectCurrentUsage(context.Background())
if err != nil {
return err
}
go lm.UsageExporter(context.Background())
return nil
}
// CollectCurrentUsage collects usage data if it hasn't been collected within the last collectionFrequency
func (lm *Manager) CollectCurrentUsage(ctx context.Context) error {
// check the DB if anything exist where timestamp > t - collectionFrequency
ts := time.Now().Add(-collectionFrequency)
alreadyCreated, err := lm.repository.CheckSnapshotGtCreatedAt(ctx, ts)
if err != nil {
return err
}
if !alreadyCreated {
zap.S().Info("Collecting current usage")
exportError := lm.CollectAndStoreUsage(ctx)
if exportError != nil {
return exportError
}
} else {
zap.S().Info("Nothing to collect")
}
return nil
}
func (lm *Manager) UsageExporter(ctx context.Context) {
defer close(lm.terminated)
collectionTicker := time.NewTicker(collectionFrequency)
defer collectionTicker.Stop()
uploadTicker := time.NewTicker(uploadFrequency)
defer uploadTicker.Stop()
for {
select {
case <-lm.done:
return
case <-collectionTicker.C:
lm.CollectAndStoreUsage(ctx)
case <-uploadTicker.C:
lm.UploadUsage(ctx)
// remove the old snapshots
lm.repository.DropOldSnapshots(ctx)
}
}
}
type TableSize struct {
Table string `ch:"table"`
DiskName string `ch:"disk_name"`
Rows uint64 `ch:"rows"`
UncompressedBytes uint64 `ch:"uncompressed_bytes"`
}
func (lm *Manager) CollectAndStoreUsage(ctx context.Context) error {
snap, err := lm.GetUsageFromClickHouse(ctx)
if err != nil {
return err
}
license, err := lm.licenseRepo.GetActiveLicense(ctx)
if err != nil {
return err
}
activationId, _ := uuid.Parse(license.ActivationId)
// TODO (nitya) : Add installation ID in the payload
payload := model.UsagePayload{
UsageBase: model.UsageBase{
ActivationId: activationId,
FailedSyncRequest: 0,
},
Metrics: *snap,
SnapshotDate: time.Now(),
}
err = lm.repository.InsertSnapshot(ctx, &payload)
if err != nil {
return err
}
return nil
}
func (lm *Manager) GetUsageFromClickHouse(ctx context.Context) (*model.UsageSnapshot, error) {
tableSizes := []TableSize{}
snap := model.UsageSnapshot{}
usages := []model.UsageDB{}
// get usage from clickhouse
dbs := []string{"signoz_logs", "signoz_traces", "signoz_metrics"}
query := `
SELECT
table,
disk_name,
sum(rows) as rows,
sum(data_uncompressed_bytes) AS uncompressed_bytes
FROM system.parts
WHERE active AND (database in ('signoz_logs', 'signoz_metrics', 'signoz_traces')) AND (table in ('logs','samples_v2', 'signoz_index_v2'))
GROUP BY
table,
disk_name
ORDER BY table
SELECT tenant, collector_id, exporter_id, timestamp, data
FROM %s.distributed_usage as u1
GLOBAL INNER JOIN
(SELECT
tenant, collector_id, exporter_id, MAX(timestamp) as ts
FROM %s.distributed_usage as u2
where timestamp >= $1
GROUP BY tenant, collector_id, exporter_id
) as t1
ON
u1.tenant = t1.tenant AND u1.collector_id = t1.collector_id AND u1.exporter_id = t1.exporter_id and u1.timestamp = t1.ts
order by timestamp
`
err := lm.clickhouseConn.Select(ctx, &tableSizes, query)
if err != nil {
return nil, err
}
for _, val := range tableSizes {
switch val.Table {
case "logs":
if val.DiskName == "default" {
snap.CurrentLogSizeBytes = val.UncompressedBytes
} else {
snap.CurrentLogSizeBytesColdStorage = val.UncompressedBytes
}
case "samples_v2":
if val.DiskName == "default" {
snap.CurrentSamplesCount = val.Rows
} else {
snap.CurrentSamplesCountColdStorage = val.Rows
}
case "signoz_index_v2":
if val.DiskName == "default" {
snap.CurrentSpansCount = val.Rows
} else {
snap.CurrentSpansCountColdStorage = val.Rows
}
for _, db := range dbs {
dbusages := []model.UsageDB{}
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
return err
}
for _, u := range dbusages {
u.Type = db
usages = append(usages, u)
}
}
return &snap, nil
}
func (lm *Manager) UploadUsage(ctx context.Context) error {
snapshots, err := lm.repository.GetSnapshotsNotSynced(ctx)
if err != nil {
return err
}
if len(snapshots) <= 0 {
if len(usages) <= 0 {
zap.S().Info("no snapshots to upload, skipping.")
return nil
}
zap.S().Info("uploading snapshots")
for _, snap := range snapshots {
metricsBytes, err := encryption.Decrypt([]byte(snap.ActivationId.String()[:32]), []byte(snap.Snapshot))
zap.S().Info("uploading usage data")
usagesPayload := []model.Usage{}
for _, usage := range usages {
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
if err != nil {
return err
}
metrics := model.UsageSnapshot{}
err = json.Unmarshal(metricsBytes, &metrics)
usageData := model.Usage{}
err = json.Unmarshal(usageDataBytes, &usageData)
if err != nil {
return err
}
err = lm.UploadUsageWithExponentalBackOff(ctx, model.UsagePayload{
UsageBase: model.UsageBase{
Id: snap.Id,
InstallationId: snap.InstallationId,
ActivationId: snap.ActivationId,
FailedSyncRequest: snap.FailedSyncRequest,
},
SnapshotDate: snap.CreatedAt,
Metrics: metrics,
})
if err != nil {
return err
}
usageData.CollectorID = usage.CollectorID
usageData.ExporterID = usage.ExporterID
usageData.Type = usage.Type
usageData.Tenant = usage.Tenant
usagesPayload = append(usagesPayload, usageData)
}
key, _ := uuid.Parse(license.Key)
payload := model.UsagePayload{
LicenseKey: key,
Usage: usagesPayload,
}
err = lm.UploadUsageWithExponentalBackOff(ctx, payload)
if err != nil {
return err
}
return nil
}
func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) error {
for i := 1; i <= MaxRetries; i++ {
apiErr := licenseserver.SendUsage(ctx, &payload)
apiErr := licenseserver.SendUsage(ctx, payload)
if apiErr != nil && i == MaxRetries {
err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
if err != nil {
zap.S().Errorf("failed to updated the failure count for snapshot in DB : ", zap.Error(err))
return err
}
zap.S().Errorf("retries stopped : %v", zap.Error(err))
zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
// not returning error here since it is captured in the failed count
return nil
} else if apiErr != nil {
@@ -289,24 +179,10 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
sleepDuration := RetryInterval * time.Duration(i)
zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err))
time.Sleep(sleepDuration)
// update the failed request count
err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
if err != nil {
zap.S().Errorf("failed to updated the failure count for snapshot in DB : %v", zap.Error(err))
return err
}
} else {
break
}
}
// update the database that it is synced
err := lm.repository.MoveToSynced(ctx, payload.Id)
if err != nil {
return err
}
return nil
}

View File

@@ -1,139 +0,0 @@
package repository
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/ee/query-service/usage/sqlite"
"go.signoz.io/signoz/pkg/query-service/utils/encryption"
)
const (
MaxFailedSyncCount = 9 // a snapshot will be ignored if the max failed count is greater than or equal to 9
SnapShotLife = 3 * 24 * time.Hour
)
// Repository is usage Repository which stores usage snapshot in a secured DB
type Repository struct {
db *sqlx.DB
}
// New initiates a new usage Repository
func New(db *sqlx.DB) *Repository {
return &Repository{
db: db,
}
}
func (r *Repository) Init(engine string) error {
switch engine {
case "sqlite3", "sqlite":
return sqlite.InitDB(r.db)
default:
return fmt.Errorf("unsupported db")
}
}
func (r *Repository) InsertSnapshot(ctx context.Context, usage *model.UsagePayload) error {
snapshotBytes, err := json.Marshal(usage.Metrics)
if err != nil {
return err
}
usage.Id = uuid.New()
encryptedSnapshot, err := encryption.Encrypt([]byte(usage.ActivationId.String()[:32]), snapshotBytes)
if err != nil {
return err
}
query := `INSERT INTO usage(id, activation_id, snapshot)
VALUES ($1, $2, $3)`
_, err = r.db.ExecContext(ctx,
query,
usage.Id,
usage.ActivationId,
string(encryptedSnapshot),
)
if err != nil {
zap.S().Errorf("error inserting usage data: %v", zap.Error(err))
return fmt.Errorf("failed to insert usage in db: %v", err)
}
return nil
}
func (r *Repository) MoveToSynced(ctx context.Context, id uuid.UUID) error {
query := `UPDATE usage
SET synced = 'true',
synced_at = $1
WHERE id = $2`
_, err := r.db.ExecContext(ctx, query, time.Now(), id)
if err != nil {
zap.S().Errorf("error in updating usage: %v", zap.Error(err))
return fmt.Errorf("failed to update usage in db: %v", err)
}
return nil
}
func (r *Repository) IncrementFailedRequestCount(ctx context.Context, id uuid.UUID) error {
query := `UPDATE usage SET failed_sync_request_count = failed_sync_request_count + 1 WHERE id = $1`
_, err := r.db.ExecContext(ctx, query, id)
if err != nil {
zap.S().Errorf("error in updating usage: %v", zap.Error(err))
return fmt.Errorf("failed to update usage in db: %v", err)
}
return nil
}
func (r *Repository) GetSnapshotsNotSynced(ctx context.Context) ([]*model.Usage, error) {
snapshots := []*model.Usage{}
query := `SELECT id,created_at, activation_id, snapshot, failed_sync_request_count from usage where synced!='true' and failed_sync_request_count < $1 order by created_at asc `
err := r.db.SelectContext(ctx, &snapshots, query, MaxFailedSyncCount)
if err != nil {
return nil, err
}
return snapshots, nil
}
func (r *Repository) DropOldSnapshots(ctx context.Context) error {
query := `delete from usage where created_at <= $1`
_, err := r.db.ExecContext(ctx, query, time.Now().Add(-(SnapShotLife)))
if err != nil {
zap.S().Errorf("failed to remove old snapshots from db: %v", zap.Error(err))
return err
}
return nil
}
// CheckSnapshotGtCreatedAt checks if any snapshot was created after the provided timestamp
func (r *Repository) CheckSnapshotGtCreatedAt(ctx context.Context, ts time.Time) (bool, error) {
var snapshots uint64
query := `SELECT count() from usage where created_at > '$1'`
err := r.db.QueryRowContext(ctx, query, ts).Scan(&snapshots)
if err != nil {
return false, err
}
return snapshots > 0, err
}

View File

@@ -1,32 +0,0 @@
package sqlite
import (
"fmt"
"github.com/jmoiron/sqlx"
)
func InitDB(db *sqlx.DB) error {
var err error
if db == nil {
return fmt.Errorf("invalid db connection")
}
table_schema := `CREATE TABLE IF NOT EXISTS usage(
id UUID PRIMARY KEY,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
activation_id UUID,
snapshot TEXT,
synced BOOLEAN DEFAULT 'false',
synced_at TIMESTAMP,
failed_sync_request_count INTEGER DEFAULT 0
);
`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("error in creating usage table: %v", err.Error())
}
return nil
}

View File

@@ -58,7 +58,7 @@ module.exports = {
'react/no-array-index-key': 'error',
'linebreak-style': [
'error',
process.platform === 'win32' ? 'windows' : 'unix',
process.env.platform === 'win32' ? 'windows' : 'unix',
],
'@typescript-eslint/default-param-last': 'off',
@@ -102,9 +102,10 @@ module.exports = {
},
],
'@typescript-eslint/no-unused-vars': 'error',
'func-style': ['error', 'declaration', { allowArrowFunctions: true }],
'arrow-body-style': ['error', 'as-needed'],
// eslint rules need to remove
'no-shadow': 'off',
'@typescript-eslint/no-shadow': 'off',
'import/no-cycle': 'off',

View File

@@ -1,4 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
cd frontend && npm run commitlint
cd frontend && yarn run commitlint --edit $1

frontend/.yarnrc Normal file
View File

@@ -0,0 +1 @@
network-timeout 600000

View File

@@ -9,8 +9,9 @@ ARG TARGETARCH
WORKDIR /frontend
# Copy the package.json to install dependencies
# Copy the package.json and .yarnrc files prior to install dependencies
COPY package.json ./
COPY .yarnrc ./
# Install the dependencies and make the folder
RUN CI=1 yarn install

View File

@@ -1,24 +1,20 @@
/* eslint-disable */
// @ts-ignore
// @ts-nocheck
const crypto = require('crypto');
const fs = require('fs');
const glob = require('glob');
function generateChecksum(str, algorithm, encoding) {
return crypto
.createHash(algorithm || 'md5')
.update(str, 'utf8')
.digest(encoding || 'hex');
return crypto
.createHash(algorithm || 'md5')
.update(str, 'utf8')
.digest(encoding || 'hex');
}
const result = {};
glob.sync(`public/locales/**/*.json`).forEach(path => {
const [_, lang] = path.split('public/locales');
const content = fs.readFileSync(path, { encoding: 'utf-8' });
result[lang.replace('.json', '')] = generateChecksum(content);
glob.sync(`public/locales/**/*.json`).forEach((path) => {
const [_, lang] = path.split('public/locales');
const content = fs.readFileSync(path, { encoding: 'utf-8' });
result[lang.replace('.json', '')] = generateChecksum(content);
});
fs.writeFileSync('./i18n-translations-hash.json', JSON.stringify(result));

View File

@@ -15,7 +15,7 @@ const config: Config.InitialOptions = {
useESM: true,
},
},
testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
testMatch: ['<rootDir>/src/**/*?(*.)(test).(ts|js)?(x)'],
preset: 'ts-jest/presets/js-with-ts-esm',
transform: {
'^.+\\.(ts|tsx)?$': 'ts-jest',
@@ -25,6 +25,7 @@ const config: Config.InitialOptions = {
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
testPathIgnorePatterns: ['/node_modules/', '/public/'],
moduleDirectories: ['node_modules', 'src'],
testEnvironment: 'jest-environment-jsdom',
testEnvironmentOptions: {
'jest-playwright': {
browsers: ['chromium', 'firefox', 'webkit'],

View File

@@ -27,16 +27,13 @@
"author": "",
"license": "ISC",
"dependencies": {
"@ant-design/colors": "^6.0.0",
"@ant-design/icons": "^4.6.2",
"@ant-design/colors": "6.0.0",
"@ant-design/icons": "4.8.0",
"@grafana/data": "^8.4.3",
"@monaco-editor/react": "^4.3.1",
"@testing-library/jest-dom": "^5.11.4",
"@testing-library/react": "^11.1.0",
"@testing-library/user-event": "^12.1.10",
"@welldone-software/why-did-you-render": "^6.2.1",
"@xstate/react": "^3.0.0",
"antd": "4.19.2",
"ansi-to-html": "0.7.2",
"antd": "5.0.5",
"axios": "^0.21.0",
"babel-eslint": "^10.1.0",
"babel-jest": "^26.6.0",
@@ -44,7 +41,7 @@
"babel-plugin-named-asset-import": "^0.3.7",
"babel-preset-minify": "^0.5.1",
"babel-preset-react-app": "^10.0.0",
"chart.js": "^3.4.0",
"chart.js": "3.9.1",
"chartjs-adapter-date-fns": "^2.0.0",
"chartjs-plugin-annotation": "^1.4.0",
"color": "^4.2.1",
@@ -55,10 +52,12 @@
"d3-flame-graph": "^3.1.1",
"d3-tip": "^0.9.1",
"dayjs": "^1.10.7",
"dompurify": "3.0.0",
"dotenv": "8.2.0",
"event-source-polyfill": "1.0.31",
"file-loader": "6.1.1",
"flat": "^5.0.2",
"fontfaceobserver": "2.3.0",
"history": "4.10.1",
"html-webpack-plugin": "5.1.0",
"i18next": "^21.6.12",
@@ -70,17 +69,18 @@
"less-loader": "^10.2.0",
"lodash-es": "^4.17.21",
"mini-css-extract-plugin": "2.4.5",
"react": "17.0.0",
"react-dom": "17.0.0",
"react": "18.2.0",
"react-dom": "18.2.0",
"react-force-graph": "^1.41.0",
"react-graph-vis": "^1.0.5",
"react-grid-layout": "^1.3.4",
"react-i18next": "^11.16.1",
"react-intersection-observer": "9.4.1",
"react-query": "^3.34.19",
"react-redux": "^7.2.2",
"react-router-dom": "^5.2.0",
"react-use": "^17.3.2",
"react-vis": "^1.11.7",
"react-virtuoso": "4.0.3",
"redux": "^4.0.5",
"redux-thunk": "^2.3.0",
"stream": "^0.0.2",
@@ -120,24 +120,28 @@
"@commitlint/config-conventional": "^16.2.4",
"@jest/globals": "^27.5.1",
"@playwright/test": "^1.22.0",
"@testing-library/react-hooks": "^7.0.2",
"@testing-library/jest-dom": "5.16.5",
"@testing-library/react": "13.4.0",
"@testing-library/user-event": "14.4.3",
"@types/color": "^3.0.3",
"@types/compression-webpack-plugin": "^9.0.0",
"@types/copy-webpack-plugin": "^8.0.1",
"@types/d3": "^6.2.0",
"@types/d3-tip": "^3.5.5",
"@types/dompurify": "^2.4.0",
"@types/event-source-polyfill": "^1.0.0",
"@types/flat": "^5.0.2",
"@types/fontfaceobserver": "2.1.0",
"@types/jest": "^27.5.1",
"@types/lodash-es": "^4.17.4",
"@types/mini-css-extract-plugin": "^2.5.1",
"@types/node": "^16.10.3",
"@types/react": "^17.0.0",
"@types/react-dom": "^16.9.9",
"@types/react": "18.0.26",
"@types/react-dom": "18.0.10",
"@types/react-grid-layout": "^1.1.2",
"@types/react-redux": "^7.1.11",
"@types/react-resizable": "3.0.3",
"@types/react-router-dom": "^5.1.6",
"@types/redux": "^3.6.0",
"@types/styled-components": "^5.1.4",
"@types/uuid": "^8.3.1",
"@types/vis": "^4.21.21",
@@ -145,6 +149,7 @@
"@types/webpack-dev-server": "^4.3.0",
"@typescript-eslint/eslint-plugin": "^4.28.2",
"@typescript-eslint/parser": "^4.28.2",
"@welldone-software/why-did-you-render": "6.2.1",
"autoprefixer": "^9.0.0",
"babel-plugin-styled-components": "^1.12.0",
"compression-webpack-plugin": "9.0.0",
@@ -173,7 +178,9 @@
"lint-staged": "^12.3.7",
"portfinder-sync": "^0.0.2",
"prettier": "2.2.1",
"react-hooks-testing-library": "0.6.0",
"react-hot-loader": "^4.13.0",
"react-resizable": "3.0.4",
"ts-jest": "^27.1.4",
"ts-node": "^10.2.1",
"typescript-plugin-css-modules": "^3.4.0",
@@ -186,7 +193,7 @@
]
},
"resolutions": {
"@types/react": "17.0.0",
"@types/react-dom": "17.0.0"
"@types/react": "18.0.26",
"@types/react-dom": "18.0.10"
}
}
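The react and react-dom bump from 17.0.0 to 18.2.0 (mirrored in the @types/react, @types/react-dom, and resolutions entries) implies switching the app entry point from ReactDOM.render to the React 18 createRoot API. A minimal sketch of that migration, assuming an index.tsx entry file with a #root container element; neither the file nor the element id appears in this diff:

// index.tsx (assumed entry file): React 18 root API replacing ReactDOM.render.
import React from 'react';
import { createRoot } from 'react-dom/client';

import App from 'AppRoutes'; // hypothetical import path

const container = document.getElementById('root'); // assumed element id
if (container === null) {
  throw new Error('#root element not found');
}

// Before (React 17): ReactDOM.render(<App />, container);
const root = createRoot(container);
root.render(<App />);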

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -28,6 +28,7 @@
"condition_required": "at least one metric condition is required",
"alertname_required": "alert name is required",
"promql_required": "promql expression is required when query format is set to PromQL",
"chquery_required": "query is required when query format is set to ClickHouse",
"button_savechanges": "Save Rule",
"button_createrule": "Create Rule",
"button_returntorules": "Return to rules",
@@ -55,6 +56,7 @@
"button_formula": "Formula",
"tab_qb": "Query Builder",
"tab_promql": "PromQL",
"tab_chquery": "ClickHouse Query",
"title_confirm": "Confirm",
"button_ok": "Yes",
"button_cancel": "No",
@@ -88,5 +90,23 @@
"user_guide_pql_step3": "Step 3 -Alert Configuration",
"user_guide_pql_step3a": "Set alert severity, name and descriptions",
"user_guide_pql_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts"
"user_guide_ch_step1": "Step 1 - Define the metric",
"user_guide_ch_step1a": "Write a Clickhouse query for alert evaluation. Follow <0>this tutorial</0> to learn about query format and supported vars.",
"user_guide_ch_step1b": "Format the legends based on labels you want to highlight in the preview chart",
"user_guide_ch_step2": "Step 2 - Define Alert Conditions",
"user_guide_ch_step2a": "Select the threshold type and whether you want to alert above/below a value",
"user_guide_ch_step2b": "Enter the Alert threshold",
"user_guide_ch_step3": "Step 3 -Alert Configuration",
"user_guide_ch_step3a": "Set alert severity, name and descriptions",
"user_guide_ch_step3b": "Add tags to the alert in the Label field if needed",
"user_tooltip_more_help": "More details on how to create alerts",
"choose_alert_type": "Choose a type for the alert:",
"metric_based_alert": "Metric based Alert",
"metric_based_alert_desc": "Send a notification when a condition occurs in the metric data",
"log_based_alert": "Log-based Alert",
"log_based_alert_desc": "Send a notification when a condition occurs in the logs data.",
"traces_based_alert": "Trace-based Alert",
"traces_based_alert_desc": "Send a notification when a condition occurs in the traces data.",
"exceptions_based_alert": "Exceptions-based Alert",
"exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data."
}
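These keys are i18next resources; with react-i18next already in the dependency list, consuming them would look roughly like the sketch below. The 'alerts' namespace and the component itself are assumptions, not part of this diff:

// AlertTypeChooser.tsx: hypothetical consumer of the new alert-type strings.
import React from 'react';
import { useTranslation } from 'react-i18next';

function AlertTypeChooser(): JSX.Element {
  // The 'alerts' namespace is an assumption based on this file's keys.
  const { t } = useTranslation('alerts');
  return (
    <div>
      <p>{t('choose_alert_type')}</p>
      <ul>
        <li>{t('metric_based_alert')}: {t('metric_based_alert_desc')}</li>
        <li>{t('log_based_alert')}: {t('log_based_alert_desc')}</li>
        <li>{t('traces_based_alert')}: {t('traces_based_alert_desc')}</li>
        <li>{t('exceptions_based_alert')}: {t('exceptions_based_alert_desc')}</li>
      </ul>
    </div>
  );
}

export default AlertTypeChooser;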

View File

@@ -1,6 +1,7 @@
{
"create_dashboard": "Create Dashboard",
"import_json": "Import JSON",
"import_grafana_json": "Import Grafana JSON",
"copy_to_clipboard": "Copy To ClipBoard",
"download_json": "Download JSON",
"view_json": "View JSON",

View File

@@ -9,5 +9,5 @@
"tab_license_history": "History",
"loading_licenses": "Loading licenses...",
"enter_license_key": "Please enter a license key",
"license_applied": "License applied successfully, please refresh the page to see changes."
}
"license_applied": "License applied successfully"
}

View File

@@ -0,0 +1,3 @@
{
"search_tags": "Search Tag Names"
}

View File


@@ -6,7 +6,7 @@
"release_notes": "Release Notes",
"read_how_to_upgrade": "Read instructions on how to upgrade",
"latest_version_signoz": "You are running the latest version of SigNoz.",
"stale_version": "You are on an older version and may be losing out on the latest features we have shipped. We recommend to upgrade to the latest version",
"stale_version": "You are on an older version and may be missing out on the latest features we have shipped. We recommend to upgrade to the latest version",
"oops_something_went_wrong_version": "Oops.. facing issues with fetching updated version information",
"n_a": "N/A",
"routes": {

View File

@@ -1,11 +1,11 @@
/* eslint-disable react-hooks/exhaustive-deps */
import { notification } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import loginApi from 'api/user/login';
import { Logout } from 'api/utils';
import Spinner from 'components/Spinner';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';
import { useNotifications } from 'hooks/useNotifications';
import history from 'lib/history';
import React, { useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -47,6 +47,8 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const dispatch = useDispatch<Dispatch<AppActions>>();
const { notifications } = useNotifications();
const currentRoute = mapRoutes.get('current');
const navigateToLoginIfNotLoggedIn = (isLoggedIn = isLoggedInState): void => {
@@ -106,7 +108,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
} else {
Logout();
notification.error({
notifications.error({
message: response.error || t('something_went_wrong'),
});
}
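The swap from antd's static notification to a notifications instance obtained via useNotifications pairs with the NotificationProvider added to the router tree below. The hook's implementation is not part of this diff; a plausible sketch built on antd's notification.useNotification(), which returns a context-aware instance plus a holder element:

// hooks/useNotifications.tsx: hypothetical provider/hook pair; the actual
// implementation is not shown in this diff.
import { notification } from 'antd';
import type { NotificationInstance } from 'antd/es/notification/interface'; // antd v5 path
import React, { createContext, ReactNode, useContext } from 'react';

const NotificationContext = createContext<NotificationInstance | null>(null);

export function NotificationProvider({
  children,
}: {
  children: ReactNode;
}): JSX.Element {
  // contextHolder must be rendered so notifications pick up the ConfigProvider theme.
  const [instance, contextHolder] = notification.useNotification();
  return (
    <NotificationContext.Provider value={instance}>
      {contextHolder}
      {children}
    </NotificationContext.Provider>
  );
}

export function useNotifications(): { notifications: NotificationInstance } {
  const notifications = useContext(NotificationContext);
  if (notifications === null) {
    throw new Error('useNotifications must be used within NotificationProvider');
  }
  return { notifications };
}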

View File

@@ -1,7 +1,12 @@
import { ConfigProvider } from 'antd';
import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import AppLayout from 'container/AppLayout';
import { useThemeConfig } from 'hooks/useDarkMode';
import { NotificationProvider } from 'hooks/useNotifications';
import { ResourceProvider } from 'hooks/useResourceAttribute';
import history from 'lib/history';
import { QueryBuilderProvider } from 'providers/QueryBuilder';
import React, { Suspense } from 'react';
import { Route, Router, Switch } from 'react-router-dom';
@@ -9,29 +14,37 @@ import PrivateRoute from './Private';
import routes from './routes';
function App(): JSX.Element {
return (
<Router history={history}>
<PrivateRoute>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => {
return (
<Route
key={`${path}`}
exact={exact}
path={path}
component={component}
/>
);
})}
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
</PrivateRoute>
</Router>
const themeConfig = useThemeConfig();
return (
<ConfigProvider theme={themeConfig}>
<Router history={history}>
<NotificationProvider>
<PrivateRoute>
<ResourceProvider>
<QueryBuilderProvider>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => (
<Route
key={`${path}`}
exact={exact}
path={path}
component={component}
/>
))}
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
</QueryBuilderProvider>
</ResourceProvider>
</PrivateRoute>
</NotificationProvider>
</Router>
</ConfigProvider>
);
}
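The new ConfigProvider wrapper takes the antd v5 ThemeConfig returned by useThemeConfig. That hook's body is not shown here; a sketch of what it plausibly returns, using antd v5's algorithm-based theming (the dark-mode state shape is an assumption):

// hooks/useDarkMode.ts: hypothetical body for useThemeConfig; only the hook
// name and its use in ConfigProvider are confirmed by this diff.
import { theme as antdTheme, ThemeConfig } from 'antd';
import { useSelector } from 'react-redux';

// Assumed shape of the app state holding the dark-mode flag.
interface AppState {
  app: { isDarkMode: boolean };
}

export function useThemeConfig(): ThemeConfig {
  const isDarkMode = useSelector((state: AppState) => state.app.isDarkMode);
  return {
    algorithm: isDarkMode ? antdTheme.darkAlgorithm : antdTheme.defaultAlgorithm,
  };
}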

View File

@@ -57,6 +57,7 @@ const afterLogin = async (
profilePictureURL: payload.profilePictureURL,
userId: payload.id,
orgId: payload.orgId,
userFlags: payload.flags,
},
});
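Only the userFlags: payload.flags line is new here; the surrounding payload suggests an action shape along these lines (names other than userFlags are taken from the visible context, the flag value type is an assumption):

// Hypothetical typing for the enriched login payload.
interface UserFlags {
  [flagName: string]: boolean | string; // value type is an assumption
}

interface LoggedInUserPayload {
  profilePictureURL: string;
  userId: string;
  orgId: string;
  userFlags: UserFlags;
}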

View File

@@ -1,6 +1,7 @@
const apiV1 = '/api/v1/';
export const apiV2 = '/api/v2/';
export const apiV3 = '/api/v3/';
export const apiAlertManager = '/api/alertmanager';
export default apiV1;
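These exported base paths feed the versioned axios instances used elsewhere in the API layer, such as the ApiV2Instance imported a few files below. A hypothetical sketch of that wiring; the module path, env var, and instance setup are all assumptions:

// api/index.ts (assumed): building versioned axios instances from the base paths.
import axios from 'axios';

import apiV1, { apiV2 } from './apiBasePaths'; // hypothetical module name

const baseURL = process.env.FRONTEND_API_ENDPOINT ?? ''; // assumed env var

export const ApiV2Instance = axios.create({
  baseURL: `${baseURL}${apiV2}`,
});

export default axios.create({
  baseURL: `${baseURL}${apiV1}`,
});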

View File

@@ -7,8 +7,9 @@ import { PayloadProps, Props } from 'types/api/dashboard/create';
const create = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
const url = props.uploadedGrafana ? '/dashboards/grafana' : '/dashboards';
try {
const response = await axios.post('/dashboards', {
const response = await axios.post(url, {
...props,
});
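With this change a single create function serves both flows: uploadedGrafana: true posts to /dashboards/grafana, otherwise it posts to /dashboards. A usage sketch; every field other than uploadedGrafana is an illustrative assumption:

// Hypothetical call site: importing a Grafana dashboard JSON.
async function importGrafanaDashboard(): Promise<void> {
  const response = await create({
    uploadedGrafana: true,
    title: 'Imported from Grafana', // assumed field on Props
  });
  if (response.statusCode === 200) {
    // created via POST /dashboards/grafana
  }
}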

View File

@@ -1,4 +1,4 @@
import axios from 'api';
import { ApiV2Instance as axios } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
@@ -8,9 +8,7 @@ const query = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.get(
`/variables/query?query=${encodeURIComponent(props.query)}`,
);
const response = await axios.post(`/variables/query`, props);
return {
statusCode: 200,
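The hunk is truncated here, but the visible change is complete: the dashboard-variable query moves from a URL-encoded GET to a POST with the props as the request body, on the v2 API instance, which sidesteps query-string length and encoding limits for long ClickHouse expressions. A call sketch; Props may carry more fields than shown, and the query value is purely illustrative:

// Hypothetical call site for the reworked endpoint.
async function loadVariableOptions(): Promise<void> {
  const result = await query({
    query: 'SELECT DISTINCT serviceName FROM signoz_traces.distributed_signoz_index_v2', // example only
  });
  console.log(result.statusCode);
}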

Some files were not shown because too many files have changed in this diff.