Compare commits


221 Commits

Author SHA1 Message Date
Srikanth Chekuri
e7e0f5b96a chore: pin version: SigNoz 0.16.2 2023-02-24 18:14:25 +05:30
Srikanth Chekuri
a26ebb742a chore: bump signoz/signoz-otel-collector version (#2378)
Merged on recommendation of @srikanthccv 

* chore: bump signoz/signoz-otel-collector version

* chore: bump everywhere
2023-02-24 17:34:00 +05:30
Amol Umbark
9d1305f174 fix: resolves alert charts issue with 1 hr timeframe (#2377) 2023-02-24 15:09:30 +05:30
Amol Umbark
ab514cc0f2 fix: changed ask admin message (#2215) 2023-02-24 14:57:07 +05:30
palashgdev
1f44f089e0 feat: multiple values can be selected (#2365)
* feat: multiple values can be selected

* chore: tag value is updated

* fix: handle few edge cases

---------

Co-authored-by: makeavish <makeavish786@gmail.com>
2023-02-23 23:54:16 +05:30
Chintan Sudani
174fc107c2 fix: scrollbar issue on widget (#2359)
* fix: Removed Strict mode to stop render twice

* fix: scrollbar issue on widget
2023-02-23 17:06:57 +05:30
palashgdev
06a55ccdd6 chore: linebreak style is updated (#2277)
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-02-23 13:10:41 +05:30
Srikanth Chekuri
a3731e4c4e fix: error rate as a percentage of range 0-100% (#2311) 2023-02-23 11:15:14 +05:30
Chintan Sudani
e183cace75 fix: null value handle on create dashboard (#2320)
* fix: Removed Strict mode to stop render twice

* fix: null value handle on create dashboard
2023-02-22 16:22:02 +05:30
Srikanth Chekuri
23490ca7f8 fix: operator should be IN for top level operations (#2304) 2023-02-22 12:10:32 +05:30
Nityananda Gohain
9f71e732c7 Merge pull request #2301 from SigNoz/feat/attribute-fix
fix: attribute name corrected in logs database
2023-02-22 09:44:23 +05:30
nityanandagohain
23d6287594 fix: attribute name corrected in logs database 2023-02-21 10:52:03 +05:30
palashgdev
2624ce4007 feat(FE): span Kind is added in the trace filter page (#2281) 2023-02-20 19:12:54 +05:30
palashgdev
3d5134b43c fix: width is added for the max content (#2292) 2023-02-20 17:28:38 +05:30
GitStart
c657f96032 fix: overflowing last timestamp (#2271)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-02-17 11:28:09 +05:30
GitStart
c18fff6ae8 FE: remove @types/redux (#2274)
* FE: remove @types/redux

* Update package.json

---------

Co-authored-by: gitstart <gitstart@users.noreply.github.com>
2023-02-17 11:04:51 +05:30
palashgdev
28142764af chore: testMatch is updated (#2270) 2023-02-15 18:32:40 +05:30
palashgdev
2fa265ff2e fix: onDrag is updated (#2267) 2023-02-15 16:03:53 +05:30
palashgdev
dca0b11acd fix: onSearch callback is updated (#2266) 2023-02-15 15:49:24 +05:30
volodfast
bad80def90 feat: add list and table views for logs (#2163)
* feat: add list and table views for logs

* chore: some of the changes are updated

* chore: some of the refactoring is done

* chore: px is updated to rem

* chore: constant is moved to local storage

* refactor: some of the refactoring is updated

* chore: some of the changes are updated

* fix: resize log table issue

* chore: logs is updated

* chore: resize header is updated

* chore: font observer is added in package json and hook is added for same

* chore: no logs text is updated

* chore: no logs text is updated

* chore: updated some feedback in raw logs line

* chore: types is added

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Chintan Sudani <csudani7@gmail.com>
2023-02-15 14:55:15 +05:30
Ankit Nayan
8965b9b503 Merge branch 'main' into develop 2023-02-15 14:22:06 +05:30
Kolesnyk Anton
05076968c9 fix: difficulty clicking on metrics graph points has been fixed (#2207)
* fix: difficulty clicking on metrics graph points has been fixed

* fix: resolve conflict

* fix: changed hover point & memoized the passed props

* fix: memo from develop

* fix: add condition for end and start stamps

* chore: type position is updated

---------

Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-02-15 10:50:39 +05:30
Prashant Shahi
7c8afc2e1c Merge pull request #2258 from SigNoz/release/v0.16.1
Release/v0.16.1
2023-02-15 01:47:43 +05:30
Prashant Shahi
c8a1a8600e chore: 📌 pin version: SigNoz 0.16.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-02-15 00:41:23 +05:30
Prashant Shahi
45cb1eb38f feat: introduce health check endpoint (#2257)
* feat(query-service): Add health check route and handler

* chore(install-script): 🔧 use health endpoint instead of services list

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-02-15 00:37:57 +05:30
Vishal Sharma
8ebb76bd0c fix: resource attribute tag key type is updated (#2231)
* fix: resource attribute tag key type is updated
from array to string

* chore: convert tag key from array to string
2023-02-14 10:48:47 +05:30
palashgdev
309ffa4989 chore: changes are updated for package.json (#2233) 2023-02-14 10:20:21 +05:30
Ankit Nayan
d787298600 Merge branch 'develop' of https://github.com/SigNoz/signoz into develop 2023-02-12 09:38:23 +05:30
GitStart
7998d474e2 FE: Create a single instance of notification in the form of a Context Provider and use it across the whole app (#2196)
* feat: create notification context provider

* chore: import is updated to absolute import

---------

Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: Palash <palashgdev@gmail.com>
2023-02-12 08:53:00 +05:30
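For context on what such a provider typically looks like: the sketch below is a minimal, hypothetical version built on antd's `notification.useNotification` API, not the exact code from the PR above. The context stores one notification instance so consumers stop importing `notification` directly.

```tsx
import React, { createContext, useContext } from 'react';
import { notification } from 'antd';

// Derive the instance type from antd itself to avoid guessing import paths.
type NotificationApi = ReturnType<typeof notification.useNotification>[0];

const NotificationContext = createContext<NotificationApi | null>(null);

export function NotificationProvider({
	children,
}: {
	children: React.ReactNode;
}): JSX.Element {
	// useNotification returns the instance plus a holder element that must be rendered.
	const [api, contextHolder] = notification.useNotification();

	return (
		<NotificationContext.Provider value={api}>
			{contextHolder}
			{children}
		</NotificationContext.Provider>
	);
}

// Consumers call this hook instead of importing `notification` directly,
// so the whole app shares a single instance.
export function useNotifications(): NotificationApi {
	const api = useContext(NotificationContext);
	if (api === null) {
		throw new Error('useNotifications must be used within NotificationProvider');
	}
	return api;
}
```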
Pranay Prateek
7b8ff5a285 Update README.md 2023-02-12 00:29:14 +05:30
Ankit Nayan
cb22aef36f Merge pull request #2225 from SigNoz/release/v0.16.0
Release/v0.16.0
2023-02-11 23:52:58 +05:30
Ankit Nayan
cf93712286 Merge branch 'develop' into release/v0.16.0 2023-02-11 23:15:47 +05:30
Ankit Nayan
a906f94b8a chore: reduce events 2023-02-11 23:15:07 +05:30
Kolesnyk Anton
93b6749920 fix: filters applied in the logs page (#2210)
* fix: filters applied in the logs page

* fix: remove console

* fix: adding of query params from query string to input

* fix: added parser

* chore: useSearch parser is updated with previous hooks

---------

Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-02-11 08:39:34 +05:30
Amol Umbark
8ab527b174 feat: support printing threshold in alert summary and description (#1827) 2023-02-10 23:53:45 +05:30
Prashant Shahi
ad163c2b61 chore: 📌 pin versions: SigNoz 0.16.0, SigNoz OtelCollector 0.66.4
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-02-10 23:50:45 +05:30
Prashant Shahi
21f909f4c0 chore: 🔧 Add low cardinal exception grouping configuration
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-02-10 23:50:15 +05:30
palashgdev
b67206dd65 fix: graph component is memoized (#2223) 2023-02-10 18:02:37 +05:30
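As an illustration of the memoization pattern referenced here (a sketch with hypothetical props, not the PR's code): wrapping the chart in `React.memo` with a custom comparator skips re-renders when the inputs haven't changed.

```tsx
import React, { memo } from 'react';

// Hypothetical props; the real component's shape may differ.
interface GraphProps {
	data: number[];
	title: string;
}

function Graph({ data, title }: GraphProps): JSX.Element {
	// Imagine an expensive chart render here.
	return <div>{`${title}: ${data.length} points`}</div>;
}

// Re-render only when the data reference or the title actually changes.
export default memo(
	Graph,
	(prev, next) => prev.data === next.data && prev.title === next.title,
);
```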
yun asny23
ce5afd31fd fix: indent spaces in yml (#1657) 2023-02-10 16:41:16 +05:30
palashgdev
9a184f5740 fix: dark mode is fixed (#2220) 2023-02-10 13:40:50 +05:30
Amol Umbark
be14f1c32c fix: removed direct ref to form item (#2221)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
2023-02-10 12:29:41 +05:30
GitStart
ae37a608f8 fix: queries B and C coupling in a dashboard panel (#2218)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
2023-02-10 11:00:38 +05:30
Vishal Sharma
aaeb579d0d chore: remove external metrics to trace nav (#2213) 2023-02-09 16:00:48 +05:30
palashgdev
1151e8521e test: traceGraphFilter/utils selectedGroupByValue is added (#2201)
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-02-08 12:58:53 +05:30
Vishal Sharma
d779b83715 feat: navigate to trace from metrics (#2191)
* feat: navigate to trace from metrics

* chore: add sonar back

* chore: refactor
2023-02-08 12:41:55 +05:30
Fernando Pimenta
47a41473df Added support for installing SigNoz on RockyLinux (#2203)
Co-authored-by: Fernando Pimenta <fernandopimenta@tecnosys.com.br>
2023-02-08 10:35:52 +05:30
volodfast
de370d7f0c feat: increase chart point visibility (#2185) 2023-02-07 20:59:11 +05:30
volodfast
8a5b26cefe feat: highlight nearest in chart on hover (#2184)
Co-authored-by: palashgdev <palashgdev@gmail.com>
2023-02-07 16:55:33 +05:30
palashgdev
2a20b6fc86 fix: unit test is fixed with react 18 (#2199) 2023-02-07 16:42:49 +05:30
Vishal Sharma
02ef1744b4 feat: add autocomplete to groupBy filters (#2156)
* feat: add autocomplete to groupBy filters

* chore: handle none groupby in frontend

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-02-07 11:41:09 +05:30
palashgdev
2c973adf0b chore: removed react-vis dependencies (#2182) 2023-02-06 15:20:27 +05:30
Axay Sagathiya
f7ff491d35 Add error check in unit tests. (#1993) 2023-02-06 08:38:47 +05:30
Yash Joshi
6cd341a887 fix: redirect to latest tag release notes (#1970)
* fix: redirect to latest tag release notes

* chore: some refactoring is updated

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-02-03 21:51:18 +05:30
Palash Gupta
0832bce955 chore: some of the eslint rules disabling is removed and types are removed (#2152)
Co-authored-by: Chintan Sudani <46838508+csudani7@users.noreply.github.com>
2023-02-03 20:32:22 +05:30
Chintan Sudani
62b2462e03 feat: modified resize table component (#2175)
* fix: Removed Strict mode to stop render twice

* feat: modified resize table component
2023-02-03 18:06:26 +05:30
Chintan Sudani
152846f554 feat: Added Resizable Wrapper for Ant Design Table (#2014)
* feat: Added Resizable Wrapper for AntD Table

* chore: Merging upstream develop into fork

* chore: updated lock file

* fix: Lint issues resolved

* fix: Lint issues resolved

* fix: Types issues

* fix: linting issues

* fix: Types issues

* fix: POC of new resize lib

* fix: linting issues

* chore: resize is updated

* fix: added old lib logic

* fix: removed console.log

* chore: types are updated

* chore: removed unused style

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-02-02 16:53:15 +05:30
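The general shape of such a wrapper (a sketch assuming the `react-resizable` package; the PR's final library choice isn't visible here) is a header-cell override passed to antd's `Table` via its `components` prop:

```tsx
import React from 'react';
import { Resizable, ResizeCallbackData } from 'react-resizable';

type HeaderCellProps = React.HTMLAttributes<HTMLElement> & {
	width?: number;
	onResize?: (e: React.SyntheticEvent, data: ResizeCallbackData) => void;
};

// Header cell that becomes draggable on its right edge when a width is set.
function ResizableHeaderCell({
	width,
	onResize,
	...rest
}: HeaderCellProps): JSX.Element {
	// Columns without an explicit width fall back to a plain <th>.
	if (!width) {
		return <th {...rest} />;
	}

	return (
		<Resizable
			width={width}
			height={0}
			onResize={onResize}
			draggableOpts={{ enableUserSelectHack: false }}
		>
			<th {...rest} />
		</Resizable>
	);
}

// Usage: <Table components={{ header: { cell: ResizableHeaderCell } }} ... />
// with each column carrying `onHeaderCell: () => ({ width, onResize })`.
export default ResizableHeaderCell;
```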
GitStart
846da08cbd refactor: antdv5 notifications (#2161)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: Nitesh Singh <nitesh.singh@gitstart.dev>
Co-authored-by: gitstart-app[bot] <57568882+gitstart-app[bot]@users.noreply.github.com>
Co-authored-by: Rubens Rafael <70234898+RubensRafael@users.noreply.github.com>
Co-authored-by: RubensRafael <rubensrafael2@live.com>
Co-authored-by: niteshsingh1357 <niteshsingh1357@gmail.com>
Co-authored-by: gitstart_bot <gitstart_bot@users.noreply.github.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-02-02 11:38:32 +05:30
Palash Gupta
17f32e9765 feat: global time is updated (#2013) 2023-02-02 11:12:12 +05:30
Chintan Sudani
48659a2957 fix: resolved violating issue on change of layout API call (#2164)
* fix: Removed Strict mode to stop render twice

* fix: resolved issue on change of layout API call
2023-02-02 10:52:14 +05:30
GitStart
a2a8a32d1c fix: different time formats in hover legend and x-axis on charts (#2040)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: niteshsingh1357 <niteshsingh1357@gmail.com>
Co-authored-by: Nitesh Singh <nitesh.singh@gitstart.dev>
Co-authored-by: gitstart-app[bot] <57568882+gitstart-app[bot]@users.noreply.github.com>
Co-authored-by: Rafael <rafael.toledo@engenharia.ufjf.br>
Co-authored-by: gitstart_bot <gitstart_bot@users.noreply.github.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-02-01 15:04:17 +05:30
ezio ruan
28f2ee2627 Update README.md (#2139) 2023-01-31 19:38:51 +05:30
Ankit Nayan
3b01bb2614 Merge pull request #2147 from SigNoz/release/v0.15.0
Release/v0.15.0
2023-01-31 16:58:45 +05:30
Prashant Shahi
622e1765cf Merge branch 'develop' into release/v0.15.0 2023-01-31 16:21:38 +05:30
Amol Umbark
faaf0a6e73 fix: solved re-render issue when input fields were edited (#2149)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
2023-01-31 14:46:03 +05:30
Prashant Shahi
4542a51531 Merge branch 'main' into release/v0.15.0 2023-01-31 00:56:31 +05:30
Prashant Shahi
191a538430 chore: 📌 pin versions: SigNoz 0.15.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-31 00:22:47 +05:30
Prashant Shahi
e6ce80213b Merge branch 'develop' into release/v0.15.0 2023-01-31 00:20:08 +05:30
Palash Gupta
3115b32dcd fix: interval is blocked for custom time selection (#2146)
* fix: interval is blocked for custom time selection

* fix: custom is updated

* chore: selectedTime is updated in hidden logic
2023-01-30 19:27:13 +05:30
Chintan Sudani
af272a368b fix: added lazy loading on dashboard (#2133)
* fix: Removed Strict mode to stop render twice

* fix: added lazy loading on dashboard

* fix: suggested changes

* fix: added react-intersection-observer changes

* fix: resolved multiple time api call issue

* chore: variable name is updated

---------

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-30 18:32:05 +05:30
Palash Gupta
b336a6cb45 fix: widget options are now opening (#2141) 2023-01-30 18:06:49 +05:30
Fellipe Montes
b72815ca2f FIX: Exported dashboard includes response of the queries #1981 (#2052)
* clear the queryData
* avoid creation of inline func and move logic to utils
* remove console.log
* fix
2023-01-30 16:07:23 +05:30
Amol Umbark
ed4a01dea6 fix: log issue remove field in query panel (#2130) 2023-01-27 13:27:59 +05:30
Vishal Sharma
1914c3b4a0 chore: update install message in install.sh script (#2131) 2023-01-27 12:53:23 +05:30
Prashant Shahi
3811e96e23 chore: 📌 pin versions: SigNoz OtelCollector 0.66.3 in standalone Docker
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-27 11:11:25 +05:30
Prashant Shahi
8d16493432 chore: 📌 pin versions: SigNoz OtelCollector 0.66.3
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-26 14:30:18 +05:30
Vishal Sharma
db2bfbb887 fix: tag filter query builder (#2125) 2023-01-26 01:18:19 +05:30
Chintan Sudani
213838a021 fix: Chart loaders on reload and change of time interval at dashboard (#2068)
* fix: Chart data logic on dashboard reloads

* fix: linting issues

* fix: added right side loader & css config

* fix: loader condition change

* fix: linting issues

* fix: error state of API

* fix: Resolved suggested changes

* fix: Error state for API Failed

* fix: Default loading state

* fix: linting issues

* fix: Suggested changes

* feat: Added common hook for previous value

* chore: usePrevious is made type safe

* chore: chart data set is updated

* chore: removed eslint rule

* fix: commitlint issue on commit

Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-25 20:31:42 +05:30
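The `usePrevious` hook mentioned above commonly has this shape (a sketch; the repo's type-safe version may differ in detail):

```ts
import { useEffect, useRef } from 'react';

// Returns the value from the previous render, or undefined on the first one.
// Useful for diffing props (e.g. chart data) before triggering work.
export function usePrevious<T>(value: T): T | undefined {
	const ref = useRef<T>();

	useEffect(() => {
		// Runs after render, so reads during render still see the old value.
		ref.current = value;
	}, [value]);

	return ref.current;
}
```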
Pranay Prateek
fd6f9a90e1 removing repostats workflow (#2053)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-01-25 20:13:57 +05:30
Prashant Shahi
13f9922c53 chore(frontend): 🔧 support ARM and copy yarnrc in Dockerfile (#2119)
Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-25 19:58:44 +05:30
Chintan Sudani
a654baaa5b fix: graph flickering issue on trace page (#2120)
* fix: Removed Strict mode to stop render twice

* fix: graph flickering issue on trace page
2023-01-25 19:55:33 +05:30
Palash Gupta
f766435acc feat: popover is added in the trace tag search (#2118)
* feat: popover is updated

* chore: arrow is removed and padding is removed

* chore: width is updated
2023-01-25 18:56:15 +05:30
Marius Kimmina
d7a65ba689 chore: remove unneeded code comments (#2054)
Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-25 16:10:22 +05:30
Vishal Sharma
05ce03e67d feat: tag filtering frontend changes (#2116)
feat: tag filtering frontend changes
2023-01-25 15:20:27 +05:30
Palash Gupta
ba6818f487 fix: total count in usage explorer (#2117)
* fix: total count in usage explorer

* chore: no spans found is also wrapped under typography
2023-01-25 14:55:39 +05:30
Srikanth Chekuri
ca53136cbf feat(ui): dashboard variable chaining (#2037)
* feat: dashboard variable chaining

* feat(ui): dashboard variable chaining

* chore: update vars loading

* chore: fix lint

* chore: better dependent vars

* chore: multi dependent variables

* chore: add more user friendly error

* chore: review comments

* chore: address review comments

* chore: remove string assertion

* chore: fix build by updating types

* chore: fix the variable data auto loading

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-01-25 13:22:57 +05:30
Vishal Sharma
c46bef321c feat: tag filter backend changes (#2115) 2023-01-25 12:35:44 +05:30
Palash Gupta
ba8f804b26 fix: yarnrc is added in the root of the frontend (#2114)
Co-authored-by: Prashant Shahi <prashant@signoz.io>
2023-01-25 12:11:22 +05:30
Chintan Sudani
6cc7025e37 fix: Chart is not updating on change of variables (#2020)
* fix: Chart is not updating on change of variables

* fix: Added useLocation hook for pathname

* fix: Lint issues resolved

* fix: Updated logic behind change of variables

* fix: Suggested changes of variable

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-25 10:54:36 +05:30
Palash Gupta
e62e541fc4 FE: added more eslint rule (#2090)
* chore: arrow-body-style func-style is added in the rule

* fix: linting issues fixed

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-24 18:53:04 +05:30
Palash Gupta
2f1ca93eda fix: tags is grabbed from the local state (#2106) 2023-01-24 17:42:48 +05:30
Priyanka Chakraborty
f1c7d72fc5 1375 overview querybuilder (#1983) 2023-01-24 09:30:26 +05:30
Chintan Sudani
a405307c96 fix: Redundant call on resize or rearrange layout on dashboard (#2099)
* fix: Removed Strict mode to stop render twice

* fix: Redundant call on resize or rearrange layout on dashboard

* fix: Resolved suggested changes

* fix: Resolved suggested changes

* chore: some of the refactoring is updated

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-23 20:21:24 +05:30
Ram S Gupta
c85d48d7fa remove no-shadow:off rules from eslint rule list (#2093)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-23 17:15:18 +05:30
Chintan Sudani
75470f6bb9 fix: Removed Strict mode to stop render twice (#2097) 2023-01-23 17:01:45 +05:30
Palash Gupta
f75e688b32 feat: text is handled under light and dark mode (#2087) 2023-01-23 10:40:27 +05:30
Vishal Sharma
5f3ca045df fix: dockerfile clickhouse indentation issue (#2083) 2023-01-20 00:09:46 +05:30
Chintan Sudani
186632af69 fix: Changed Legends UI & Scrollable (#2078)
* fix: Changed Legends UI & Scrollable

* fix: Changed axis label color

* fix: Changed Legends UI & Scrollable

* chore: Removed other issues changes

* fix: linting issues

* fix: changed fontsize of legend

* fix: changed height of legend

* chore: px is updated to rem

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-18 19:53:45 +05:30
Chintan Sudani
fa652be926 fix: Changed axis label color (#2080)
* fix: Changed axis label color

* fix: linting issues

* chore: helpers is updated

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-18 19:40:15 +05:30
volodfast
1e39131c38 feat: drag select timeframe on charts (#2018)
* feat: add drag select functionality to chart

* fix: use redux stored values for time frame selection

* fix: ignore clicks on chart without dragging

* feat: add intersection cursor to chart

* refactor: update drag-select chart plugin

* fix: respond to drag-select mouseup outside of chart

* fix: remove unnecessary chart update

* feat: add drag-select to dashboard charts

* refactor: add util functions to create custom plugin options

* fix: enable custom chart plugins

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2023-01-17 17:00:34 +05:30
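A rough skeleton of how a Chart.js v3 drag-select plugin can work (illustrative only; the hook names follow the public plugin API, but the wiring and the redux dispatch are assumptions, not the PR's code):

```ts
import { Chart, ChartEvent, Plugin } from 'chart.js';

// Note: 'mousedown' and 'mouseup' are not in Chart.js's default event list,
// so they must be added to the chart's `options.events` for these to arrive.
let dragStartX: number | null = null;

export const dragSelectPlugin: Plugin = {
	id: 'dragSelect',
	afterEvent(chart: Chart, args: { event: ChartEvent }): void {
		const { type, x } = args.event;
		if (x === null || x === undefined) return;

		if (type === 'mousedown') {
			dragStartX = x;
		} else if (type === 'mouseup' && dragStartX !== null) {
			// Ignore plain clicks that involved no real dragging.
			if (Math.abs(x - dragStartX) > 5) {
				const xScale = chart.scales.x;
				const from = xScale.getValueForPixel(Math.min(dragStartX, x));
				const to = xScale.getValueForPixel(Math.max(dragStartX, x));
				// In the app this would dispatch the selected time range to redux.
				console.log('selected time frame:', from, to);
			}
			dragStartX = null;
		}
	},
};
```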
Ankit Nayan
153e859ac3 Fix/analytics (#2049)
* fix: incorrect calculation
* chore: adding nil check

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-17 15:28:58 +05:30
Fellipe Montes
d1cc29e118 Create: Widget Header in the Loading State #2042 (#2048)
* create a visual loading state with header

* updates loading with WidgetHeader component

* chore: onview and ondelete is updated

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-17 11:37:30 +05:30
Priyanka Chakraborty
972bf94dd0 refactor: tagFilteritems-refactored (#2056)
* refactor: tagFilteritems-refactored

* refactor: wrapper-over-getwidget

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-16 18:05:13 +05:30
Palash Gupta
3632208d45 Fix: live tail memory (#2033)
* feat: react is updated to v18

* feat: logs card is updated
2023-01-16 17:56:46 +05:30
Srikanth Chekuri
cd9768c738 feat: dashboard variable chaining (#2036) 2023-01-16 14:57:04 +05:30
Pranay Prateek
f01b9605db Update README.md 2023-01-16 13:03:15 +05:30
GitStart
eec236af50 Add visual feedback on Copy JSON in Log filter page (#2055)
Co-authored-by: gitstart <gitstart@users.noreply.github.com>
Co-authored-by: niteshsingh1357 <niteshsingh1357@gmail.com>
Co-authored-by: Nitesh Singh <nitesh.singh@gitstart.dev>
Co-authored-by: Thiago Nascimbeni <tnascimbeni@gmail.com>
Co-authored-by: gitstart_bot <gitstart_bot@users.noreply.github.com>
2023-01-16 12:03:35 +05:30
Fellipe Montes
bbff2b459e Fix: Invite links do not work if name is not given when creating the invite #2008 (#2026) 2023-01-13 21:37:36 +05:30
Chintan Sudani
d9535e7a8d fix: Trigger Save layout only on title (#2039)
* fix: Trigger Save layout only on title

* chore: code improvement

* fix: Lint issues resolved

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-13 17:29:51 +05:30
volodfast
a82bbe1a72 chore: update chartjs to version 3.9.1 (#2041) 2023-01-13 17:07:28 +05:30
Fellipe Montes
6812f55152 change CSS and isEllipsed variable (#2035)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-13 14:07:23 +05:30
Chintan Sudani
83163c17cd fix: Added extra color code to stop repeating the same color (#2015) 2023-01-13 13:50:11 +05:30
Palash Gupta
5ed7c9a46e feat: react is updated to v18 (#2030) 2023-01-13 12:01:46 +05:30
Ankit Nayan
2f323056d0 Merge pull request #2034 from SigNoz/release/v0.14.0
Release/v0.14.0
2023-01-12 18:55:14 +05:30
Prashant Shahi
51b583480b chore: 📌 pin versions: SigNoz 0.14.0, SigNoz OtelCollector 0.66.2
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-12 17:56:29 +05:30
Srikanth Chekuri
7b1e2c8b98 fix: use target arch amd64 (#2027) 2023-01-12 11:27:48 +05:30
Srikanth Chekuri
b87f3bdb50 fix: query builder formula fails to eval (#1999)
* fix: query builder formula fails to eval

* fix: result label set without reference

* chore: update tests

Co-authored-by: Prashant Shahi <prashant@signoz.io>
2023-01-11 16:12:47 +05:30
Palash Gupta
2f5908a3dd feat: antd is updated from v4 to v5 (#2012)
* feat: v5 is in progress

* feat: antdv5 is updated

* fix: build is fixed

* fix: default config is overwritten by custom one

* chore: onchange handler is updated

* chore: overflow is hidden in the layout

* Update index.tsx

* fix: import is fixed

* chore: unused import is fixed

* fix: dark mode is updated in service map

* fix: config dropdown is updated

* fix: logs types is updated

* fix: copy clipboard notification is updated

* chore: layout changes are updated

* chore: colors is updated

* chore: action width is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-01-11 14:39:06 +05:30
Yash Joshi
ca77820e9d refactor: use antd form in organization display name (#2006)
* refactor: use antd form in organization display name

* chore: interface is now named interface

Co-authored-by: Palash <palashgdev@gmail.com>
2023-01-11 00:59:45 +05:30
Marius Kimmina
a4346a2d93 fix(FE): show no No Data on default Dashboards (#2003)
* fix(FE): show no No Data on default Dashboards

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>

* chore: removed unused styles

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2023-01-10 23:54:14 +05:30
Srikanth Chekuri
44360ecacf Add support for histogram quantiles (#1533) 2023-01-10 21:42:44 +05:30
Srikanth Chekuri
b675c3cfec fix: add signoz.collector.id to spanmetrics dimensions (#2001)
* fix: add service.instance.id to spanmetrics dimensions

* chore: update description

* chore: update the resource key
2023-01-10 19:21:17 +05:30
Marius Kimmina
b23d8da96c style: use 'no data' for empty graphs (#2002)
* style: use 'No Data' for empty graphs

* style: use 'No data' for empty graphs

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>

Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2023-01-10 10:44:59 +05:30
Ankit Nayan
215ea8d819 chore: different ticker interval for active user 2023-01-08 23:12:02 +05:30
Ankit Nayan
0c27d5acbc chore: better error handling 2023-01-08 22:49:11 +05:30
Ankit Nayan
435d74c37e Merge pull request #1996 from SigNoz/release/v0.13.1
Release/v0.13.1
2023-01-07 20:56:08 +05:30
Prashant Shahi
b35bdf01cc chore: 📌 pin versions: SigNoz 0.13.1 2023-01-07 18:22:02 +05:30
Ankit Nayan
9b654143bb chore: update latest logged-in user 2023-01-07 02:54:27 +05:30
Ankit Nayan
4841f150f4 fix: minor changes 2023-01-07 02:31:54 +05:30
Ankit Nayan
16a49a8b04 fix: minor changes 2023-01-07 02:21:44 +05:30
Ankit Nayan
1fd819b806 fix: added ratelimit to specific event 2023-01-07 00:16:57 +05:30
Ankit Nayan
cab9e04cdd fix: concurrent writes to map 2023-01-06 16:10:13 +05:30
Ankit Nayan
e8f341b850 Revert "feat: antdv5 is updated (#1880)" (#1991)
This reverts commit 7b86022280.
2023-01-06 13:40:31 +05:30
Ankit Nayan
1f6fcb9b8c Revert "feat: react is updated to v18 (#1948)" (#1990)
This reverts commit 1c7202b5bf.
2023-01-06 13:32:27 +05:30
Palash Gupta
1c7202b5bf feat: react is updated to v18 (#1948)
* feat: v5 is in progress

* feat: antdv5 is updated

* fix: build is fixed

* fix: default config is overwritten by custom one

* chore: onchange handler is updated

* chore: overflow is hidden in the layout

* feat: react is updated from v17 to v18

* feat: antdv5 is updated (#1880)

* feat: v5 is in progress

* feat: antdv5 is updated

* fix: build is fixed

* fix: default config is overwritten by custom one

* chore: onchange handler is updated

* chore: overflow is hidden in the layout

* Update index.tsx

* fix: import is fixed

* chore: unused import is fixed

* fix: dark mode is updated in service map

* fix: config dropdown is updated

* fix: logs types is updated

* fix: copy clipboard notification is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>

* chore: all channel is updated move from usefetch to usequery

* fix: typescript is fixed

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2023-01-04 22:58:05 +05:30
Ankit Nayan
24ac062bf5 Fix/analytics (#1987)
* fix: added server code to ee
2023-01-04 22:48:38 +05:30
Axay Sagathiya
b776bf5b09 Add Docs to install SQLite3 (#1924)
* add commands to install sqlite3 in Makefile.

* Add code to check if it's running on Linux System.

* Revert "Add code to check if its running on Linux"

This reverts commit 552cfb08c9.

* Revert "add commands to install sqlite3 in Makefi"

This reverts commit 781c23d12d.

* Add Documentation to install SQLite3.

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-04 22:02:48 +05:30
Yash Joshi
144076e029 fix: disable button unless org name is different (#1984) 2023-01-04 18:20:02 +05:30
Vishal Sharma
835251b342 fix: use rpc method and responseStatusCode (#1971)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-04 16:15:08 +05:30
Prashant Shahi
ebbad5812f ci: 👷 fix testing and staging deployments (#1980)
Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-04 14:33:52 +05:30
Palash Gupta
7b86022280 feat: antdv5 is updated (#1880)
* feat: v5 is in progress

* feat: antdv5 is updated

* fix: build is fixed

* fix: default config is overwritten by custom one

* chore: onchange handler is updated

* chore: overflow is hidden in the layout

* Update index.tsx

* fix: import is fixed

* chore: unused import is fixed

* fix: dark mode is updated in service map

* fix: config dropdown is updated

* fix: logs types is updated

* fix: copy clipboard notification is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2023-01-04 12:48:12 +05:30
Prashant Shahi
da1fd4b0cd ci(deployments): workflows for staging and testing deployments and related changes (#1933)
* chore(Makefile): remove no-cache from all docker build commands
* chore(Makefile): 🔧 update target name
* feat(docker-standalone): introduce tag environment variables for easy custom deployments
* ci(deployments): 👷 workflows for staging and testing deployments
* ci(deployments): 👷 pass DEV_BUILD env to remote host
2023-01-03 22:28:48 +05:30
Prashant Shahi
57d28be9f5 fix: 🐛 resolve redundant metrics issue (#1946)
Signed-off-by: Prashant Shahi <prashant@signoz.io>

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2023-01-03 10:55:58 +05:30
Palash Gupta
126c9238ba feat: loading is added in the button (#1927)
* feat: loading is added in the button

* chore: disable condition is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2023-01-02 12:08:35 +05:30
Pranay Prateek
31a3bc09c8 Removing Beta tag from Logs (#1952) 2022-12-31 11:11:48 +05:30
Vishal Sharma
6ba5c0ecad fix: apply filters on count of exceptions (#1945) 2022-12-30 16:46:13 +05:30
Palash Gupta
27cd514fa5 fix: Logs double api is called (#1947) 2022-12-30 13:59:02 +05:30
Yash Joshi
f0e13784e5 fix(sidebar): highlight active feature in nested route (#1929)
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-12-30 01:10:02 +05:30
Yash Joshi
742ceac32c fix(logs): prevent duplicate logs dispatch (#1934)
* fix(logs): prevent duplicate logs dispatch

* refactor: use useMountedstate hook

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-12-30 00:51:53 +05:30
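The `useMountedState` hook referenced here is a common guard pattern for exactly this kind of duplicate-dispatch bug; a minimal sketch (matching the shape of the `react-use` hook of the same name, not necessarily the repo's code):

```ts
import { useCallback, useEffect, useRef } from 'react';

// Returns a function that reports whether the component is still mounted,
// so async callbacks can skip dispatching state updates after unmount.
export function useMountedState(): () => boolean {
	const mountedRef = useRef(false);

	useEffect(() => {
		mountedRef.current = true;
		return (): void => {
			mountedRef.current = false;
		};
	}, []);

	return useCallback(() => mountedRef.current, []);
}
```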
Ankit Nayan
545d46c39c Merge pull request #1943 from SigNoz/release/v0.13.0
Release/v0.13.0
2022-12-29 17:32:15 +05:30
Prashant Shahi
d134e4f4d9 chore: 📌 pin versions: SigNoz 0.13.0, SigNoz OtelCollector 0.66.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-12-29 14:27:24 +05:30
Ankit Nayan
e03b0aa45f chore/analytics (#1939)
* fix: not capturing empty filters

* feat: removing signoz_ metrics using grep

* fix: initialise companyDomain

* feat: added ttl status
2022-12-29 01:14:57 +05:30
Vishal Sharma
46e131698e fix: exception filter clear (#1936) 2022-12-28 17:48:39 +05:30
Ankit Nayan
d1ee15c372 fix: nil pointer 2022-12-28 15:30:24 +05:30
Ankit Nayan
1e035be978 Merge branch 'develop' into chore/analytics 2022-12-28 15:26:59 +05:30
Vishal Sharma
88a97fc4b8 add exception page filters support (#1919)
* feat: backend changes for supporting exception filters

* feat: frontend changes for exception page filter support

* chore: extractSingleFilterValue is updated

* fix: handle frontend edge case

Co-authored-by: Ankit Nayan <ankit@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-12-28 14:54:15 +05:30
Nityananda Gohain
2e58f6db7a fix: error handling for index removal from selected field (#1935)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-28 14:31:57 +05:30
Amol Umbark
1916fc87b0 fix: added clear filters button (#1920)
* fix: added clear filters button

* fix: removed console log


Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-28 14:30:37 +05:30
Ankit Nayan
d8882acdd7 fix: changed or to and 2022-12-28 02:34:07 +05:30
Ankit Nayan
7f42b39684 fix: changed or to and 2022-12-28 02:33:21 +05:30
Ankit Nayan
b11f79b4c7 Chore/analytics (#1922)
* fix: reduced rate limit to 2 of each event in 1 min

* feat: added new event for length of filters in logs search page

* feat: added distributed cluster info

* fix: length of filters in logs

* feat: dashboard metadata with no rateLimit

* feat: active user

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-12-28 02:16:46 +05:30
Ankit Nayan
c717e39a1a Merge branch 'chore/analytics' of https://github.com/SigNoz/signoz into chore/analytics 2022-12-28 02:10:36 +05:30
Ankit Nayan
c3253687d0 feat: active user 2022-12-28 02:09:44 +05:30
Yash Joshi
895c721b37 fix(version): use link instead of click handler (#1931)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-12-27 23:13:13 +05:30
Vishal Sharma
35f5fb6957 fix: respect durationSort feature flag on getSpanFilters API (#1900)
* fix: respect durationSort feature flag on getSpanFilters API

* chore: update DB query
2022-12-27 21:09:36 +05:30
Palash Gupta
40ec4517c2 fix: per page is added in the dependency (#1926) 2022-12-27 19:01:56 +05:30
Srikanth Chekuri
48a6f536fa chore: increase dimensions_cache_size for signozspanmetrics processor (#1925) 2022-12-27 15:44:39 +05:30
Palash Gupta
13a6d7f7c6 fix: live tail time out is updated (#1899)
* fix: live tail time out is updated
* Update livetail.ts

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-27 13:36:37 +05:30
Srikanth Chekuri
8b6ed0f951 Merge branch 'develop' into chore/analytics 2022-12-27 12:21:51 +05:30
Srikanth Chekuri
eef48c54f8 fix(query_range): invalid memory address or nil pointer dereference (#1875)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-27 11:28:15 +05:30
Ankit Nayan
aad962d07d feat: dashboard metadata with no rateLimit 2022-12-27 01:10:01 +05:30
Ankit Nayan
18bbb3cf36 fix: length of filters in logs 2022-12-26 23:10:55 +05:30
Ankit Nayan
a3455fb553 feat: added distributed cluster info 2022-12-26 23:01:54 +05:30
Ankit Nayan
ece2988d0d feat: added new event for length of filters in logs search page 2022-12-26 22:11:23 +05:30
Ankit Nayan
db704b212d fix: reduced rate limit to 2 of each event in 1 min 2022-12-26 21:52:54 +05:30
Amol Umbark
4b13b0a8a4 fix: resolves issue related to ops not flowing from search box to panel (#1918) 2022-12-26 20:31:50 +05:30
Palash Gupta
6f6499c267 fix: flush logs before starting (#1912)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:25:55 +05:30
Prashant Shahi
3dcb44a758 fix docker-compose for swarm and related changes for distributed clickhouse (#1863)
* chore: 🔧 fix docker-compose.yaml for swarm

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore: 🔧 add .gitkeep files for docker and swarm

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:16:47 +05:30
Palash Gupta
0595cdc7af fix: scroll is added (#1873)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:14:54 +05:30
Palash Gupta
092c02762f feat: add not found when no events are present (#1874)
* chore: not found component is updated
* feat: no events handling is updated

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:14:17 +05:30
Palash Gupta
d1d2829d2b fix: logs issues (#1889)
* changed debounce interval to 600ms

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 16:45:28 +05:30
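For illustration of the debounce change (the 600 ms value comes from the commit above; the helper itself is a generic sketch, not the repo's code):

```ts
// Generic debounce helper: delays `fn` until `waitMs` ms have passed without
// another call, e.g. a 600 ms pause in typing before querying logs.
function debounce<Args extends unknown[]>(
	fn: (...args: Args) => void,
	waitMs: number,
): (...args: Args) => void {
	let timer: ReturnType<typeof setTimeout> | undefined;

	return (...args: Args): void => {
		if (timer) clearTimeout(timer);
		timer = setTimeout(() => fn(...args), waitMs);
	};
}

// Hypothetical usage for the logs search box.
const onSearchChange = debounce((query: string) => {
	console.log('fetch logs for', query);
}, 600);
```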
Palash Gupta
ac446294e7 fix: logs selection of filter is fixed (#1910)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 16:20:34 +05:30
Marius Kimmina
1cceab4d5e fix(FE): remove unnecessary complexity from password check (#1904)
Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>
2022-12-26 16:02:18 +05:30
Ankit Nayan
02898d14f9 fix: removes password validations other than length (#1909) 2022-12-26 15:42:08 +05:30
Nityananda Gohain
09af6c262c fix: proxy_read_timeout updated in nginx conf (#1885)
* fix: proxy_read_timeout updated in nginx conf
* fix: live tail endpoint-flush the headers first

Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 15:29:49 +05:30
Amol Umbark
faeaeb61a0 fix: added validations on query builder (#1906)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 15:10:01 +05:30
Nityananda Gohain
9c80ba6b78 fix: allow multiple spaces between a filter expression (#1897)
* fix: allow multiple spaces between a filter expression

* fix: regex updated to respect spaces between a search string


Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-12-26 15:08:43 +05:30
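An illustration of the idea (hypothetical TypeScript; the actual parser lives server-side): matching runs of whitespace with `\s+` instead of a single literal space makes tokenization tolerant of extra spaces.

```ts
// `\s+` collapses any run of spaces/tabs, so both spellings tokenize identically.
const expr = 'key   IN   ("a", "b")';
const tokens = expr.trim().split(/\s+/);
// tokens => ['key', 'IN', '("a",', '"b")']
console.log(tokens);
```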
Palash Gupta
dbba8b5b55 feat: event time is updated when root span is missing 2022-12-22 17:35:20 +05:30
Pranay Prateek
58ce838023 chore: Updating stale edition message (#1896) 2022-12-22 11:44:28 +05:30
Srikanth Chekuri
5260b152f5 fix: do not show result of sub queries in external calls (#1858) 2022-12-20 19:54:27 +05:30
Ankit Nayan
f2dd254d83 Merge pull request #1849 from SigNoz/release/v0.12.0
Release/v0.12.0
2022-12-11 00:14:59 +05:30
Prashant Shahi
82d53fa45c chore: 📌 pin versions: SigNoz 0.12.0, SigNoz OtelCollector 0.66.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-12-10 20:17:49 +05:30
Zsombor
c38d1c150d Fix case sensitivity in query parsing (#1670)
* Fix case sensitivity in query parsing - now the parser correctly recognizes fields which contain uppercase letters

* fix: logs parser respects the case of fields

Co-authored-by: nityanandagohain <nityanandagohain@gmail.com>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-10 19:27:57 +05:30
Srikanth Chekuri
16170eacc0 Revert "chore: use local table for inner query (#1815)" (#1847)
* Revert "chore: use local table for inner query (#1815)"

This reverts commit 1b52edb056.

* chore: use localhost
2022-12-10 19:25:44 +05:30
Amol Umbark
66ddbfc085 fix: solves issue with legend update causing null ch query (#1845)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-10 12:21:20 +05:30
Vishal Sharma
2715ab61a4 chore: introduce docker_multi_node_cluster and by default set to false (#1839)
* chore: introduce docker_multi_node_cluster and by default set to false

* chore(query-service): 🔧 include docker_multi_node_cluster for tests

Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Prashant Shahi <me@prashantshahi.dev>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-09 21:57:25 +05:30
Amol Umbark
4d291e92b9 fix: changed table names in default alert queries (#1843)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
2022-12-09 21:54:51 +05:30
Nityananda Gohain
1b73649f8e fix: add default value for materialized column in distributed logs table (#1835)
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-09 20:18:58 +05:30
Amol Umbark
0abae1c09c feat: show release note in alerts dashboards and services pages (#1840)
* feat: show release note in alerts dashboards and services pages

* fix: made code changes as per review and changed message in release note

* fix: solved build pipeline issue

* fix: solved lint issue

Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-09 20:16:09 +05:30
Pranay Prateek
4d02603aed Update README.md 2022-12-09 14:01:05 +05:30
Pranay Prateek
c58e43a678 Update README.md 2022-12-09 12:54:34 +05:30
Pranay Prateek
b77bbe1e4f Update README.md 2022-12-09 12:50:41 +05:30
Pranay Prateek
d4eb241c04 Update README.md 2022-12-09 12:48:57 +05:30
Pranay Prateek
98e1a77a43 Update README.md 2022-12-09 12:48:30 +05:30
Pranay Prateek
498b04491b updated logs image 2022-12-09 12:42:25 +05:30
Pranay Prateek
4e58414cc2 Update README.md 2022-12-09 12:36:05 +05:30
Pranay Prateek
67943cfec0 Update README.md 2022-12-09 12:32:03 +05:30
Palash Gupta
f170eb1b23 fix: scroll is added in case of extra space (#1838) 2022-12-09 10:00:55 +05:30
Vishal Sharma
6931b18382 fix: remove shared variable in TTL and async TTL queries (#1821)
* fix: remove shared variable in TTL

* fix: set distributed_ddl_task_timeout to 0 for async TTL

* chore: updated distributed_ddl_task_timeout to sync TTL queries
2022-12-07 18:23:01 +05:30
Prashant Shahi
8a9d6f664a fix: 🐛 log parsing issue (#1824)
Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2022-12-07 17:57:55 +05:30
Amol Umbark
8affe8df31 fix: solved issue with google help link (#1826) 2022-12-07 16:10:17 +05:30
Nityananda Gohain
1c8626e933 feat: usage collection updated for ee (#1654)
* feat: usage collection updated with new schema and logic

* fix: added exporter id and common collector id

* fix: upload usage only when license is present

* fix: handle if db doesn't exist

* fix: select query updated for usage collection to support distributed table

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 22:52:39 +05:30
Amol Umbark
87932de668 [feat] ee/google auth implementation (#1775)
* [feat] initial version for google oauth

* chore: arranged the sso packages and added prepare request for google auth

* feat: added google auth config page and backend to handle the request

* chore: code cleanup for domain SSO parsing

* Update constants.go

* chore: moved redirect sso error

* chore: lint issue fixed with domain

* chore: added tooltip for enforce sso and few changes to auth domain

* chore: moved question mark in enforce sso

* fix: resolved pr review comments

* chore: fixed type check for saml config

* fix: fixed saml config form

* chore: added util for transformed form values to samlconfig

Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 22:32:59 +05:30
Srikanth Chekuri
1b52edb056 chore: use local table for inner query (#1816)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 22:31:38 +05:30
Priyanka Chakraborty
5a81557df7 1374 dbcalls querybuilder (#1608)
* refactor: dbcalls-fromPromql-querybuilder


Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 16:52:20 +05:30
Prashant Shahi
8bb3eefeb5 chore(clickhouse): 🔧 include cluster.xml for distributed set up (#1810)
* chore(clickhouse): 🔧 include cluster.xml for distributed set up

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-12-05 17:26:13 +05:30
Amol Umbark
a46f074e22 fix: resolves empty variables issue for imported dashboards (#1808) 2022-12-05 16:48:11 +05:30
Prashant Shahi
88fa3b7699 feat(distributed): create single docker-compose.yaml and CH configuration (#1803)
* feat: setup for distributed clickhouse

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-05 16:24:01 +05:30
Ankit Nayan
7f77bcca2b Feat/distributed ch (#1701)
* feat: support for distributed table

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-12-02 12:30:28 +05:30
Palash Gupta
ab5311caac feat: events is updated by adding the timestamp (#1802)
* feat: events is updated
* chore: title is updated
* feat: trace detail event timestamp is updated
2022-12-02 10:34:06 +05:30
Palash Gupta
8aae9f53a9 feat: search in tags is updated (#1788)
* feat: search in tags is updated

* chore: placeholder is updated
2022-12-01 14:19:12 +05:30
Ankit Nayan
18d80d47e5 Merge pull request #1776 from SigNoz/release/v0.11.4
Release/v0.11.4
2022-11-29 17:36:31 +05:30
463 changed files with 12886 additions and 20105 deletions

.github/config.yml

@@ -17,7 +17,7 @@ newPRWelcomeComment: >
# Comment to be posted to on pull requests merged by a first time user
firstPRMergeComment: >
Congrats on merging your first pull request!
![minion-party](https://i.imgur.com/Xlg59lP.gif)
We here at SigNoz are proud of you! 🥳


@@ -32,6 +32,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Run tests
shell: bash
run: |
make test
- name: Build query-service image
shell: bash
run: |


@@ -57,7 +57,7 @@ jobs:
--set frontend.service.type=LoadBalancer \
--set queryService.image.tag=$DOCKER_TAG \
--set frontend.image.tag=$DOCKER_TAG
# get pods, services and the container images
kubectl get pods -n platform
kubectl get svc -n platform


@@ -17,4 +17,3 @@ jobs:
uses: hattan/verify-linked-issue-action@v1.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -11,6 +11,6 @@ jobs:
- name: Remove label
uses: buildsville/add-remove-label@v1
with:
label: ok-to-test
label: ok-to-test,testing-deploy
type: remove
token: ${{ secrets.GITHUB_TOKEN }}


@@ -1,25 +0,0 @@
on:
schedule:
# Run this once per day, towards the end of the day for keeping the most
# recent data point most meaningful (hours are interpreted in UTC).
- cron: "0 8 * * *"
workflow_dispatch: # Allow for running this manually.
jobs:
j1:
name: repostats
runs-on: ubuntu-latest
steps:
- name: run-ghrs
uses: jgehrcke/github-repo-stats@v1.1.0
with:
# Define the stats repository (the repo to fetch
# stats for and to generate the report for).
# Remove the parameter when the stats repository
# and the data repository are the same.
repository: signoz/signoz
# Set a GitHub API token that can read the stats
# repository, and that can push to the data
# repository (which this workflow file lives in),
# to store data and the report files.
ghtoken: ${{ github.token }}


@@ -24,4 +24,3 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}


@@ -0,0 +1,38 @@
name: staging-deployment
# Trigger deployment only on push to develop branch
on:
push:
branches:
- develop
jobs:
deploy:
name: Deploy latest develop branch to staging
runs-on: ubuntu-latest
environment: staging
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.6
env:
GITHUB_BRANCH: develop
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.EC2_SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
docker system prune --force
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make run-signoz


@@ -0,0 +1,39 @@
name: testing-deployment
# Trigger deployment only on testing-deploy label on pull request
on:
pull_request:
types: [labeled]
jobs:
deploy:
name: Deploy PR branch to testing
runs-on: ubuntu-latest
environment: testing
if: ${{ github.event.label.name == 'testing-deploy' }}
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.6
env:
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.EC2_SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export DEV_BUILD="1"
docker system prune --force
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make run-signoz


@@ -215,9 +215,26 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
# 4. Contribute to Backend (Query-Service) 🌑
[**https://github.com/SigNoz/signoz/tree/develop/pkg/query-service**](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)
**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
## 4.1 To run ClickHouse setup (recommended for local development)
## 4.1 Prerequisites
### 4.1.1 Install SQLite3
- Run `sqlite3` command to check if you already have SQLite3 installed on your machine.
- If not already installed, install it using the command below
- on Linux
- on Debian / Ubuntu
```
sudo apt install sqlite3
```
- on CentOS / Fedora / RedHat
```
sudo yum install sqlite3
```
## 4.2 To run ClickHouse setup (recommended for local development)
- Clone the SigNoz repository and cd into signoz directory,
```


@@ -45,7 +45,7 @@ build-frontend-amd64:
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build --file Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend(used in push pipeline)
@@ -54,7 +54,7 @@ build-push-frontend:
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 \
docker buildx build --file Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of query service
@@ -65,7 +65,7 @@ build-query-service-amd64:
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
@@ -73,7 +73,7 @@ build-push-query-service:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane --no-cache \
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane \
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
@@ -84,11 +84,11 @@ build-ee-query-service-amd64:
@echo "------------------"
@if [ $(DEV_BUILD) != "" ]; then \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
else \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
fi
@@ -98,7 +98,7 @@ build-push-ee-query-service:
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--progress plane --no-cache --push --platform linux/arm64,linux/amd64 \
--progress plane --push --platform linux/arm64,linux/amd64 \
--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
dev-setup:
@@ -119,16 +119,22 @@ down-local:
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
down -v
run-x86:
pull-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
run-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
down-x86:
down-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
clear-standalone-data:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-swarm-data:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
test:
go test ./pkg/query-service/app/metrics/...


@@ -23,7 +23,9 @@
##
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. With SigNoz, you can:
👉 Visualise Metrics, Traces and Logs in a single pane of glass
👉 You can see metrics like p99 latency, error rates for your services, external API calls and individual endpoints.
@@ -31,11 +33,17 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
👉 Run aggregates on trace data to get business relevant metrics
![screenzy-1644432902955](https://user-images.githubusercontent.com/504541/153270713-1b2156e6-ec03-42de-975b-3c02b8ec1836.png)
👉 Filter and query logs, build dashboards and alerts based on attributes in logs
![screenzy-1670570187181](https://user-images.githubusercontent.com/504541/206646629-829fdafe-70e2-4503-a9c4-1301b7918586.png)
<br />
![screenzy-1644432986784](https://user-images.githubusercontent.com/504541/153270725-0efb73b3-06ed-4207-bf13-9b7e2e17c4b8.png)
![screenzy-1670570193901](https://user-images.githubusercontent.com/504541/206646676-a676fdeb-331c-4847-aea9-d1cabf7c47e1.png)
<br />
![screenzy-1647005040573](https://user-images.githubusercontent.com/504541/157875938-a3d57904-ea6d-4278-b929-bd1408d7f94c.png)
![screenzy-1670570199026](https://user-images.githubusercontent.com/504541/206646754-28c5534f-0377-428c-9c6e-5c7c0d9dd22d.png)
<br />
![screenzy-1670569888865](https://user-images.githubusercontent.com/504541/206645819-1e865a56-71b4-4fde-80cc-fbdb137a4da5.png)
<br /><br />
@@ -51,12 +59,12 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
## Features:
- Unified UI for metrics, traces and logs. No need to switch from Prometheus to Jaeger to debug issues, or use a logs tool like Elastic separate from your metrics and traces stack.
- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
- Slowest endpoints in your application
- See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
- Filter traces by service name, operation, latency, error, tags/annotations.
- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.
<br /><br />
@@ -129,6 +137,25 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:
- Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't get aggregates on filtered traces. For example, p99 latency of requests which have tag - customer_type='premium'. This can be done easily on SigNoz
<p>&nbsp </p>
### SigNoz vs Elastic
- SigNoz logs management is based on ClickHouse, a columnar OLAP datastore which makes aggregate log analytics queries much more efficient
- 50% lower resource requirement compared to Elastic during ingestion
We have published benchmarks comparing Elastic with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
<p>&nbsp;</p>
### SigNoz vs Loki
- SigNoz supports aggregations on high-cardinality data over huge volumes, while Loki doesn't.
- SigNoz supports indexes over high-cardinality data and has no limitations on the number of indexes, while Loki hits its max-streams limit once a few indexes are added.
- Searching over a huge volume of data is difficult and slow in Loki compared to SigNoz.
We have published benchmarks comparing Loki with SigNoz. Check it out [here](https://signoz.io/blog/logs-performance-benchmark/?utm_source=github-readme&utm_medium=logs-benchmark)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />

View File

@@ -27,12 +27,6 @@ For x86 chip (amd):
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```
For Mac with Apple chip (arm):
```sh
docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
```
Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see
the data generated by hotrod in the SigNoz UI.

View File

@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<clickhouse>
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<zookeeper>
<node index="1">
<host>zookeeper-1</host>
<port>2181</port>
</node>
<!-- <node index="2">
<host>zookeeper-2</host>
<port>2181</port>
</node>
<node index="3">
<host>zookeeper-3</host>
<port>2181</port>
</node> -->
</zookeeper>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<cluster>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
It would also be nice if the following were implemented:
- source hostname (see interserver_http_host), but then it would depend on DNS;
it can use the IP address instead, but then you need to get it correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>clickhouse</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
<!-- <shard>
<replica>
<host>clickhouse-2</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>clickhouse-3</host>
<port>9000</port>
</replica>
</shard> -->
</cluster>
</remote_servers>
</clickhouse>

View File

@@ -236,8 +236,8 @@
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
<!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile> -->
<!-- <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
<!-- dhparams are optional. You can delete the <dhParamsFile> element.
To generate dhparams, use the following command:
openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
@@ -618,148 +618,6 @@
</jdbc_bridge>
-->
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<!-- Test only shard config for testing distributed storage -->
<test_shard_localhost>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
It would also be nice if the following were implemented:
- source hostname (see interserver_http_host), but then it would depend on DNS;
it can use the IP address instead, but then you need to get it correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>localhost</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
</test_shard_localhost>
<test_cluster_one_shard_three_replicas_localhost>
<shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.3</host>
<port>9000</port>
</replica>
</shard>
<!--shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.3</host>
<port>9000</port>
</replica>
</shard-->
</test_cluster_one_shard_three_replicas_localhost>
<test_cluster_two_shards_localhost>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_localhost>
<test_cluster_two_shards>
<shard>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards>
<test_cluster_two_shards_internal_replication>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_internal_replication>
<test_shard_localhost_secure>
<shard>
<replica>
<host>localhost</host>
<port>9440</port>
<secure>1</secure>
</replica>
</shard>
</test_shard_localhost_secure>
<test_unavailable_shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>1</port>
</replica>
</shard>
</test_unavailable_shard>
</remote_servers>
<!-- The list of hosts allowed to be used in URL-related storage engines and table functions.
If this section is not present in configuration, all hosts are allowed.
-->
@@ -786,29 +644,6 @@
Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
-->
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<!--
<zookeeper>
<node>
<host>example1</host>
<port>2181</port>
</node>
<node>
<host>example2</host>
<port>2181</port>
</node>
<node>
<host>example3</host>
<port>2181</port>
</node>
</zookeeper>
-->
<!-- Substitutions for parameters of replicated tables.
Optional. If you don't use replicated tables, you could omit that.

View File

@@ -1,30 +1,127 @@
version: "3.9"
x-clickhouse-defaults: &clickhouse-defaults
image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true
deploy:
restart_policy:
condition: on-failure
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-clickhouse-depend: &clickhouse-depend
depends_on:
- clickhouse
# - clickhouse-2
# - clickhouse-3
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.0
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
image: clickhouse/clickhouse-server:22.8.8-alpine
<<: *clickhouse-defaults
hostname: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
tty: true
# - "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
deploy:
restart_policy:
condition: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
# clickhouse-2:
# <<: *clickhouse-defaults
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# clickhouse-3:
# <<: *clickhouse-defaults
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.23.0-0.2
@@ -40,7 +137,7 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.11.4
image: signoz/query-service:0.16.2
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
@@ -66,11 +163,10 @@ services:
deploy:
restart_policy:
condition: on-failure
depends_on:
- clickhouse
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.11.4
image: signoz/frontend:0.16.2
deploy:
restart_policy:
condition: on-failure
@@ -83,7 +179,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.63.0
image: signoz/signoz-otel-collector:0.66.5
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
@@ -91,6 +187,8 @@ services:
- /var/lib/docker/containers:/var/lib/docker/containers:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
@@ -107,11 +205,10 @@ services:
mode: global
restart_policy:
condition: on-failure
depends_on:
- clickhouse
<<: *clickhouse-depend
otel-collector-metrics:
image: signoz/signoz-otel-collector:0.63.0
image: signoz/signoz-otel-collector:0.66.5
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
@@ -123,8 +220,7 @@ services:
deploy:
restart_policy:
condition: on-failure
depends_on:
- clickhouse
<<: *clickhouse-depend
hotrod:
image: jaegertracing/example-hotrod:1.30

View File

@@ -64,7 +64,9 @@ receivers:
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
@@ -78,12 +80,16 @@ processors:
signozspanmetrics/prometheus:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 10000
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: 'signoz.collector.id'
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -103,15 +109,20 @@ processors:
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
prometheus:
endpoint: 0.0.0.0:8889
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 5s
sending_queue:
queue_size: 100
@@ -144,9 +155,13 @@ service:
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
receivers: [hostmetrics, prometheus]
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]

View File

@@ -7,7 +7,9 @@ receivers:
scrape_interval: 60s
static_configs:
- targets:
- localhost:8888
- localhost:8888
labels:
job_name: otel-collector-metrics
# SigNoz span metrics
- job_name: signozspanmetrics-collector
scrape_interval: 60s

View File

@@ -30,6 +30,8 @@ server {
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
# redirect server error pages to the static page /50x.html

View File

@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<clickhouse>
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<zookeeper>
<node index="1">
<host>zookeeper-1</host>
<port>2181</port>
</node>
<!-- <node index="2">
<host>zookeeper-2</host>
<port>2181</port>
</node>
<node index="3">
<host>zookeeper-3</host>
<port>2181</port>
</node> -->
</zookeeper>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<cluster>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
It would also be nice if the following were implemented:
- source hostname (see interserver_http_host), but then it would depend on DNS;
it can use the IP address instead, but then you need to get it correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>clickhouse</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
<!-- <shard>
<replica>
<host>clickhouse-2</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>clickhouse-3</host>
<port>9000</port>
</replica>
</shard> -->
</cluster>
</remote_servers>
</clickhouse>

View File

@@ -236,8 +236,8 @@
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
<!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile> -->
<!-- <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
<!-- dhparams are optional. You can delete the <dhParamsFile> element.
To generate dhparams, use the following command:
openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
@@ -618,148 +618,6 @@
</jdbc_bridge>
-->
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<!-- Test only shard config for testing distributed storage -->
<test_shard_localhost>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
It would also be nice if the following were implemented:
- source hostname (see interserver_http_host), but then it would depend on DNS;
it can use the IP address instead, but then you need to get it correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>localhost</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
</test_shard_localhost>
<test_cluster_one_shard_three_replicas_localhost>
<shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.3</host>
<port>9000</port>
</replica>
</shard>
<!--shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.3</host>
<port>9000</port>
</replica>
</shard-->
</test_cluster_one_shard_three_replicas_localhost>
<test_cluster_two_shards_localhost>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_localhost>
<test_cluster_two_shards>
<shard>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards>
<test_cluster_two_shards_internal_replication>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_internal_replication>
<test_shard_localhost_secure>
<shard>
<replica>
<host>localhost</host>
<port>9440</port>
<secure>1</secure>
</replica>
</shard>
</test_shard_localhost_secure>
<test_unavailable_shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>1</port>
</replica>
</shard>
</test_unavailable_shard>
</remote_servers>
<!-- The list of hosts allowed to be used in URL-related storage engines and table functions.
If this section is not present in configuration, all hosts are allowed.
-->
@@ -786,29 +644,6 @@
Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
-->
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<!--
<zookeeper>
<node>
<host>example1</host>
<port>2181</port>
</node>
<node>
<host>example2</host>
<port>2181</port>
</node>
<node>
<host>example3</host>
<port>2181</port>
</node>
</zookeeper>
-->
<!-- Substitutions for parameters of replicated tables.
Optional. If you don't use replicated tables, you could omit that.
@@ -1070,7 +905,8 @@
<dictionaries_config>*_dictionary.xml</dictionaries_config>
<!-- Configuration of user defined executable functions -->
<user_defined_executable_functions_config>*_function.xml</user_defined_executable_functions_config>
<user_defined_executable_functions_config>*function.xml</user_defined_executable_functions_config>
<user_scripts_path>/var/lib/clickhouse/user_scripts/</user_scripts_path>
<!-- Uncomment if you want data to be compressed 30-100% better.
Don't do that if you just started using ClickHouse.

View File

@@ -0,0 +1,21 @@
<functions>
<function>
<type>executable</type>
<name>histogramQuantile</name>
<return_type>Float64</return_type>
<argument>
<type>Array(Float64)</type>
<name>buckets</name>
</argument>
<argument>
<type>Array(Float64)</type>
<name>counts</name>
</argument>
<argument>
<type>Float64</type>
<name>quantile</name>
</argument>
<format>CSV</format>
<command>./histogramQuantile</command>
</function>
</functions>
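
The declaration above wires ClickHouse to the `./histogramQuantile` executable (resolved against the `user_scripts` path, which the docker-compose change in this PR mounts) and exchanges rows over CSV. A minimal sketch of calling the UDF from Go, assuming the clickhouse-go v2 driver and a server reachable on localhost:9000; the DSN and values are illustrative, not taken from the SigNoz codebase:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/ClickHouse/clickhouse-go/v2" // registers the "clickhouse" database/sql driver
)

func main() {
	db, err := sql.Open("clickhouse", "clickhouse://localhost:9000")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Bounds must end with +Inf and counts are cumulative, matching the
	// Prometheus-style buckets the UDF implementation expects.
	var q float64
	row := db.QueryRow(`SELECT histogramQuantile([1.0, 2.0, 4.0, 8.0, inf], [1.0, 5.0, 8.0, 10.0, 14.0], 0.5)`)
	if err := row.Scan(&q); err != nil {
		log.Fatal(err)
	}
	fmt.Println(q) // interpolates inside the (2, 4] bucket, roughly 3.33
}
```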

View File

@@ -41,7 +41,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: otel-collector
image: signoz/signoz-otel-collector:0.63.0
image: signoz/signoz-otel-collector:0.66.5
command: ["--config=/etc/otel-collector-config.yaml"]
# user: root # required for reading docker container logs
volumes:
@@ -67,7 +67,7 @@ services:
otel-collector-metrics:
container_name: otel-collector-metrics
image: signoz/signoz-otel-collector:0.63.0
image: signoz/signoz-otel-collector:0.66.5
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml

View File

@@ -1,31 +1,145 @@
version: "2.4"
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-clickhouse-depend: &clickhouse-depend
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.0
container_name: zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
image: clickhouse/clickhouse-server:22.8.8-alpine
# ports:
# - "9000:9000"
# - "8123:8123"
tty: true
<<: *clickhouse-defaults
container_name: clickhouse
hostname: clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
- ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:0.23.0-0.2
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.0-0.2}
volumes:
- ./data/alertmanager:/data
depends_on:
@@ -39,7 +153,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:0.11.4
image: signoz/query-service:${DOCKER_TAG:-0.16.2}
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
@@ -64,12 +178,10 @@ services:
interval: 30s
timeout: 5s
retries: 3
depends_on:
clickhouse:
condition: service_healthy
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.11.4
image: signoz/frontend:${DOCKER_TAG:-0.16.2}
container_name: frontend
restart: on-failure
depends_on:
@@ -81,7 +193,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.63.0
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.5}
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
@@ -89,6 +201,8 @@ services:
- /var/lib/docker/containers:/var/lib/docker/containers:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
@@ -102,12 +216,10 @@ services:
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
<<: *clickhouse-depend
otel-collector-metrics:
image: signoz/signoz-otel-collector:0.63.0
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.5}
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
@@ -117,20 +229,18 @@ services:
# - "13133:13133" # Health check extension
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
<<: *clickhouse-depend
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"

View File

@@ -64,7 +64,10 @@ receivers:
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
@@ -74,12 +77,16 @@ processors:
signozspanmetrics/prometheus:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 10000
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: 'signoz.collector.id'
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -111,16 +118,21 @@ extensions:
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
prometheus:
endpoint: 0.0.0.0:8889
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 5s
sending_queue:
queue_size: 100
@@ -148,9 +160,13 @@ service:
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
receivers: [hostmetrics, prometheus]
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]

View File

@@ -11,7 +11,9 @@ receivers:
scrape_interval: 60s
static_configs:
- targets:
- localhost:8888
- localhost:8888
labels:
job_name: otel-collector-metrics
# SigNoz span metrics
- job_name: signozspanmetrics-collector
scrape_interval: 60s

View File

@@ -0,0 +1,237 @@
package main
import (
"bufio"
"fmt"
"math"
"os"
"sort"
"strconv"
"strings"
)
// NOTE: executable must be built with target OS and architecture set to linux/amd64
// env GOOS=linux GOARCH=amd64 go build -o histogramQuantile histogramQuantile.go
// The following code is adapted from the following source:
// https://github.com/prometheus/prometheus/blob/main/promql/quantile.go
type bucket struct {
upperBound float64
count float64
}
// buckets implements sort.Interface.
type buckets []bucket
func (b buckets) Len() int { return len(b) }
func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }
// bucketQuantile calculates the quantile 'q' based on the given buckets. The
// buckets will be sorted by upperBound by this function (i.e. no sorting
// needed before calling this function). The quantile value is interpolated
// assuming a linear distribution within a bucket. However, if the quantile
// falls into the highest bucket, the upper bound of the 2nd highest bucket is
// returned. A natural lower bound of 0 is assumed if the upper bound of the
// lowest bucket is greater than 0. In that case, interpolation in the lowest bucket
// happens linearly between 0 and the upper bound of the lowest bucket.
// However, if the lowest bucket has an upper bound less than or equal to 0, this upper
// bound is returned if the quantile falls into the lowest bucket.
//
// There are a number of special cases (once we have a way to report errors
// happening during evaluations of AST functions, we should report those
// explicitly):
//
// If 'buckets' has 0 observations, NaN is returned.
//
// If 'buckets' has fewer than 2 elements, NaN is returned.
//
// If the highest bucket is not +Inf, NaN is returned.
//
// If q==NaN, NaN is returned.
//
// If q<0, -Inf is returned.
//
// If q>1, +Inf is returned.
func bucketQuantile(q float64, buckets buckets) float64 {
if math.IsNaN(q) {
return math.NaN()
}
if q < 0 {
return math.Inf(-1)
}
if q > 1 {
return math.Inf(+1)
}
sort.Sort(buckets)
if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
return math.NaN()
}
buckets = coalesceBuckets(buckets)
ensureMonotonic(buckets)
if len(buckets) < 2 {
return math.NaN()
}
observations := buckets[len(buckets)-1].count
if observations == 0 {
return math.NaN()
}
rank := q * observations
b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })
if b == len(buckets)-1 {
return buckets[len(buckets)-2].upperBound
}
if b == 0 && buckets[0].upperBound <= 0 {
return buckets[0].upperBound
}
var (
bucketStart float64
bucketEnd = buckets[b].upperBound
count = buckets[b].count
)
if b > 0 {
bucketStart = buckets[b-1].upperBound
count -= buckets[b-1].count
rank -= buckets[b-1].count
}
return bucketStart + (bucketEnd-bucketStart)*(rank/count)
}
// coalesceBuckets merges buckets with the same upper bound.
//
// The input buckets must be sorted.
func coalesceBuckets(buckets buckets) buckets {
last := buckets[0]
i := 0
for _, b := range buckets[1:] {
if b.upperBound == last.upperBound {
last.count += b.count
} else {
buckets[i] = last
last = b
i++
}
}
buckets[i] = last
return buckets[:i+1]
}
// The assumption that bucket counts increase monotonically with increasing
// upperBound may be violated during:
//
// * Recording rule evaluation of histogram_quantile, especially when rate()
// has been applied to the underlying bucket timeseries.
// * Evaluation of histogram_quantile computed over federated bucket
// timeseries, especially when rate() has been applied.
//
// This is because scraped data is not made available to rule evaluation or
// federation atomically, so some buckets are computed with data from the
// most recent scrapes, but the other buckets are missing data from the most
// recent scrape.
//
// Monotonicity is usually guaranteed because if a bucket with upper bound
// u1 has count c1, then any bucket with a higher upper bound u > u1 must
// have counted all c1 observations and perhaps more, so that c >= c1.
//
// Randomly interspersed partial sampling breaks that guarantee, and rate()
// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
// monotonicity is broken. It is exacerbated by rate() because under normal
// operation, cumulative counting of buckets will cause the bucket counts to
// diverge such that small differences from missing samples are not a problem.
// rate() removes this divergence.
//
// bucketQuantile depends on that monotonicity to do a binary search for the
// bucket with the φ-quantile count, so breaking the monotonicity
// guarantee causes bucketQuantile() to return undefined (nonsense) results.
//
// As a somewhat hacky solution until ingestion is atomic per scrape, we
// calculate the "envelope" of the histogram buckets, essentially removing
// any decreases in the count between successive buckets.
func ensureMonotonic(buckets buckets) {
max := buckets[0].count
for i := 1; i < len(buckets); i++ {
switch {
case buckets[i].count > max:
max = buckets[i].count
case buckets[i].count < max:
buckets[i].count = max
}
}
}
// End of copied code.
func readLines() []string {
r := bufio.NewReader(os.Stdin)
bytes := []byte{}
lines := []string{}
for {
line, isPrefix, err := r.ReadLine()
if err != nil {
break
}
bytes = append(bytes, line...)
if !isPrefix {
str := strings.TrimSpace(string(bytes))
if len(str) > 0 {
lines = append(lines, str)
bytes = []byte{}
}
}
}
if len(bytes) > 0 {
lines = append(lines, string(bytes))
}
return lines
}
func main() {
lines := readLines()
for _, text := range lines {
// Example input
// "[1, 2, 4, 8, 16]", "[1, 5, 8, 10, 14]", 0.9"
// bounds - counts - quantile
parts := strings.Split(text, "\",")
var bucketNumbers []float64
// Strip the ends with square brackets
text = parts[0][2 : len(parts[0])-1]
// Parse the bucket bounds
for _, num := range strings.Split(text, ",") {
num = strings.TrimSpace(num)
number, err := strconv.ParseFloat(num, 64)
if err == nil {
bucketNumbers = append(bucketNumbers, number)
}
}
var bucketCounts []float64
// Strip the ends with square brackets
text = parts[1][2 : len(parts[1])-1]
// Parse the bucket counts
for _, num := range strings.Split(text, ",") {
num = strings.TrimSpace(num)
number, err := strconv.ParseFloat(num, 64)
if err == nil {
bucketCounts = append(bucketCounts, number)
}
}
// Parse the quantile
q, err := strconv.ParseFloat(parts[2], 64)
var b buckets
if err == nil {
for i := 0; i < len(bucketNumbers); i++ {
b = append(b, bucket{upperBound: bucketNumbers[i], count: bucketCounts[i]})
}
}
fmt.Println(bucketQuantile(q, b))
}
}
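
To see the CSV protocol end to end, here is a minimal sketch that feeds the built binary one row on stdin and prints what it writes back, assuming it was compiled for the host as described in the NOTE at the top of the file; the sample buckets are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// One CSV row, exactly as ClickHouse would send it: quoted bounds array
	// (last bound +Inf), quoted cumulative counts array, then the quantile.
	in := `"[1, 2, 4, 8, +Inf]","[1, 5, 8, 10, 14]",0.5`

	cmd := exec.Command("./histogramQuantile") // assumes the binary is in the working directory
	cmd.Stdin = bytes.NewBufferString(in + "\n")
	out, err := cmd.Output()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out)) // prints ~3.3333 (linear interpolation in the (2, 4] bucket)
}
```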

View File

@@ -30,6 +30,8 @@ server {
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
# redirect server error pages to the static page /50x.html

View File

@@ -81,6 +81,11 @@ check_os() {
os="centos"
package_manager="yum"
;;
Rocky*)
desired_os=1
os="centos"
package_manager="yum"
;;
SLES*)
desired_os=1
os="sles"
@@ -223,7 +228,7 @@ wait_for_containers_start() {
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
if [[ status_code -eq 200 ]]; then
break
else
@@ -511,13 +516,15 @@ else
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
echo " By default, retention period is set to 7 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
echo " To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "👉 Need help Getting Started?"
echo "👉 Need help in Getting Started?"
echo -e "Join us on Slack https://signoz.io/slack"
echo ""
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"

View File

@@ -9,6 +9,7 @@ import (
"go.signoz.io/signoz/ee/query-service/license"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/version"
)
@@ -93,6 +94,10 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
baseapp.OpenAccess(ah.receiveSAML)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/complete/google",
baseapp.OpenAccess(ah.receiveGoogleAuth)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/orgs/{orgId}/domains",
baseapp.AdminAccess(ah.listDomainsByOrg)).
Methods(http.MethodGet)
@@ -123,5 +128,11 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
version := version.GetVersion()
ah.WriteJSON(w, r, map[string]string{"version": version, "ee": "Y"})
versionResponse := basemodel.GetVersionResponse{
Version: version,
EE: "Y",
SetupCompleted: ah.SetupCompleted,
}
ah.WriteJSON(w, r, versionResponse)
}
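
A hypothetical client-side view of the richer version payload this handler now returns; the JSON field names below are assumptions for illustration and may not match the actual tags on `basemodel.GetVersionResponse`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// versionResponse mirrors the three fields set in getVersion above;
// tag names and the bool type for SetupCompleted are assumed.
type versionResponse struct {
	Version        string `json:"version"`
	EE             string `json:"ee"`
	SetupCompleted bool   `json:"setupCompleted"`
}

func main() {
	payload := []byte(`{"version":"v0.16.2","ee":"Y","setupCompleted":true}`)
	var v versionResponse
	if err := json.Unmarshal(payload, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", v) // {Version:v0.16.2 EE:Y SetupCompleted:true}
}
```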

View File

@@ -8,9 +8,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
@@ -90,9 +88,16 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
// get invite object
invite, err := baseauth.ValidateInvite(ctx, req)
if err != nil || invite == nil {
if err != nil {
zap.S().Errorf("failed to validate invite token", err)
RespondError(w, model.BadRequest(err), nil)
return
}
if invite == nil {
zap.S().Errorf("failed to validate invite token: it is either empty or invalid", err)
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
return
}
// get auth domain from email domain
@@ -184,114 +189,149 @@ func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, resp)
}
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// this is the source url that initiated the login request
func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
ssoError := []byte("Login failed. Please contact your system administrator")
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
base64.StdEncoding.Encode(dst, ssoError)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
}
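
`handleSsoError` base64-encodes the message into the `ssoerror` query parameter, so the login page has to decode it before showing anything to the user. A minimal round-trip sketch; the redirect host is illustrative:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	msg := []byte("Login failed. Please contact your system administrator")

	// What handleSsoError does before redirecting to /login.
	enc := base64.StdEncoding.EncodeToString(msg)
	fmt.Println("https://signoz.example.com/login?ssoerror=" + enc) // illustrative host

	// What the login page would do with the parameter.
	dec, err := base64.StdEncoding.DecodeString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dec))
}
```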
// receiveGoogleAuth completes google OAuth response and forwards a request
// to front-end to sign user in
func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()
var apierr basemodel.BaseApiError
redirectOnError := func() {
ssoError := []byte("Login failed. Please contact your system administrator")
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
base64.StdEncoding.Encode(dst, ssoError)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, string(dst)), http.StatusMovedPermanently)
}
if !ah.CheckFeature(model.SSO) {
zap.S().Errorf("[ReceiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
err := r.ParseForm()
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
redirectOnError()
q := r.URL.Query()
if errType := q.Get("error"); errType != "" {
zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
return
}
// the relay state is sent when a login request is submitted to
// Idp.
relayState := r.FormValue("RelayState")
zap.S().Debug("[ReceiveML] relay state", zap.String("relayState", relayState))
relayState := q.Get("state")
zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
redirectOnError()
zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}
// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// derive domain id from relay state now
var domainIdStr string
for k, v := range parsedState.Query() {
if k == "domainId" && len(v) > 0 {
domainIdStr = strings.Replace(v[0], ":", "-", -1)
}
}
domainId, err := uuid.Parse(domainIdStr)
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to process request- failed to parse domain id ifrom relay", zap.Error(err))
redirectOnError()
handleSsoError(w, r, redirectUri)
return
}
domain, apierr := ah.AppDao().GetDomain(ctx, domainId)
if (apierr != nil) || domain == nil {
zap.S().Errorf("[ReceiveSAML] failed to process request- invalid domain", domainIdStr, zap.Error(apierr))
redirectOnError()
// now that we have domain, use domain to fetch sso settings.
// prepare google callback handler using parsedState -
// which contains redirect URL (front-end endpoint)
callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
identity, err := callbackHandler.HandleCallback(r)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
http.Redirect(w, r, nextPage, http.StatusSeeOther)
}
// receiveSAML completes a SAML request and gets user logged in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// this is the source url that initiated the login request
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()
if !ah.CheckFeature(model.SSO) {
zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
err := r.ParseForm()
if err != nil {
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}
// the relay state is sent when a login request is submitted to
// Idp.
relayState := r.FormValue("RelayState")
zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}
// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}
sp, err := domain.PrepareSamlRequest(parsedState)
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to prepare saml request for domain (%s): %v", domainId, err)
redirectOnError()
zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domainId, err)
redirectOnError()
zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
if assertionInfo.WarningInfo.InvalidTime {
zap.S().Errorf("[ReceiveSAML] expired saml response for organization (%s): %v", domainId, err)
redirectOnError()
zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
email := assertionInfo.NameID
// user email found, now start preparing jwt response
userPayload, baseapierr := ah.AppDao().GetUserByEmail(ctx, email)
if baseapierr != nil {
zap.S().Errorf("[ReceiveSAML] failed to find or register a new user for email %s and org %s", email, domainId, zap.Error(baseapierr.Err))
redirectOnError()
if email == "" {
zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
handleSsoError(w, r, redirectUri)
return
}
tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
if err != nil {
zap.S().Errorf("[ReceiveSAML] failed to generate access token for email %s and org %s", email, domainId, zap.Error(err))
redirectOnError()
zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
userID := userPayload.User.Id
nextPage := fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
userID,
tokenStore.RefreshJwt)
http.Redirect(w, r, nextPage, http.StatusMovedPermanently)
http.Redirect(w, r, nextPage, http.StatusSeeOther)
}

View File

@@ -1,8 +1,11 @@
package app
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // http profiler
@@ -20,12 +23,14 @@ import (
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/interfaces"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/telemetry"
@@ -117,6 +122,16 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
// start the usagemanager
usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn())
if err != nil {
return nil, err
}
err = usageManager.Start()
if err != nil {
return nil, err
}
telemetry.GetInstance().SetReader(reader)
apiOpts := api.APIHandlerOptions{
@@ -255,15 +270,82 @@ func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}
func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFrom := "/api/v2/metrics/query_range"
data := map[string]interface{}{}
var postData *model.QueryRangeParamsV2
if path == pathToExtractBodyFrom && (r.Method == "POST") {
if r.Body != nil {
bodyBytes, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, false
}
r.Body.Close() // must close
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &postData)
} else {
return nil, false
}
} else {
return nil, false
}
signozMetricNotFound := false
if postData != nil {
signozMetricNotFound = telemetry.GetInstance().CheckSigNozMetricsV2(postData.CompositeMetricQuery)
if postData.CompositeMetricQuery != nil {
data["queryType"] = postData.CompositeMetricQuery.QueryType
data["panelType"] = postData.CompositeMetricQuery.PanelType
}
data["datasource"] = postData.DataSource
}
if signozMetricNotFound {
telemetry.GetInstance().AddActiveMetricsUser()
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
}
return data, true
}
func getActiveLogs(path string, r *http.Request) {
// if path == "/api/v1/dashboards/{uuid}" {
// telemetry.GetInstance().AddActiveMetricsUser()
// }
if path == "/api/v1/logs" {
hasFilters := len(r.URL.Query().Get("q"))
if hasFilters > 0 {
telemetry.GetInstance().AddActiveLogsUser()
}
}
}
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
getActiveLogs(path, r)
lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if metadataExists {
for key, value := range dashboardMetadata {
data[key] = value
}
}
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)

View File

@@ -2,7 +2,7 @@ package dao
import (
"context"
"net/url"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/model"
@@ -22,7 +22,9 @@ type ModelDao interface {
// auth methods
PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
// org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)

View File

@@ -10,9 +10,33 @@ import (
"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.uber.org/zap"
)
// PrepareSsoRedirect prepares the redirect link after the SSO response
// is successfully parsed (i.e. a valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error())
return "", model.BadRequestStr("invalid user email received from the auth provider")
}
tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
if err != nil {
zap.S().Errorf("failed to generate token for SSO login user", err)
return "", model.InternalErrorStr("failed to generate token for the user")
}
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
userPayload.User.Id,
tokenStore.RefreshJwt), nil
}
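For context, a hypothetical callback handler could hand the validated email to PrepareSsoRedirect and then redirect the browser. The handler, form fields, and narrowed interface below are illustrative only, and the real method returns a basemodel.BaseApiError, simplified here to error:

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// ssoRedirector is an illustrative, narrowed view of the ModelDao above.
type ssoRedirector interface {
	PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (string, error)
}

// receiveSSO sketches the post-IdP step: mint tokens and bounce the browser
// back to the frontend with jwt, usr and refreshjwt query params.
func receiveSSO(dao ssoRedirector) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		email := r.FormValue("email")       // assume the IdP response was validated upstream
		redirectUri := r.FormValue("relay") // site url relayed through the IdP
		redirectURL, err := dao.PrepareSsoRedirect(r.Context(), redirectUri, email)
		if err != nil {
			http.Error(w, "SSO login failed", http.StatusBadRequest)
			return
		}
		http.Redirect(w, r, redirectURL, http.StatusSeeOther)
	}
}

// fakeDao lets the sketch run without a database or auth provider.
type fakeDao struct{}

func (fakeDao) PrepareSsoRedirect(_ context.Context, redirectUri, email string) (string, error) {
	return redirectUri + "?jwt=fake&usr=" + email, nil
}

func main() {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodPost, "/complete?email=a@b.c&relay=https://app.example.com", nil)
	receiveSSO(fakeDao{})(rec, req)
	fmt.Println(rec.Code, rec.Header().Get("Location"))
}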
func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {

View File

@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"encoding/json"
"net/url"
"fmt"
"strings"
"time"
@@ -25,6 +26,34 @@ type StoredDomain struct {
UpdatedAt int64 `db:"updated_at"`
}
// GetDomainFromSsoResponse uses the relay state received from the IdP to fetch
// the user's org domain. The domain is then used to validate the response.
// When sending the login request to the IdP, we send the relay state as a URL
// (the site url) with domainId as a query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
domainIdStr = strings.Replace(v[0], ":", "-", -1)
}
}
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
zap.S().Errorf("failed to parse domain id from relay state", err)
return nil, fmt.Errorf("failed to parse response from IdP response")
}
domain, err := m.GetDomain(ctx, domainId)
if (err != nil) || domain == nil {
zap.S().Errorf("failed to find domain received in IdP response", err.Error())
return nil, fmt.Errorf("invalid credentials")
}
return domain, nil
}
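Since the domain id travels inside a URL query parameter, its dashes are swapped for colons on the way out and restored here. A self-contained sketch of the decode step (the host name is a placeholder):

package main

import (
	"fmt"
	"net/url"
	"strings"

	"github.com/google/uuid"
)

// domainIDFromRelayState mirrors the lookup above: pull domainId from the
// relay-state URL and restore the dashes that were encoded as colons.
func domainIDFromRelayState(relayState *url.URL) (uuid.UUID, error) {
	raw := relayState.Query().Get("domainId")
	if raw == "" {
		return uuid.Nil, fmt.Errorf("relay state has no domainId")
	}
	return uuid.Parse(strings.ReplaceAll(raw, ":", "-"))
}

func main() {
	rs, _ := url.Parse("https://signoz.example.com/login?domainId=3f2504e0:4f89:11d3:9a0c:0305e82c3301")
	id, err := domainIDFromRelayState(rs)
	fmt.Println(id, err) // 3f2504e0-4f89-11d3-9a0c-0305e82c3301 <nil>
}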
// GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {

View File

@@ -127,7 +127,7 @@ func NewPostRequestWithCtx(ctx context.Context, url string, contentType string,
}
// SendUsage reports the usage of signoz to license server
func SendUsage(ctx context.Context, usage *model.UsagePayload) *model.ApiError {
func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
reqString, _ := json.Marshal(usage)
req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {

View File

@@ -9,8 +9,10 @@ import (
"github.com/google/uuid"
"github.com/pkg/errors"
saml2 "github.com/russellhaering/gosaml2"
"go.signoz.io/signoz/ee/query-service/saml"
"go.signoz.io/signoz/ee/query-service/sso/saml"
"go.signoz.io/signoz/ee/query-service/sso"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
type SSOType string
@@ -20,12 +22,6 @@ const (
GoogleAuth SSOType = "GOOGLE_AUTH"
)
type SamlConfig struct {
SamlEntity string `json:"samlEntity"`
SamlIdp string `json:"samlIdp"`
SamlCert string `json:"samlCert"`
}
// OrgDomain identify org owned web domains for auth and other purposes
type OrgDomain struct {
Id uuid.UUID `json:"id"`
@@ -33,10 +29,17 @@ type OrgDomain struct {
OrgId string `json:"orgId"`
SsoEnabled bool `json:"ssoEnabled"`
SsoType SSOType `json:"ssoType"`
SamlConfig *SamlConfig `json:"samlConfig"`
GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`
Org *basemodel.Organization
}
func (od *OrgDomain) String() string {
return fmt.Sprintf("[%s]%s-%s ", od.Name, od.Id.String(), od.SsoType)
}
// Valid is used as a pipeline function to check if the org domain
// loaded from db is valid
func (od *OrgDomain) Valid(err error) error {
@@ -97,6 +100,16 @@ func (od *OrgDomain) GetSAMLCert() string {
return ""
}
// PrepareGoogleOAuthProvider creates GoogleProvider that is used in
// requesting OAuth and also used in processing response from google
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
if od.GoogleAuthConfig == nil {
return nil, fmt.Errorf("Google auth is not setup correctly for this domain")
}
return od.GoogleAuthConfig.GetProvider(od.Name, siteUrl)
}
// PrepareSamlRequest creates a SAML request for the gosaml2 library
func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServiceProvider, error) {
@@ -124,19 +137,48 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro
}
func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
sp, err := od.PrepareSamlRequest(siteUrl)
if err != nil {
return "", err
}
fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)
// build redirect url from window.location sent by frontend
redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
// prepare the state that gets relayed back when the auth provider
// calls back our url. here we pass the app url (where signoz runs)
// and the domain id. the domain id helps identify the sso config
// when the callback occurs, and the app url helps redirect the user
// back to the right path.
// why do we need to pass the app url? the callback is typically handled
// by the backend, which might run on a different port or be unaware of
// the frontend endpoint (unless the SITE_URL param is set). hence, we
// receive this build-sso request along with the frontend window.location
// and use it to relay the information through the auth provider to the
// backend (HandleCallback or HandleSSO method).
relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)
switch od.SsoType {
case SAML:
sp, err := od.PrepareSamlRequest(siteUrl)
if err != nil {
return "", err
}
return sp.BuildAuthURL(relayState)
case GoogleAuth:
googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl)
if err != nil {
return "", err
}
return googleProvider.BuildAuthURL(relayState)
default:
zap.S().Errorf("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
return "", fmt.Errorf("unsupported SSO config for the domain")
}
relayState := fmt.Sprintf("%s://%s%s?domainId=%s",
siteUrl.Scheme,
siteUrl.Host,
siteUrl.Path,
fmtDomainId)
return sp.BuildAuthURL(relayState)
}
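The encode side is the mirror image of GetDomainFromSsoResponse: colons stand in for dashes so the id survives as a single query-parameter value. A minimal sketch under the same assumptions (placeholder site URL):

package main

import (
	"fmt"
	"net/url"
	"strings"

	"github.com/google/uuid"
)

// buildRelayState reproduces the construction above: rebuild the frontend
// URL and append the colon-encoded domain id as the relay state.
func buildRelayState(siteUrl *url.URL, domainID uuid.UUID) string {
	fmtDomainId := strings.ReplaceAll(domainID.String(), "-", ":")
	redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
	return fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)
}

func main() {
	site, _ := url.Parse("https://signoz.example.com/login")
	id := uuid.MustParse("3f2504e0-4f89-11d3-9a0c-0305e82c3301")
	fmt.Println(buildRelayState(site, id))
	// https://signoz.example.com/login?domainId=3f2504e0:4f89:11d3:9a0c:0305e82c3301
}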

View File

@@ -1,6 +1,7 @@
package model
import (
"fmt"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
@@ -44,6 +45,14 @@ func BadRequest(err error) *ApiError {
}
}
// BadRequestStr returns an ApiError object of bad request type for a string input
func BadRequestStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorBadData,
Err: fmt.Errorf(s),
}
}
// InternalError returns an ApiError object of internal type
func InternalError(err error) *ApiError {
return &ApiError{
@@ -52,6 +61,14 @@ func InternalError(err error) *ApiError {
}
}
// InternalErrorStr returns an ApiError object of internal type for a string input
func InternalErrorStr(s string) *ApiError {
return &ApiError{
Typ: basemodel.ErrorInternal,
Err: fmt.Errorf(s),
}
}
var (
ErrorNone basemodel.ErrorType = ""
ErrorTimeout basemodel.ErrorType = "timeout"

View File

@@ -0,0 +1,68 @@
package model
import (
"fmt"
"context"
"net/url"
"golang.org/x/oauth2"
"github.com/coreos/go-oidc/v3/oidc"
"go.signoz.io/signoz/ee/query-service/sso"
)
// SamlConfig contains SAML params used to generate and respond to requests
// from the SAML provider
type SamlConfig struct {
SamlEntity string `json:"samlEntity"`
SamlIdp string `json:"samlIdp"`
SamlCert string `json:"samlCert"`
}
// GoogleOAuthConfig contains a generic config to support OAuth
type GoogleOAuthConfig struct {
ClientID string `json:"clientId"`
ClientSecret string `json:"clientSecret"`
RedirectURI string `json:"redirectURI"`
}
const (
googleIssuerURL = "https://accounts.google.com"
)
func (g *GoogleOAuthConfig) GetProvider(domain string, siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
ctx, cancel := context.WithCancel(context.Background())
provider, err := oidc.NewProvider(ctx, googleIssuerURL)
if err != nil {
cancel()
return nil, fmt.Errorf("failed to get provider: %v", err)
}
// default to the email scope, as we only use google auth
// to verify identity and start a session.
scopes := []string{"email"}
// this is the url google will call after login completion
redirectURL := fmt.Sprintf("%s://%s/%s",
siteUrl.Scheme,
siteUrl.Host,
"api/v1/complete/google")
return &sso.GoogleOAuthProvider{
RedirectURI: g.RedirectURI,
OAuth2Config: &oauth2.Config{
ClientID: g.ClientID,
ClientSecret: g.ClientSecret,
Endpoint: provider.Endpoint(),
Scopes: scopes,
RedirectURL: redirectURL,
},
Verifier: provider.Verifier(
&oidc.Config{ClientID: g.ClientID},
),
Cancel: cancel,
HostedDomain: domain,
}, nil
}
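One detail worth calling out: regardless of the frontend path, the OAuth redirect URL is pinned to /api/v1/complete/google. A tiny sketch of that derivation (the host is a placeholder):

package main

import (
	"fmt"
	"net/url"
)

// callbackURL mirrors how GetProvider derives the redirect URL google will
// call after login: the path is fixed at /api/v1/complete/google.
func callbackURL(siteUrl *url.URL) string {
	return fmt.Sprintf("%s://%s/%s", siteUrl.Scheme, siteUrl.Host, "api/v1/complete/google")
}

func main() {
	site, _ := url.Parse("https://signoz.example.com/login")
	fmt.Println(callbackURL(site)) // https://signoz.example.com/api/v1/complete/google
}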

View File

@@ -6,30 +6,27 @@ import (
"github.com/google/uuid"
)
type UsageSnapshot struct {
CurrentLogSizeBytes uint64 `json:"currentLogSizeBytes"`
CurrentLogSizeBytesColdStorage uint64 `json:"currentLogSizeBytesColdStorage"`
CurrentSpansCount uint64 `json:"currentSpansCount"`
CurrentSpansCountColdStorage uint64 `json:"currentSpansCountColdStorage"`
CurrentSamplesCount uint64 `json:"currentSamplesCount"`
CurrentSamplesCountColdStorage uint64 `json:"currentSamplesCountColdStorage"`
}
type UsageBase struct {
Id uuid.UUID `json:"id" db:"id"`
InstallationId uuid.UUID `json:"installationId" db:"installation_id"`
ActivationId uuid.UUID `json:"activationId" db:"activation_id"`
CreatedAt time.Time `json:"createdAt" db:"created_at"`
FailedSyncRequest int `json:"failedSyncRequest" db:"failed_sync_request_count"`
}
type UsagePayload struct {
UsageBase
Metrics UsageSnapshot `json:"metrics"`
SnapshotDate time.Time `json:"snapshotDate"`
InstallationId uuid.UUID `json:"installationId"`
LicenseKey uuid.UUID `json:"licenseKey"`
Usage []Usage `json:"usage"`
}
type Usage struct {
UsageBase
Snapshot string `db:"snapshot"`
CollectorID string `json:"collectorId"`
ExporterID string `json:"exporterId"`
Type string `json:"type"`
Tenant string `json:"tenant"`
TimeStamp time.Time `json:"timestamp"`
Count int64 `json:"count"`
Size int64 `json:"size"`
}
type UsageDB struct {
CollectorID string `ch:"collector_id" json:"collectorId"`
ExporterID string `ch:"exporter_id" json:"exporterId"`
Type string `ch:"-" json:"type"`
TimeStamp time.Time `ch:"timestamp" json:"timestamp"`
Tenant string `ch:"tenant" json:"tenant"`
Data string `ch:"data" json:"data"`
}

View File

@@ -0,0 +1,92 @@
package sso
import (
"fmt"
"errors"
"context"
"net/http"
"github.com/coreos/go-oidc/v3/oidc"
"golang.org/x/oauth2"
)
type GoogleOAuthProvider struct {
RedirectURI string
OAuth2Config *oauth2.Config
Verifier *oidc.IDTokenVerifier
Cancel context.CancelFunc
HostedDomain string
}
func (g *GoogleOAuthProvider) BuildAuthURL(state string) (string, error) {
var opts []oauth2.AuthCodeOption
// set hosted domain. google supports multiple hosted domains, but in our case
// we have one config per hosted domain.
opts = append(opts, oauth2.SetAuthURLParam("hd", g.HostedDomain))
return g.OAuth2Config.AuthCodeURL(state, opts...), nil
}
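The hd parameter asks Google to restrict account selection to a single hosted domain. A self-contained sketch of what AuthCodeURL produces with that option; the client id, endpoint URL, and domain below are placeholders, not values from this repo:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	cfg := &oauth2.Config{
		ClientID:    "client-id-placeholder",
		RedirectURL: "https://signoz.example.com/api/v1/complete/google",
		Scopes:      []string{"email"},
		Endpoint: oauth2.Endpoint{
			AuthURL: "https://accounts.google.com/o/oauth2/v2/auth",
		},
	}
	// state carries the relay state; hd restricts login to one hosted domain
	authURL := cfg.AuthCodeURL("relay-state", oauth2.SetAuthURLParam("hd", "example.com"))
	fmt.Println(authURL)
}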
type oauth2Error struct{
error string
errorDescription string
}
func (e *oauth2Error) Error() string {
if e.errorDescription == "" {
return e.error
}
return e.error + ": " + e.errorDescription
}
func (g *GoogleOAuthProvider) HandleCallback(r *http.Request) (identity *SSOIdentity, err error) {
q := r.URL.Query()
if errType := q.Get("error"); errType != "" {
return identity, &oauth2Error{errType, q.Get("error_description")}
}
token, err := g.OAuth2Config.Exchange(r.Context(), q.Get("code"))
if err != nil {
return identity, fmt.Errorf("google: failed to get token: %v", err)
}
return g.createIdentity(r.Context(), token)
}
func (g *GoogleOAuthProvider) createIdentity(ctx context.Context, token *oauth2.Token) (identity *SSOIdentity, err error) {
rawIDToken, ok := token.Extra("id_token").(string)
if !ok {
return identity, errors.New("google: no id_token in token response")
}
idToken, err := g.Verifier.Verify(ctx, rawIDToken)
if err != nil {
return identity, fmt.Errorf("google: failed to verify ID Token: %v", err)
}
var claims struct {
Username string `json:"name"`
Email string `json:"email"`
EmailVerified bool `json:"email_verified"`
HostedDomain string `json:"hd"`
}
if err := idToken.Claims(&claims); err != nil {
return identity, fmt.Errorf("oidc: failed to decode claims: %v", err)
}
if claims.HostedDomain != g.HostedDomain {
return identity, fmt.Errorf("oidc: unexpected hd claim %v", claims.HostedDomain)
}
identity = &SSOIdentity{
UserID: idToken.Subject,
Username: claims.Username,
Email: claims.Email,
EmailVerified: claims.EmailVerified,
ConnectorData: []byte(token.RefreshToken),
}
return identity, nil
}
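To see where HandleCallback slots in, here is a hypothetical callback endpoint exercised with a fake provider; the types below are illustrative stand-ins for SSOIdentity and OAuthCallbackProvider, not the package's actual wiring:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// ssoIdentity and callbackProvider are illustrative stand-ins for the
// SSOIdentity struct and OAuthCallbackProvider interface in this package.
type ssoIdentity struct{ Email string }

type callbackProvider interface {
	HandleCallback(r *http.Request) (*ssoIdentity, error)
}

// completeGoogleAuth sketches the callback endpoint: HandleCallback swaps
// the code for a token and verifies the ID token, then the identity is
// handed to the session layer.
func completeGoogleAuth(p callbackProvider) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		identity, err := p.HandleCallback(r)
		if err != nil {
			http.Error(w, "SSO callback failed", http.StatusUnauthorized)
			return
		}
		// a real handler would now mint a session, e.g. via PrepareSsoRedirect
		fmt.Fprintf(w, "logged in as %s", identity.Email)
	}
}

// fakeProvider lets the sketch run without a real IdP round trip.
type fakeProvider struct{}

func (fakeProvider) HandleCallback(*http.Request) (*ssoIdentity, error) {
	return &ssoIdentity{Email: "user@example.com"}, nil
}

func main() {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/api/v1/complete/google?code=abc", nil)
	completeGoogleAuth(fakeProvider{})(rec, req)
	fmt.Println(rec.Body.String()) // logged in as user@example.com
}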

View File

@@ -0,0 +1,31 @@
package sso
import (
"net/http"
)
// SSOIdentity contains details of the user received from the SSO provider
type SSOIdentity struct {
UserID string
Username string
PreferredUsername string
Email string
EmailVerified bool
ConnectorData []byte
}
// OAuthCallbackProvider is an interface implemented by connectors which use an OAuth
// style redirect flow to determine user information.
type OAuthCallbackProvider interface {
// The initial URL the user would be redirected to.
// OAuth2 implementations support various scopes, but we only need profile and user
// info, as the roles are still being managed in SigNoz.
BuildAuthURL(state string) (string, error)
// Handle the callback to the server (after login at the oauth provider site)
// and return an email identity.
// At the moment we don't support an auto-signup flow (based on domain), so
// the full identity (including name, group etc.) is not required outside of the
// connector
HandleCallback(r *http.Request) (identity *SSOIdentity, err error)
}

View File

@@ -4,18 +4,19 @@ import (
"context"
"encoding/json"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.uber.org/zap"
licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
"go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/ee/query-service/usage/repository"
"go.signoz.io/signoz/pkg/query-service/utils/encryption"
)
@@ -27,9 +28,6 @@ const (
)
var (
// collect usage every hour
collectionFrequency = 1 * time.Hour
// send usage every 24 hours
uploadFrequency = 24 * time.Hour
@@ -37,8 +35,6 @@ var (
)
type Manager struct {
repository *repository.Repository
clickhouseConn clickhouse.Conn
licenseRepo *license.Repo
@@ -52,15 +48,9 @@ type Manager struct {
}
func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
repo := repository.New(db)
err := repo.Init(dbType)
if err != nil {
return nil, fmt.Errorf("failed to initiate usage repo: %v", err)
}
m := &Manager{
repository: repo,
// repository: repo,
clickhouseConn: clickhouseConn,
licenseRepo: licenseRepo,
}
@@ -74,6 +64,28 @@ func (lm *Manager) Start() error {
return fmt.Errorf("usage exporter is locked")
}
go lm.UsageExporter(context.Background())
return nil
}
func (lm *Manager) UsageExporter(ctx context.Context) {
defer close(lm.terminated)
uploadTicker := time.NewTicker(uploadFrequency)
defer uploadTicker.Stop()
for {
select {
case <-lm.done:
return
case <-uploadTicker.C:
lm.UploadUsage(ctx)
}
}
}
func (lm *Manager) UploadUsage(ctx context.Context) error {
// check if license is present or not
license, err := lm.licenseRepo.GetActiveLicense(context.Background())
if err != nil {
@@ -85,203 +97,81 @@ func (lm *Manager) Start() error {
return nil
}
// upload previous snapshots if any
err = lm.UploadUsage(context.Background())
if err != nil {
return err
}
// collect snapshot if incase it wasn't collect in (t - collectionFrequency)
err = lm.CollectCurrentUsage(context.Background())
if err != nil {
return err
}
go lm.UsageExporter(context.Background())
return nil
}
// CollectCurrentUsage checks if needs to collect usage data
func (lm *Manager) CollectCurrentUsage(ctx context.Context) error {
// check the DB if anything exist where timestamp > t - collectionFrequency
ts := time.Now().Add(-collectionFrequency)
alreadyCreated, err := lm.repository.CheckSnapshotGtCreatedAt(ctx, ts)
if err != nil {
return err
}
if !alreadyCreated {
zap.S().Info("Collecting current usage")
exportError := lm.CollectAndStoreUsage(ctx)
if exportError != nil {
return exportError
}
} else {
zap.S().Info("Nothing to collect")
}
return nil
}
func (lm *Manager) UsageExporter(ctx context.Context) {
defer close(lm.terminated)
collectionTicker := time.NewTicker(collectionFrequency)
defer collectionTicker.Stop()
uploadTicker := time.NewTicker(uploadFrequency)
defer uploadTicker.Stop()
for {
select {
case <-lm.done:
return
case <-collectionTicker.C:
lm.CollectAndStoreUsage(ctx)
case <-uploadTicker.C:
lm.UploadUsage(ctx)
// remove the old snapshots
lm.repository.DropOldSnapshots(ctx)
}
}
}
type TableSize struct {
Table string `ch:"table"`
DiskName string `ch:"disk_name"`
Rows uint64 `ch:"rows"`
UncompressedBytes uint64 `ch:"uncompressed_bytes"`
}
func (lm *Manager) CollectAndStoreUsage(ctx context.Context) error {
snap, err := lm.GetUsageFromClickHouse(ctx)
if err != nil {
return err
}
license, err := lm.licenseRepo.GetActiveLicense(ctx)
if err != nil {
return err
}
activationId, _ := uuid.Parse(license.ActivationId)
// TODO (nitya) : Add installation ID in the payload
payload := model.UsagePayload{
UsageBase: model.UsageBase{
ActivationId: activationId,
FailedSyncRequest: 0,
},
Metrics: *snap,
SnapshotDate: time.Now(),
}
err = lm.repository.InsertSnapshot(ctx, &payload)
if err != nil {
return err
}
return nil
}
func (lm *Manager) GetUsageFromClickHouse(ctx context.Context) (*model.UsageSnapshot, error) {
tableSizes := []TableSize{}
snap := model.UsageSnapshot{}
usages := []model.UsageDB{}
// get usage from clickhouse
dbs := []string{"signoz_logs", "signoz_traces", "signoz_metrics"}
query := `
SELECT
table,
disk_name,
sum(rows) as rows,
sum(data_uncompressed_bytes) AS uncompressed_bytes
FROM system.parts
WHERE active AND (database in ('signoz_logs', 'signoz_metrics', 'signoz_traces')) AND (table in ('logs','samples_v2', 'signoz_index_v2'))
GROUP BY
table,
disk_name
ORDER BY table
SELECT tenant, collector_id, exporter_id, timestamp, data
FROM %s.distributed_usage as u1
GLOBAL INNER JOIN
(SELECT
tenant, collector_id, exporter_id, MAX(timestamp) as ts
FROM %s.distributed_usage as u2
where timestamp >= $1
GROUP BY tenant, collector_id, exporter_id
) as t1
ON
u1.tenant = t1.tenant AND u1.collector_id = t1.collector_id AND u1.exporter_id = t1.exporter_id and u1.timestamp = t1.ts
order by timestamp
`
err := lm.clickhouseConn.Select(ctx, &tableSizes, query)
if err != nil {
return nil, err
}
for _, val := range tableSizes {
switch val.Table {
case "logs":
if val.DiskName == "default" {
snap.CurrentLogSizeBytes = val.UncompressedBytes
} else {
snap.CurrentLogSizeBytesColdStorage = val.UncompressedBytes
}
case "samples_v2":
if val.DiskName == "default" {
snap.CurrentSamplesCount = val.Rows
} else {
snap.CurrentSamplesCountColdStorage = val.Rows
}
case "signoz_index_v2":
if val.DiskName == "default" {
snap.CurrentSpansCount = val.Rows
} else {
snap.CurrentSpansCountColdStorage = val.Rows
}
for _, db := range dbs {
dbusages := []model.UsageDB{}
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
return err
}
for _, u := range dbusages {
u.Type = db
usages = append(usages, u)
}
}
return &snap, nil
}
func (lm *Manager) UploadUsage(ctx context.Context) error {
snapshots, err := lm.repository.GetSnapshotsNotSynced(ctx)
if err != nil {
return err
}
if len(snapshots) <= 0 {
if len(usages) <= 0 {
zap.S().Info("no snapshots to upload, skipping.")
return nil
}
zap.S().Info("uploading snapshots")
for _, snap := range snapshots {
metricsBytes, err := encryption.Decrypt([]byte(snap.ActivationId.String()[:32]), []byte(snap.Snapshot))
zap.S().Info("uploading usage data")
usagesPayload := []model.Usage{}
for _, usage := range usages {
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
if err != nil {
return err
}
metrics := model.UsageSnapshot{}
err = json.Unmarshal(metricsBytes, &metrics)
usageData := model.Usage{}
err = json.Unmarshal(usageDataBytes, &usageData)
if err != nil {
return err
}
err = lm.UploadUsageWithExponentalBackOff(ctx, model.UsagePayload{
UsageBase: model.UsageBase{
Id: snap.Id,
InstallationId: snap.InstallationId,
ActivationId: snap.ActivationId,
FailedSyncRequest: snap.FailedSyncRequest,
},
SnapshotDate: snap.CreatedAt,
Metrics: metrics,
})
if err != nil {
return err
}
usageData.CollectorID = usage.CollectorID
usageData.ExporterID = usage.ExporterID
usageData.Type = usage.Type
usageData.Tenant = usage.Tenant
usagesPayload = append(usagesPayload, usageData)
}
key, _ := uuid.Parse(license.Key)
payload := model.UsagePayload{
LicenseKey: key,
Usage: usagesPayload,
}
err = lm.UploadUsageWithExponentalBackOff(ctx, payload)
if err != nil {
return err
}
return nil
}
func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) error {
for i := 1; i <= MaxRetries; i++ {
apiErr := licenseserver.SendUsage(ctx, &payload)
apiErr := licenseserver.SendUsage(ctx, payload)
if apiErr != nil && i == MaxRetries {
err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
if err != nil {
zap.S().Errorf("failed to updated the failure count for snapshot in DB : ", zap.Error(err))
return err
}
zap.S().Errorf("retries stopped : %v", zap.Error(err))
zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
// not returning error here since it is captured in the failed count
return nil
} else if apiErr != nil {
@@ -289,24 +179,10 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
sleepDuration := RetryInterval * time.Duration(i)
zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err))
time.Sleep(sleepDuration)
// update the failed request count
err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
if err != nil {
zap.S().Errorf("failed to updated the failure count for snapshot in DB : %v", zap.Error(err))
return err
}
} else {
break
}
}
// update the database that it is synced
err := lm.repository.MoveToSynced(ctx, payload.Id)
if err != nil {
return err
}
return nil
}
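Despite the function's name, the delay grows linearly (RetryInterval * i) rather than exponentially. A standalone sketch of that retry shape, with placeholder constants standing in for the package's MaxRetries and RetryInterval:

package main

import (
	"errors"
	"fmt"
	"time"
)

const (
	maxRetries    = 3
	retryInterval = 10 * time.Millisecond // stand-in for the package's RetryInterval
)

// retryWithGrowingDelay mirrors the loop above: retry up to maxRetries,
// sleeping retryInterval * attempt between tries (linear backoff, despite
// the "exponential" name in the original).
func retryWithGrowingDelay(send func() error) error {
	var lastErr error
	for i := 1; i <= maxRetries; i++ {
		lastErr = send()
		if lastErr == nil {
			return nil
		}
		if i == maxRetries {
			break
		}
		time.Sleep(retryInterval * time.Duration(i))
	}
	return fmt.Errorf("retries exhausted: %w", lastErr)
}

func main() {
	attempts := 0
	err := retryWithGrowingDelay(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}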

View File

@@ -1,139 +0,0 @@
package repository
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/ee/query-service/usage/sqlite"
"go.signoz.io/signoz/pkg/query-service/utils/encryption"
)
const (
MaxFailedSyncCount = 9 // a snapshot will be ignored if the max failed count is greater than or equal to 9
SnapShotLife = 3 * 24 * time.Hour
)
// Repository is usage Repository which stores usage snapshot in a secured DB
type Repository struct {
db *sqlx.DB
}
// New initiates a new usage Repository
func New(db *sqlx.DB) *Repository {
return &Repository{
db: db,
}
}
func (r *Repository) Init(engine string) error {
switch engine {
case "sqlite3", "sqlite":
return sqlite.InitDB(r.db)
default:
return fmt.Errorf("unsupported db")
}
}
func (r *Repository) InsertSnapshot(ctx context.Context, usage *model.UsagePayload) error {
snapshotBytes, err := json.Marshal(usage.Metrics)
if err != nil {
return err
}
usage.Id = uuid.New()
encryptedSnapshot, err := encryption.Encrypt([]byte(usage.ActivationId.String()[:32]), snapshotBytes)
if err != nil {
return err
}
query := `INSERT INTO usage(id, activation_id, snapshot)
VALUES ($1, $2, $3)`
_, err = r.db.ExecContext(ctx,
query,
usage.Id,
usage.ActivationId,
string(encryptedSnapshot),
)
if err != nil {
zap.S().Errorf("error inserting usage data: %v", zap.Error(err))
return fmt.Errorf("failed to insert usage in db: %v", err)
}
return nil
}
func (r *Repository) MoveToSynced(ctx context.Context, id uuid.UUID) error {
query := `UPDATE usage
SET synced = 'true',
synced_at = $1
WHERE id = $2`
_, err := r.db.ExecContext(ctx, query, time.Now(), id)
if err != nil {
zap.S().Errorf("error in updating usage: %v", zap.Error(err))
return fmt.Errorf("failed to update usage in db: %v", err)
}
return nil
}
func (r *Repository) IncrementFailedRequestCount(ctx context.Context, id uuid.UUID) error {
query := `UPDATE usage SET failed_sync_request_count = failed_sync_request_count + 1 WHERE id = $1`
_, err := r.db.ExecContext(ctx, query, id)
if err != nil {
zap.S().Errorf("error in updating usage: %v", zap.Error(err))
return fmt.Errorf("failed to update usage in db: %v", err)
}
return nil
}
func (r *Repository) GetSnapshotsNotSynced(ctx context.Context) ([]*model.Usage, error) {
snapshots := []*model.Usage{}
query := `SELECT id,created_at, activation_id, snapshot, failed_sync_request_count from usage where synced!='true' and failed_sync_request_count < $1 order by created_at asc `
err := r.db.SelectContext(ctx, &snapshots, query, MaxFailedSyncCount)
if err != nil {
return nil, err
}
return snapshots, nil
}
func (r *Repository) DropOldSnapshots(ctx context.Context) error {
query := `delete from usage where created_at <= $1`
_, err := r.db.ExecContext(ctx, query, time.Now().Add(-(SnapShotLife)))
if err != nil {
zap.S().Errorf("failed to remove old snapshots from db: %v", zap.Error(err))
return err
}
return nil
}
// CheckSnapshotGtCreatedAt checks if there is any snapshot greater than the provided timestamp
func (r *Repository) CheckSnapshotGtCreatedAt(ctx context.Context, ts time.Time) (bool, error) {
var snapshots uint64
query := `SELECT count() from usage where created_at > '$1'`
err := r.db.QueryRowContext(ctx, query, ts).Scan(&snapshots)
if err != nil {
return false, err
}
return snapshots > 0, err
}

View File

@@ -1,32 +0,0 @@
package sqlite
import (
"fmt"
"github.com/jmoiron/sqlx"
)
func InitDB(db *sqlx.DB) error {
var err error
if db == nil {
return fmt.Errorf("invalid db connection")
}
table_schema := `CREATE TABLE IF NOT EXISTS usage(
id UUID PRIMARY KEY,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
activation_id UUID,
snapshot TEXT,
synced BOOLEAN DEFAULT 'false',
synced_at TIMESTAMP,
failed_sync_request_count INTEGER DEFAULT 0
);
`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("error in creating usage table: %v", err.Error())
}
return nil
}

View File

@@ -58,7 +58,7 @@ module.exports = {
'react/no-array-index-key': 'error',
'linebreak-style': [
'error',
process.platform === 'win32' ? 'windows' : 'unix',
process.env.platform === 'win32' ? 'windows' : 'unix',
],
'@typescript-eslint/default-param-last': 'off',
@@ -102,9 +102,10 @@ module.exports = {
},
],
'@typescript-eslint/no-unused-vars': 'error',
'func-style': ['error', 'declaration', { allowArrowFunctions: true }],
'arrow-body-style': ['error', 'as-needed'],
// eslint rules need to remove
'no-shadow': 'off',
'@typescript-eslint/no-shadow': 'off',
'import/no-cycle': 'off',

View File

@@ -1,4 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
cd frontend && npm run commitlint
cd frontend && yarn run commitlint --edit $1

frontend/.yarnrc Normal file
View File

@@ -0,0 +1 @@
network-timeout 600000

View File

@@ -9,8 +9,9 @@ ARG TARGETARCH
WORKDIR /frontend
# Copy the package.json to install dependencies
# Copy the package.json and .yarnrc files prior to installing dependencies
COPY package.json ./
COPY .yarnrc ./
# Install the dependencies and make the folder
RUN CI=1 yarn install

View File

@@ -15,7 +15,7 @@ const config: Config.InitialOptions = {
useESM: true,
},
},
testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
testMatch: ['<rootDir>/src/**/*?(*.)(test).(ts|js)?(x)'],
preset: 'ts-jest/presets/js-with-ts-esm',
transform: {
'^.+\\.(ts|tsx)?$': 'ts-jest',
@@ -25,6 +25,7 @@ const config: Config.InitialOptions = {
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
testPathIgnorePatterns: ['/node_modules/', '/public/'],
moduleDirectories: ['node_modules', 'src'],
testEnvironment: 'jest-environment-jsdom',
testEnvironmentOptions: {
'jest-playwright': {
browsers: ['chromium', 'firefox', 'webkit'],

View File

@@ -27,16 +27,12 @@
"author": "",
"license": "ISC",
"dependencies": {
"@ant-design/colors": "^6.0.0",
"@ant-design/icons": "^4.6.2",
"@ant-design/colors": "6.0.0",
"@ant-design/icons": "4.8.0",
"@grafana/data": "^8.4.3",
"@monaco-editor/react": "^4.3.1",
"@testing-library/jest-dom": "^5.11.4",
"@testing-library/react": "^11.1.0",
"@testing-library/user-event": "^12.1.10",
"@welldone-software/why-did-you-render": "^6.2.1",
"@xstate/react": "^3.0.0",
"antd": "4.19.2",
"antd": "5.0.5",
"axios": "^0.21.0",
"babel-eslint": "^10.1.0",
"babel-jest": "^26.6.0",
@@ -44,7 +40,7 @@
"babel-plugin-named-asset-import": "^0.3.7",
"babel-preset-minify": "^0.5.1",
"babel-preset-react-app": "^10.0.0",
"chart.js": "^3.4.0",
"chart.js": "3.9.1",
"chartjs-adapter-date-fns": "^2.0.0",
"chartjs-plugin-annotation": "^1.4.0",
"color": "^4.2.1",
@@ -59,6 +55,7 @@
"event-source-polyfill": "1.0.31",
"file-loader": "6.1.1",
"flat": "^5.0.2",
"fontfaceobserver": "2.3.0",
"history": "4.10.1",
"html-webpack-plugin": "5.1.0",
"i18next": "^21.6.12",
@@ -70,17 +67,18 @@
"less-loader": "^10.2.0",
"lodash-es": "^4.17.21",
"mini-css-extract-plugin": "2.4.5",
"react": "17.0.0",
"react-dom": "17.0.0",
"react": "18.2.0",
"react-dom": "18.2.0",
"react-force-graph": "^1.41.0",
"react-graph-vis": "^1.0.5",
"react-grid-layout": "^1.3.4",
"react-i18next": "^11.16.1",
"react-intersection-observer": "9.4.1",
"react-query": "^3.34.19",
"react-redux": "^7.2.2",
"react-router-dom": "^5.2.0",
"react-use": "^17.3.2",
"react-vis": "^1.11.7",
"react-virtuoso": "4.0.3",
"redux": "^4.0.5",
"redux-thunk": "^2.3.0",
"stream": "^0.0.2",
@@ -120,7 +118,9 @@
"@commitlint/config-conventional": "^16.2.4",
"@jest/globals": "^27.5.1",
"@playwright/test": "^1.22.0",
"@testing-library/react-hooks": "^7.0.2",
"@testing-library/jest-dom": "5.16.5",
"@testing-library/react": "13.4.0",
"@testing-library/user-event": "14.4.3",
"@types/color": "^3.0.3",
"@types/compression-webpack-plugin": "^9.0.0",
"@types/copy-webpack-plugin": "^8.0.1",
@@ -128,16 +128,17 @@
"@types/d3-tip": "^3.5.5",
"@types/event-source-polyfill": "^1.0.0",
"@types/flat": "^5.0.2",
"@types/fontfaceobserver": "2.1.0",
"@types/jest": "^27.5.1",
"@types/lodash-es": "^4.17.4",
"@types/mini-css-extract-plugin": "^2.5.1",
"@types/node": "^16.10.3",
"@types/react": "^17.0.0",
"@types/react-dom": "^16.9.9",
"@types/react": "18.0.26",
"@types/react-dom": "18.0.10",
"@types/react-grid-layout": "^1.1.2",
"@types/react-redux": "^7.1.11",
"@types/react-resizable": "3.0.3",
"@types/react-router-dom": "^5.1.6",
"@types/redux": "^3.6.0",
"@types/styled-components": "^5.1.4",
"@types/uuid": "^8.3.1",
"@types/vis": "^4.21.21",
@@ -145,6 +146,7 @@
"@types/webpack-dev-server": "^4.3.0",
"@typescript-eslint/eslint-plugin": "^4.28.2",
"@typescript-eslint/parser": "^4.28.2",
"@welldone-software/why-did-you-render": "6.2.1",
"autoprefixer": "^9.0.0",
"babel-plugin-styled-components": "^1.12.0",
"compression-webpack-plugin": "9.0.0",
@@ -173,7 +175,9 @@
"lint-staged": "^12.3.7",
"portfinder-sync": "^0.0.2",
"prettier": "2.2.1",
"react-hooks-testing-library": "0.6.0",
"react-hot-loader": "^4.13.0",
"react-resizable": "3.0.4",
"ts-jest": "^27.1.4",
"ts-node": "^10.2.1",
"typescript-plugin-css-modules": "^3.4.0",
@@ -186,7 +190,7 @@
]
},
"resolutions": {
"@types/react": "17.0.0",
"@types/react-dom": "17.0.0"
"@types/react": "18.0.26",
"@types/react-dom": "18.0.10"
}
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,3 @@
{
"search_tags": "Search Tag Names"
}

View File

@@ -0,0 +1,3 @@
{
"search_tags": "Search Tag Names"
}

View File

@@ -6,7 +6,7 @@
"release_notes": "Release Notes",
"read_how_to_upgrade": "Read instructions on how to upgrade",
"latest_version_signoz": "You are running the latest version of SigNoz.",
"stale_version": "You are on an older version and may be losing out on the latest features we have shipped. We recommend to upgrade to the latest version",
"stale_version": "You are on an older version and may be missing out on the latest features we have shipped. We recommend to upgrade to the latest version",
"oops_something_went_wrong_version": "Oops.. facing issues with fetching updated version information",
"n_a": "N/A",
"routes": {

View File

@@ -1,11 +1,11 @@
/* eslint-disable react-hooks/exhaustive-deps */
import { notification } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import loginApi from 'api/user/login';
import { Logout } from 'api/utils';
import Spinner from 'components/Spinner';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';
import { useNotifications } from 'hooks/useNotifications';
import history from 'lib/history';
import React, { useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -47,6 +47,8 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const dispatch = useDispatch<Dispatch<AppActions>>();
const { notifications } = useNotifications();
const currentRoute = mapRoutes.get('current');
const navigateToLoginIfNotLoggedIn = (isLoggedIn = isLoggedInState): void => {
@@ -106,7 +108,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
} else {
Logout();
notification.error({
notifications.error({
message: response.error || t('something_went_wrong'),
});
}

View File

@@ -1,6 +1,9 @@
import { ConfigProvider } from 'antd';
import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import AppLayout from 'container/AppLayout';
import { useThemeConfig } from 'hooks/useDarkMode';
import { NotificationProvider } from 'hooks/useNotifications';
import history from 'lib/history';
import React, { Suspense } from 'react';
import { Route, Router, Switch } from 'react-router-dom';
@@ -9,29 +12,33 @@ import PrivateRoute from './Private';
import routes from './routes';
function App(): JSX.Element {
return (
<Router history={history}>
<PrivateRoute>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => {
return (
<Route
key={`${path}`}
exact={exact}
path={path}
component={component}
/>
);
})}
const themeConfig = useThemeConfig();
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
</PrivateRoute>
</Router>
return (
<ConfigProvider theme={themeConfig}>
<NotificationProvider>
<Router history={history}>
<PrivateRoute>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => (
<Route
key={`${path}`}
exact={exact}
path={path}
component={component}
/>
))}
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
</PrivateRoute>
</Router>
</NotificationProvider>
</ConfigProvider>
);
}

View File

@@ -57,6 +57,7 @@ const afterLogin = async (
profilePictureURL: payload.profilePictureURL,
userId: payload.id,
orgId: payload.orgId,
userFlags: payload.flags,
},
});

View File

@@ -1,4 +1,4 @@
import axios from 'api';
import { ApiV2Instance as axios } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
@@ -8,9 +8,7 @@ const query = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.get(
`/variables/query?query=${encodeURIComponent(props.query)}`,
);
const response = await axios.post(`/variables/query`, props);
return {
statusCode: 200,

View File

@@ -4,14 +4,16 @@ import { ENVIRONMENT } from 'constants/env';
import { LOCALSTORAGE } from 'constants/localStorage';
import { EventSourcePolyfill } from 'event-source-polyfill';
export const LiveTail = (queryParams: string): EventSourcePolyfill => {
const dict = {
headers: {
Authorization: `Bearer ${getLocalStorageKey(LOCALSTORAGE.AUTH_TOKEN)}`,
},
};
return new EventSourcePolyfill(
// 10 min in ms
const TIMEOUT_IN_MS = 10 * 60 * 1000;
export const LiveTail = (queryParams: string): EventSourcePolyfill =>
new EventSourcePolyfill(
`${ENVIRONMENT.baseURL}${apiV1}logs/tail?${queryParams}`,
dict,
{
headers: {
Authorization: `Bearer ${getLocalStorageKey(LOCALSTORAGE.AUTH_TOKEN)}`,
},
heartbeatTimeout: TIMEOUT_IN_MS,
},
);
};

View File

@@ -32,6 +32,7 @@ const getFilters = async (
maxDuration: String((duration.duration || [])[0] || ''),
minDuration: String((duration.duration || [])[1] || ''),
exclude,
spanKind: props.spanKind,
});
return {

View File

@@ -10,9 +10,11 @@ const getSpans = async (
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const updatedSelectedTags = props.selectedTags.map((e) => ({
Key: e.Key[0],
Key: e.Key,
Operator: e.Operator,
Values: e.Values,
StringValues: e.StringValues,
NumberValues: e.NumberValues,
BoolValues: e.BoolValues,
}));
const exclude: string[] = [];
@@ -35,13 +37,14 @@ const getSpans = async (
start: String(props.start),
end: String(props.end),
function: props.function,
groupBy: props.groupBy,
groupBy: props.groupBy === 'none' ? '' : props.groupBy,
step: props.step,
tags: updatedSelectedTags,
...nonDuration,
maxDuration: String((duration.duration || [])[0] || ''),
minDuration: String((duration.duration || [])[1] || ''),
exclude,
spanKind: props.spanKind,
},
);

View File

@@ -28,9 +28,11 @@ const getSpanAggregate = async (
});
const updatedSelectedTags = props.selectedTags.map((e) => ({
Key: e.Key[0],
Key: e.Key,
Operator: e.Operator,
Values: e.Values,
StringValues: e.StringValues,
NumberValues: e.NumberValues,
BoolValues: e.BoolValues,
}));
const other = Object.fromEntries(props.selectedFilter);
@@ -46,6 +48,7 @@ const getSpanAggregate = async (
maxDuration: String((duration.duration || [])[0] || ''),
minDuration: String((duration.duration || [])[1] || ''),
exclude,
spanKind: props.spanKind,
});
return {

View File

@@ -32,6 +32,7 @@ const getTagFilters = async (
maxDuration: String((duration.duration || [])[0] || ''),
minDuration: String((duration.duration || [])[1] || ''),
exclude,
spanKind: props.spanKind,
});
return {

View File

@@ -11,9 +11,12 @@ const getTagValue = async (
const response = await axios.post<PayloadProps>(`/getTagValues`, {
start: props.start.toString(),
end: props.end.toString(),
tagKey: props.tagKey,
tagKey: {
Key: props.tagKey.Key,
Type: props.tagKey.Type,
},
spanKind: props.spanKind,
});
return {
statusCode: 200,
error: null,

View File

@@ -0,0 +1,26 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/user/setFlags';
const setFlags = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.patch(`/user/${props.userId}/flags`, {
...props.flags,
});
return {
statusCode: 200,
error: null,
message: response.data?.status,
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default setFlags;

View File

@@ -41,6 +41,7 @@ export const Logout = (): void => {
orgName: '',
profilePictureURL: '',
userId: '',
userFlags: {},
},
});

View File

@@ -1,9 +0,0 @@
import generatePicker from 'antd/es/date-picker/generatePicker';
import { Dayjs } from 'dayjs';
// included in antd
// eslint-disable-next-line import/no-extraneous-dependencies
import dayjsGenerateConfig from 'rc-picker/lib/generate/dayjs';
const DatePicker = generatePicker<Dayjs>(dayjsGenerateConfig);
export default DatePicker;

View File

@@ -1,8 +1,6 @@
import MEditor, { EditorProps } from '@monaco-editor/react';
import { useIsDarkMode } from 'hooks/useDarkMode';
import React from 'react';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import AppReducer from 'types/reducer/app';
function Editor({
value,
@@ -12,7 +10,7 @@ function Editor({
height,
options,
}: MEditorProps): JSX.Element {
const { isDarkMode } = useSelector<AppState, AppReducer>((state) => state.app);
const isDarkMode = useIsDarkMode();
return (
<MEditor
theme={isDarkMode ? 'vs-dark' : 'vs-light'}

View File

@@ -0,0 +1,321 @@
import { Chart, ChartTypeRegistry, Plugin } from 'chart.js';
import * as ChartHelpers from 'chart.js/helpers';
// utils
import { ChartEventHandler, mergeDefaultOptions } from './utils';
export const dragSelectPluginId = 'drag-select-plugin';
type ChartDragHandlers = {
mousedown: ChartEventHandler;
mousemove: ChartEventHandler;
mouseup: ChartEventHandler;
globalMouseup: () => void;
};
export type DragSelectPluginOptions = {
color?: string;
onSelect?: (startValueX: number, endValueX: number) => void;
};
const defaultDragSelectPluginOptions: Required<DragSelectPluginOptions> = {
color: 'rgba(0, 0, 0, 0.5)',
onSelect: () => {},
};
export function createDragSelectPluginOptions(
isEnabled: boolean,
onSelect?: (start: number, end: number) => void,
color?: string,
): DragSelectPluginOptions | false {
if (!isEnabled) {
return false;
}
return {
onSelect,
color,
};
}
function createMousedownHandler(
chart: Chart,
dragData: DragSelectData,
): ChartEventHandler {
return (ev): void => {
const { left, right } = chart.chartArea;
let { x: startDragPositionX } = ChartHelpers.getRelativePosition(ev, chart);
if (left > startDragPositionX) {
startDragPositionX = left;
}
if (right < startDragPositionX) {
startDragPositionX = right;
}
const startValuePositionX = chart.scales.x.getValueForPixel(
startDragPositionX,
);
dragData.onDragStart(startDragPositionX, startValuePositionX);
};
}
function createMousemoveHandler(
chart: Chart,
dragData: DragSelectData,
): ChartEventHandler {
return (ev): void => {
if (!dragData.isMouseDown) {
return;
}
const { left, right } = chart.chartArea;
let { x: dragPositionX } = ChartHelpers.getRelativePosition(ev, chart);
if (left > dragPositionX) {
dragPositionX = left;
}
if (right < dragPositionX) {
dragPositionX = right;
}
const valuePositionX = chart.scales.x.getValueForPixel(dragPositionX);
dragData.onDrag(dragPositionX, valuePositionX);
chart.update('none');
};
}
function createMouseupHandler(
chart: Chart,
options: DragSelectPluginOptions,
dragData: DragSelectData,
): ChartEventHandler {
return (ev): void => {
const { left, right } = chart.chartArea;
let { x: endRelativePositionX } = ChartHelpers.getRelativePosition(ev, chart);
if (left > endRelativePositionX) {
endRelativePositionX = left;
}
if (right < endRelativePositionX) {
endRelativePositionX = right;
}
const endValuePositionX = chart.scales.x.getValueForPixel(
endRelativePositionX,
);
dragData.onDragEnd(endRelativePositionX, endValuePositionX);
chart.update('none');
if (
typeof options.onSelect === 'function' &&
typeof dragData.startValuePositionX === 'number' &&
typeof dragData.endValuePositionX === 'number'
) {
const start = Math.min(
dragData.startValuePositionX,
dragData.endValuePositionX,
);
const end = Math.max(
dragData.startValuePositionX,
dragData.endValuePositionX,
);
options.onSelect(start, end);
}
};
}
function createGlobalMouseupHandler(
options: DragSelectPluginOptions,
dragData: DragSelectData,
): () => void {
return (): void => {
const { isDragging, endRelativePixelPositionX, endValuePositionX } = dragData;
if (!isDragging) {
return;
}
dragData.onDragEnd(
endRelativePixelPositionX as number,
endValuePositionX as number,
);
if (
typeof options.onSelect === 'function' &&
typeof dragData.startValuePositionX === 'number' &&
typeof dragData.endValuePositionX === 'number'
) {
const start = Math.min(
dragData.startValuePositionX,
dragData.endValuePositionX,
);
const end = Math.max(
dragData.startValuePositionX,
dragData.endValuePositionX,
);
options.onSelect(start, end);
}
};
}
class DragSelectData {
public isDragging = false;
public isMouseDown = false;
public startRelativePixelPositionX: number | null = null;
public startValuePositionX: number | null | undefined = null;
public endRelativePixelPositionX: number | null = null;
public endValuePositionX: number | null | undefined = null;
public initialize(): void {
this.isDragging = false;
this.isMouseDown = false;
this.startRelativePixelPositionX = null;
this.startValuePositionX = null;
this.endRelativePixelPositionX = null;
this.endValuePositionX = null;
}
public onDragStart(
startRelativePixelPositionX: number,
startValuePositionX: number | undefined,
): void {
this.isDragging = false;
this.isMouseDown = true;
this.startRelativePixelPositionX = startRelativePixelPositionX;
this.startValuePositionX = startValuePositionX;
this.endRelativePixelPositionX = null;
this.endValuePositionX = null;
}
public onDrag(
endRelativePixelPositionX: number,
endValuePositionX: number | undefined,
): void {
this.isDragging = true;
this.endRelativePixelPositionX = endRelativePixelPositionX;
this.endValuePositionX = endValuePositionX;
}
public onDragEnd(
endRelativePixelPositionX: number,
endValuePositionX: number | undefined,
): void {
if (!this.isDragging) {
this.initialize();
return;
}
this.isDragging = false;
this.isMouseDown = false;
this.endRelativePixelPositionX = endRelativePixelPositionX;
this.endValuePositionX = endValuePositionX;
}
}
export const createDragSelectPlugin = (): Plugin<
keyof ChartTypeRegistry,
DragSelectPluginOptions
> => {
const dragData = new DragSelectData();
let pluginOptions: Required<DragSelectPluginOptions>;
const handlers: ChartDragHandlers = {
mousedown: () => {},
mousemove: () => {},
mouseup: () => {},
globalMouseup: () => {},
};
const dragSelectPlugin: Plugin<
keyof ChartTypeRegistry,
DragSelectPluginOptions
> = {
id: dragSelectPluginId,
start: (chart: Chart, _, passedOptions) => {
pluginOptions = mergeDefaultOptions(
passedOptions,
defaultDragSelectPluginOptions,
);
const { canvas } = chart;
dragData.initialize();
const mousedownHandler = createMousedownHandler(chart, dragData);
const mousemoveHandler = createMousemoveHandler(chart, dragData);
const mouseupHandler = createMouseupHandler(chart, pluginOptions, dragData);
const globalMouseupHandler = createGlobalMouseupHandler(
pluginOptions,
dragData,
);
canvas.addEventListener('mousedown', mousedownHandler, { passive: true });
canvas.addEventListener('mousemove', mousemoveHandler, { passive: true });
canvas.addEventListener('mouseup', mouseupHandler, { passive: true });
document.addEventListener('mouseup', globalMouseupHandler, {
passive: true,
});
handlers.mousedown = mousedownHandler;
handlers.mousemove = mousemoveHandler;
handlers.mouseup = mouseupHandler;
handlers.globalMouseup = globalMouseupHandler;
},
beforeDestroy: (chart: Chart) => {
const { canvas } = chart;
if (!canvas) {
return;
}
canvas.removeEventListener('mousedown', handlers.mousedown);
canvas.removeEventListener('mousemove', handlers.mousemove);
canvas.removeEventListener('mouseup', handlers.mouseup);
document.removeEventListener('mouseup', handlers.globalMouseup);
},
afterDatasetsDraw: (chart: Chart) => {
const {
startRelativePixelPositionX,
endRelativePixelPositionX,
isDragging,
} = dragData;
if (startRelativePixelPositionX && endRelativePixelPositionX && isDragging) {
const left = Math.min(
startRelativePixelPositionX,
endRelativePixelPositionX,
);
const right = Math.max(
startRelativePixelPositionX,
endRelativePixelPositionX,
);
const top = chart.chartArea.top - 5;
const bottom = chart.chartArea.bottom + 5;
/* eslint-disable-next-line no-param-reassign */
chart.ctx.fillStyle = pluginOptions.color;
chart.ctx.fillRect(left, top, right - left, bottom - top);
}
},
};
return dragSelectPlugin;
};

View File

@@ -11,7 +11,7 @@ export const emptyGraph = {
ctx.textBaseline = 'middle';
ctx.font = '1.5rem sans-serif';
ctx.fillStyle = `${grey.primary}`;
ctx.fillText('No data to display', width / 2, height / 2);
ctx.fillText('No data', width / 2, height / 2);
ctx.restore();
},
};

View File

@@ -0,0 +1,164 @@
import { Chart, ChartEvent, ChartTypeRegistry, Plugin } from 'chart.js';
import * as ChartHelpers from 'chart.js/helpers';
// utils
import { ChartEventHandler, mergeDefaultOptions } from './utils';
export const intersectionCursorPluginId = 'intersection-cursor-plugin';
export type IntersectionCursorPluginOptions = {
color?: string;
dashSize?: number;
gapSize?: number;
};
export const defaultIntersectionCursorPluginOptions: Required<IntersectionCursorPluginOptions> = {
color: 'white',
dashSize: 3,
gapSize: 3,
};
export function createIntersectionCursorPluginOptions(
isEnabled: boolean,
color?: string,
dashSize?: number,
gapSize?: number,
): IntersectionCursorPluginOptions | false {
if (!isEnabled) {
return false;
}
return {
color,
dashSize,
gapSize,
};
}
function createMousemoveHandler(
chart: Chart,
cursorData: IntersectionCursorData,
): ChartEventHandler {
return (ev: ChartEvent | MouseEvent): void => {
const { left, right, top, bottom } = chart.chartArea;
let { x, y } = ChartHelpers.getRelativePosition(ev, chart);
if (left > x) {
x = left;
}
if (right < x) {
x = right;
}
if (y < top) {
y = top;
}
if (y > bottom) {
y = bottom;
}
cursorData.onMouseMove(x, y);
};
}
function createMouseoutHandler(
cursorData: IntersectionCursorData,
): ChartEventHandler {
return (): void => {
cursorData.onMouseOut();
};
}
class IntersectionCursorData {
public positionX: number | null | undefined;
public positionY: number | null | undefined;
public initialize(): void {
this.positionX = null;
this.positionY = null;
}
public onMouseMove(x: number | undefined, y: number | undefined): void {
this.positionX = x;
this.positionY = y;
}
public onMouseOut(): void {
this.positionX = null;
this.positionY = null;
}
}
export const createIntersectionCursorPlugin = (): Plugin<
keyof ChartTypeRegistry,
IntersectionCursorPluginOptions
> => {
const cursorData = new IntersectionCursorData();
let pluginOptions: Required<IntersectionCursorPluginOptions>;
let mousemoveHandler: (ev: ChartEvent | MouseEvent) => void;
let mouseoutHandler: (ev: ChartEvent | MouseEvent) => void;
const intersectionCursorPlugin: Plugin<
keyof ChartTypeRegistry,
IntersectionCursorPluginOptions
> = {
id: intersectionCursorPluginId,
start: (chart: Chart, _, passedOptions) => {
const { canvas } = chart;
cursorData.initialize();
pluginOptions = mergeDefaultOptions(
passedOptions,
defaultIntersectionCursorPluginOptions,
);
mousemoveHandler = createMousemoveHandler(chart, cursorData);
mouseoutHandler = createMouseoutHandler(cursorData);
canvas.addEventListener('mousemove', mousemoveHandler, { passive: true });
canvas.addEventListener('mouseout', mouseoutHandler, { passive: true });
},
beforeDestroy: (chart: Chart) => {
const { canvas } = chart;
if (!canvas) {
return;
}
canvas.removeEventListener('mousemove', mousemoveHandler);
canvas.removeEventListener('mouseout', mouseoutHandler);
},
afterDatasetsDraw: (chart: Chart) => {
const { positionX, positionY } = cursorData;
const lineDashData = [pluginOptions.dashSize, pluginOptions.gapSize];
if (typeof positionX === 'number' && typeof positionY === 'number') {
const { top, bottom, left, right } = chart.chartArea;
chart.ctx.beginPath();
/* eslint-disable-next-line no-param-reassign */
chart.ctx.strokeStyle = pluginOptions.color;
chart.ctx.setLineDash(lineDashData);
chart.ctx.moveTo(left, positionY);
chart.ctx.lineTo(right, positionY);
chart.ctx.stroke();
chart.ctx.beginPath();
chart.ctx.setLineDash(lineDashData);
/* eslint-disable-next-line no-param-reassign */
chart.ctx.strokeStyle = pluginOptions.color;
chart.ctx.moveTo(positionX, top);
chart.ctx.lineTo(positionX, bottom);
chart.ctx.stroke();
}
},
};
return intersectionCursorPlugin;
};

View File

@@ -22,85 +22,86 @@ const getOrCreateLegendList = (
listContainer.style.height = '100%';
listContainer.style.flexWrap = 'wrap';
listContainer.style.justifyContent = 'center';
listContainer.style.fontSize = '0.75rem';
legendContainer?.appendChild(listContainer);
}
return listContainer;
};
export const legend = (id: string, isLonger: boolean): Plugin<ChartType> => {
return {
id: 'htmlLegend',
afterUpdate(chart): void {
const ul = getOrCreateLegendList(chart, id || 'legend', isLonger);
export const legend = (id: string, isLonger: boolean): Plugin<ChartType> => ({
id: 'htmlLegend',
afterUpdate(chart): void {
const ul = getOrCreateLegendList(chart, id || 'legend', isLonger);
// Remove old legend items
while (ul.firstChild) {
ul.firstChild.remove();
}
// Remove old legend items
while (ul.firstChild) {
ul.firstChild.remove();
}
// Reuse the built-in legendItems generator
const items = get(chart, [
'options',
'plugins',
'legend',
'labels',
'generateLabels',
])
? get(chart, ['options', 'plugins', 'legend', 'labels', 'generateLabels'])(
chart,
)
: null;
// Reuse the built-in legendItems generator
const items = get(chart, [
'options',
'plugins',
'legend',
'labels',
'generateLabels',
])
? get(chart, ['options', 'plugins', 'legend', 'labels', 'generateLabels'])(
chart,
)
: null;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
items?.forEach((item: Record<any, any>, index: number) => {
const li = document.createElement('li');
li.style.alignItems = 'center';
li.style.cursor = 'pointer';
li.style.display = 'flex';
li.style.marginLeft = '10px';
li.style.marginTop = '5px';
// eslint-disable-next-line @typescript-eslint/no-explicit-any
items?.forEach((item: Record<any, any>, index: number) => {
const li = document.createElement('li');
li.style.alignItems = 'center';
li.style.cursor = 'pointer';
li.style.display = 'flex';
li.style.marginLeft = '10px';
// li.style.marginTop = '5px';
li.onclick = (): void => {
const { type } = chart.config;
if (type === 'pie' || type === 'doughnut') {
// Pie and doughnut charts only have a single dataset and visibility is per item
chart.toggleDataVisibility(index);
} else {
chart.setDatasetVisibility(
item.datasetIndex,
!chart.isDatasetVisible(item.datasetIndex),
);
}
chart.update();
};
// Color box
const boxSpan = document.createElement('span');
boxSpan.style.background = `${item.strokeStyle}` || `${colors[0]}`;
boxSpan.style.borderColor = `${item?.strokeStyle}`;
boxSpan.style.borderWidth = `${item.lineWidth}px`;
boxSpan.style.display = 'inline-block';
boxSpan.style.minHeight = '20px';
boxSpan.style.marginRight = '10px';
boxSpan.style.minWidth = '20px';
boxSpan.style.borderRadius = '50%';
if (item.text) {
// Text
const textContainer = document.createElement('span');
textContainer.style.margin = '0';
textContainer.style.padding = '0';
textContainer.style.textDecoration = item.hidden ? 'line-through' : '';
const text = document.createTextNode(item.text);
textContainer.appendChild(text);
li.appendChild(boxSpan);
li.appendChild(textContainer);
ul.appendChild(li);
li.onclick = (): void => {
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
const { type } = chart.config;
if (type === 'pie' || type === 'doughnut') {
// Pie and doughnut charts only have a single dataset and visibility is per item
chart.toggleDataVisibility(index);
} else {
chart.setDatasetVisibility(
item.datasetIndex,
!chart.isDatasetVisible(item.datasetIndex),
);
}
});
},
};
};
chart.update();
};
// Color box
const boxSpan = document.createElement('span');
boxSpan.style.background = `${item.strokeStyle}` || `${colors[0]}`;
boxSpan.style.borderColor = `${item?.strokeStyle}`;
boxSpan.style.borderWidth = `${item.lineWidth}px`;
boxSpan.style.display = 'inline-block';
boxSpan.style.minHeight = '0.75rem';
boxSpan.style.marginRight = '0.5rem';
boxSpan.style.minWidth = '0.75rem';
boxSpan.style.borderRadius = '50%';
if (item.text) {
// Text
const textContainer = document.createElement('span');
textContainer.style.margin = '0';
textContainer.style.padding = '0';
textContainer.style.textDecoration = item.hidden ? 'line-through' : '';
const text = document.createTextNode(item.text);
textContainer.appendChild(text);
li.appendChild(boxSpan);
li.appendChild(textContainer);
ul.appendChild(li);
}
});
},
});

View File

@@ -0,0 +1,20 @@
import { ChartEvent } from 'chart.js';
export type ChartEventHandler = (ev: ChartEvent | MouseEvent) => void;
export function mergeDefaultOptions<T extends Record<string, unknown>>(
options: T,
defaultOptions: Required<T>,
): Required<T> {
const sanitizedOptions = { ...options };
Object.keys(options).forEach((key) => {
if (sanitizedOptions[key as keyof T] === undefined) {
delete sanitizedOptions[key as keyof T];
}
});
return {
...defaultOptions,
...sanitizedOptions,
};
}
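The point of the `undefined` sweep above: a key explicitly set to `undefined` would otherwise survive the spread and clobber its default. A small sketch:

// Illustrative sketch only.
type Opts = { color?: string; width?: number };
const defaults: Required<Opts> = { color: 'red', width: 2 };

mergeDefaultOptions<Opts>({ color: undefined, width: 5 }, defaults);
// => { color: 'red', width: 5 } — without the sweep, color would come out undefined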

View File

@@ -0,0 +1,8 @@
import { themeColors } from 'constants/theme';
export const getAxisLabelColor = (currentTheme: string): string => {
if (currentTheme === 'light') {
return themeColors.black;
}
return themeColors.whiteCream;
};
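A one-liner, but worth showing where it lands — the Graph diff below feeds it into the tick colour of both axes:

// Illustrative sketch only.
import { getAxisLabelColor } from './helpers';

const currentTheme = 'dark'; // or 'light'
const ticks = { color: getAxisLabelColor(currentTheme) }; // whiteCream on dark, black on light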

View File

@@ -23,14 +23,27 @@ import {
} from 'chart.js';
import * as chartjsAdapter from 'chartjs-adapter-date-fns';
import annotationPlugin from 'chartjs-plugin-annotation';
import React, { useCallback, useEffect, useRef } from 'react';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import AppReducer from 'types/reducer/app';
import dayjs from 'dayjs';
import { useIsDarkMode } from 'hooks/useDarkMode';
import isEqual from 'lodash-es/isEqual';
import React, { memo, useCallback, useEffect, useRef } from 'react';
import { hasData } from './hasData';
import { getAxisLabelColor } from './helpers';
import { legend } from './Plugin';
import {
createDragSelectPlugin,
createDragSelectPluginOptions,
dragSelectPluginId,
DragSelectPluginOptions,
} from './Plugin/DragSelect';
import { emptyGraph } from './Plugin/EmptyGraph';
import {
createIntersectionCursorPlugin,
createIntersectionCursorPluginOptions,
intersectionCursorPluginId,
IntersectionCursorPluginOptions,
} from './Plugin/IntersectionCursor';
import { LegendsContainer } from './styles';
import { useXAxisTimeUnit } from './xAxisConfig';
import { getToolTipValue, getYAxisFormattedValue } from './yAxisConfig';
@@ -66,9 +79,13 @@ function Graph({
forceReRender,
staticLine,
containerHeight,
onDragSelect,
dragSelectColor,
}: GraphProps): JSX.Element {
const { isDarkMode } = useSelector<AppState, AppReducer>((state) => state.app);
const nearestDatasetIndex = useRef<null | number>(null);
const chartRef = useRef<HTMLCanvasElement>(null);
const isDarkMode = useIsDarkMode();
const currentTheme = isDarkMode ? 'dark' : 'light';
const xAxisTimeUnit = useXAxisTimeUnit(data); // Computes the relevant time unit for x axis by analyzing the time stamp data
@@ -92,7 +109,7 @@ function Graph({
}
if (chartRef.current !== null) {
const options: ChartOptions = {
const options: CustomChartOptions = {
animation: {
duration: animate ? 200 : 0,
},
@@ -136,6 +153,10 @@ function Graph({
},
tooltip: {
callbacks: {
title(context) {
const date = dayjs(context[0].parsed.x);
return date.format('MMM DD, YYYY, HH:mm:ss');
},
label(context) {
let label = context.dataset.label || '';
@@ -145,10 +166,27 @@ function Graph({
if (context.parsed.y !== null) {
label += getToolTipValue(context.parsed.y.toString(), yAxisUnit);
}
return label;
},
labelTextColor(labelData) {
if (labelData.datasetIndex === nearestDatasetIndex.current) {
return 'rgba(255, 255, 255, 1)';
}
return 'rgba(255, 255, 255, 0.75)';
},
},
},
[dragSelectPluginId]: createDragSelectPluginOptions(
!!onDragSelect,
onDragSelect,
dragSelectColor,
),
[intersectionCursorPluginId]: createIntersectionCursorPluginOptions(
!!onDragSelect,
currentTheme === 'dark' ? 'white' : 'black',
),
},
layout: {
padding: 0,
@@ -178,6 +216,7 @@ function Graph({
},
},
type: 'time',
ticks: { color: getAxisLabelColor(currentTheme) },
},
y: {
display: true,
@@ -186,6 +225,7 @@ function Graph({
color: getGridColor(),
},
ticks: {
color: getAxisLabelColor(currentTheme),
// Include a dollar sign in the ticks
callback(value) {
return getYAxisFormattedValue(value.toString(), yAxisUnit);
@@ -201,18 +241,50 @@ function Graph({
tension: 0,
cubicInterpolationMode: 'monotone',
},
point: {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
hoverBackgroundColor: (ctx: any) => {
if (ctx?.element?.options?.borderColor) {
return ctx.element.options.borderColor;
}
return 'rgba(0,0,0,0.1)';
},
hoverRadius: 5,
},
},
onClick: (event, element, chart) => {
if (onClickHandler) {
onClickHandler(event, element, chart, data);
}
},
onHover: (event, _, chart) => {
if (event.native) {
const interactions = chart.getElementsAtEventForMode(
event.native,
'nearest',
{
intersect: false,
},
true,
);
if (interactions[0]) {
nearestDatasetIndex.current = interactions[0].datasetIndex;
}
}
},
};
const chartHasData = hasData(data);
const chartPlugins = [];
if (!chartHasData) chartPlugins.push(emptyGraph);
if (chartHasData) {
chartPlugins.push(createIntersectionCursorPlugin());
chartPlugins.push(createDragSelectPlugin());
} else {
chartPlugins.push(emptyGraph);
}
chartPlugins.push(legend(name, data.datasets.length > 3));
lineChartRef.current = new Chart(chartRef.current, {
@@ -235,6 +307,9 @@ function Graph({
yAxisUnit,
onClickHandler,
staticLine,
onDragSelect,
dragSelectColor,
currentTheme,
]);
useEffect(() => {
@@ -249,6 +324,13 @@ function Graph({
);
}
type CustomChartOptions = ChartOptions & {
plugins: {
[dragSelectPluginId]: DragSelectPluginOptions | false;
[intersectionCursorPluginId]: IntersectionCursorPluginOptions | false;
};
};
interface GraphProps {
animate?: boolean;
type: ChartType;
@@ -261,6 +343,8 @@ interface GraphProps {
forceReRender?: boolean | null | number;
staticLine?: StaticLineProps | undefined;
containerHeight?: string | number;
onDragSelect?: (start: number, end: number) => void;
dragSelectColor?: string;
}
export interface StaticLineProps {
@@ -287,6 +371,11 @@ Graph.defaultProps = {
yAxisUnit: undefined,
forceReRender: undefined,
staticLine: undefined,
containerHeight: '85%',
containerHeight: '90%',
onDragSelect: undefined,
dragSelectColor: undefined,
};
export default Graph;
export default memo(Graph, (prevProps, nextProps) =>
isEqual(prevProps.data, nextProps.data),
);
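One behavioural note on the new export (an observation, not part of the diff): the comparator only inspects `data`, so a change to any other prop — `onDragSelect`, `containerHeight`, and so on — does not by itself re-render the chart; it only takes effect once `data` changes and the component renders again.

// Illustrative sketch only — how the comparator gates re-renders.
import isEqual from 'lodash-es/isEqual';
import { memo } from 'react';

const areDataEqual = (prev: GraphProps, next: GraphProps): boolean =>
  // true ⇒ React skips the render, even when other props changed
  isEqual(prev.data, next.data);

export default memo(Graph, areDataEqual);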

View File

@@ -1,14 +1,28 @@
import { themeColors } from 'constants/theme';
import styled from 'styled-components';
export const LegendsContainer = styled.div`
height: 15%;
height: 10%;
* {
::-webkit-scrollbar {
display: none !important;
width: 0.3rem;
}
::-webkit-scrollbar:horizontal {
height: 0.3rem;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: ${themeColors.royalGrey};
border-radius: 0.625rem;
}
::-webkit-scrollbar-thumb:hover {
background: ${themeColors.matterhornGrey};
}
::-webkit-scrollbar-corner {
background: transparent;
}
-ms-overflow-style: none !important; /* IE and Edge */
scrollbar-width: none !important; /* Firefox */
}
`;

View File

@@ -1,46 +1,23 @@
import { Button, Popover } from 'antd';
import getStep from 'lib/getStep';
import { generateFilterQuery } from 'lib/logs/generateFilterQuery';
import React, { memo, useCallback, useMemo } from 'react';
import { connect, useDispatch, useSelector } from 'react-redux';
import { bindActionCreators, Dispatch } from 'redux';
import { ThunkDispatch } from 'redux-thunk';
import { getLogs } from 'store/actions/logs/getLogs';
import { getLogsAggregate } from 'store/actions/logs/getLogsAggregate';
import { useDispatch, useSelector } from 'react-redux';
import { Dispatch } from 'redux';
import { AppState } from 'store/reducers';
import AppActions from 'types/actions';
import { SET_SEARCH_QUERY_STRING, TOGGLE_LIVE_TAIL } from 'types/actions/logs';
import { GlobalReducer } from 'types/reducer/globalTime';
import { SET_SEARCH_QUERY_STRING } from 'types/actions/logs';
import { ILogsReducer } from 'types/reducer/logs';
interface AddToQueryHOCProps {
fieldKey: string;
fieldValue: string;
children: React.ReactNode;
getLogs: (props: Parameters<typeof getLogs>[0]) => ReturnType<typeof getLogs>;
getLogsAggregate: (
props: Parameters<typeof getLogsAggregate>[0],
) => ReturnType<typeof getLogsAggregate>;
}
function AddToQueryHOC({
fieldKey,
fieldValue,
children,
getLogs,
getLogsAggregate,
}: AddToQueryHOCProps): JSX.Element {
const {
searchFilter: { queryString },
logLinesPerPage,
idStart,
idEnd,
liveTail,
} = useSelector<AppState, ILogsReducer>((store) => store.logs);
const dispatch = useDispatch();
const dispatch = useDispatch<Dispatch<AppActions>>();
const { maxTime, minTime } = useSelector<AppState, GlobalReducer>(
(state) => state.globalTime,
);
const generatedQuery = useMemo(
() => generateFilterQuery({ fieldKey, fieldValue, type: 'IN' }),
[fieldKey, fieldValue],
@@ -56,71 +33,18 @@ function AddToQueryHOC({
}
dispatch({
type: SET_SEARCH_QUERY_STRING,
payload: updatedQueryString,
payload: {
searchQueryString: updatedQueryString,
},
});
if (liveTail === 'STOPPED') {
getLogs({
q: updatedQueryString,
limit: logLinesPerPage,
orderBy: 'timestamp',
order: 'desc',
timestampStart: minTime,
timestampEnd: maxTime,
...(idStart ? { idGt: idStart } : {}),
...(idEnd ? { idLt: idEnd } : {}),
});
getLogsAggregate({
timestampStart: minTime,
timestampEnd: maxTime,
step: getStep({
start: minTime,
end: maxTime,
inputFormat: 'ns',
}),
q: updatedQueryString,
...(idStart ? { idGt: idStart } : {}),
...(idEnd ? { idLt: idEnd } : {}),
});
} else if (liveTail === 'PLAYING') {
dispatch({
type: TOGGLE_LIVE_TAIL,
payload: 'PAUSED',
});
setTimeout(
() =>
dispatch({
type: TOGGLE_LIVE_TAIL,
payload: liveTail,
}),
0,
);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [
dispatch,
generatedQuery,
getLogs,
idEnd,
idStart,
logLinesPerPage,
maxTime,
minTime,
queryString,
}, [dispatch, generatedQuery, queryString]);
const popOverContent = useMemo(() => <span>Add to query: {fieldKey}</span>, [
fieldKey,
]);
const popOverContent = (
<span style={{ fontSize: '0.9rem' }}>Add to query: {fieldKey}</span>
);
return (
<Button
size="small"
type="text"
style={{
margin: 0,
padding: 0,
}}
onClick={handleQueryAdd}
>
<Button size="small" type="text" onClick={handleQueryAdd}>
<Popover placement="top" content={popOverContent}>
{children}
</Popover>
@@ -128,20 +52,10 @@ function AddToQueryHOC({
);
}
interface DispatchProps {
getLogs: (
props: Parameters<typeof getLogs>[0],
) => (dispatch: Dispatch<AppActions>) => void;
getLogsAggregate: (
props: Parameters<typeof getLogsAggregate>[0],
) => (dispatch: Dispatch<AppActions>) => void;
interface AddToQueryHOCProps {
fieldKey: string;
fieldValue: string;
children: React.ReactNode;
}
const mapDispatchToProps = (
dispatch: ThunkDispatch<unknown, unknown, AppActions>,
): DispatchProps => ({
getLogs: bindActionCreators(getLogs, dispatch),
getLogsAggregate: bindActionCreators(getLogsAggregate, dispatch),
});
export default connect(null, mapDispatchToProps)(memo(AddToQueryHOC));
export default memo(AddToQueryHOC);
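After the refactor the component only dispatches `SET_SEARCH_QUERY_STRING`; fetching is left to whatever observes the store. A hypothetical usage (field key and value are made up):

// Illustrative sketch only.
import React from 'react';
import AddToQueryHOC from './AddToQueryHOC';

function Example(): JSX.Element {
  // clicking the wrapped node appends an IN filter for this field to the search query
  return (
    <AddToQueryHOC fieldKey="container_name" fieldValue="frontend">
      <span>frontend</span>
    </AddToQueryHOC>
  );
}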

View File

@@ -1,29 +1,28 @@
import { Popover } from 'antd';
import React from 'react';
import { useNotifications } from 'hooks/useNotifications';
import React, { useCallback, useEffect } from 'react';
import { useCopyToClipboard } from 'react-use';
interface CopyClipboardHOCProps {
textToCopy: string;
children: React.ReactNode;
}
function CopyClipboardHOC({
textToCopy,
children,
}: CopyClipboardHOCProps): JSX.Element {
const [, setCopy] = useCopyToClipboard();
const [value, setCopy] = useCopyToClipboard();
const { notifications } = useNotifications();
useEffect(() => {
if (value.value) {
notifications.success({
message: 'Copied to clipboard',
});
}
}, [value, notifications]);
const onClick = useCallback((): void => {
setCopy(textToCopy);
}, [setCopy, textToCopy]);
return (
<span
style={{
margin: 0,
padding: 0,
cursor: 'pointer',
}}
onClick={(): void => setCopy(textToCopy)}
onKeyDown={(): void => setCopy(textToCopy)}
role="button"
tabIndex={0}
>
<span onClick={onClick} onKeyDown={onClick} role="button" tabIndex={0}>
<Popover
placement="top"
content={<span style={{ fontSize: '0.9rem' }}>Copy to clipboard</span>}
@@ -34,4 +33,9 @@ function CopyClipboardHOC({
);
}
interface CopyClipboardHOCProps {
textToCopy: string;
children: React.ReactNode;
}
export default CopyClipboardHOC;
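Usage is unchanged; what moved is the feedback: the success toast now fires from the `useEffect` whenever `useCopyToClipboard` reports a copied value, rather than each call site raising its own notification. A sketch:

// Illustrative sketch only — the copied text is made up.
import React from 'react';
import CopyClipboardHOC from './CopyClipboardHOC';

function Example(): JSX.Element {
  return (
    <CopyClipboardHOC textToCopy="trace_id=abc123">
      <span>trace_id=abc123</span>
    </CopyClipboardHOC>
  );
}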

View File

@@ -3,18 +3,23 @@ import { CopyFilled, ExpandAltOutlined } from '@ant-design/icons';
import { Button, Divider, Row, Typography } from 'antd';
import { map } from 'd3';
import dayjs from 'dayjs';
import { useNotifications } from 'hooks/useNotifications';
// utils
import { FlatLogData } from 'lib/logs/flatLogData';
import React, { useCallback, useMemo } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import { useCopyToClipboard } from 'react-use';
// interfaces
import { AppState } from 'store/reducers';
import { SET_DETAILED_LOG_DATA } from 'types/actions/logs';
import { ILog } from 'types/api/logs/log';
import { ILogsReducer } from 'types/reducer/logs';
// components
import AddToQueryHOC from '../AddToQueryHOC';
import CopyClipboardHOC from '../CopyClipboardHOC';
import { Container } from './styles';
// styles
import { Container, LogContainer, Text, TextContainer } from './styles';
import { isValidLogField } from './util';
interface LogFieldProps {
@@ -23,23 +28,20 @@ interface LogFieldProps {
}
function LogGeneralField({ fieldKey, fieldValue }: LogFieldProps): JSX.Element {
return (
<div
style={{
display: 'flex',
overflow: 'hidden',
width: '100%',
}}
>
<Typography.Text type="secondary">{fieldKey}</Typography.Text>
<TextContainer>
<Text ellipsis type="secondary">
{fieldKey}
</Text>
<CopyClipboardHOC textToCopy={fieldValue}>
<Typography.Text ellipsis>
{': '}
{fieldValue}
</Typography.Text>
</CopyClipboardHOC>
</div>
</TextContainer>
);
}
function LogSelectedField({
fieldKey = '',
fieldValue = '',
@@ -73,16 +75,19 @@ function LogSelectedField({
);
}
interface LogItemProps {
interface ListLogViewProps {
logData: ILog;
}
function LogItem({ logData }: LogItemProps): JSX.Element {
function ListLogView({ logData }: ListLogViewProps): JSX.Element {
const {
fields: { selected },
} = useSelector<AppState, ILogsReducer>((state) => state.logs);
const dispatch = useDispatch();
const flattenLogData = useMemo(() => FlatLogData(logData), [logData]);
const [, setCopy] = useCopyToClipboard();
const { notifications } = useNotifications();
const handleDetailedView = useCallback(() => {
dispatch({
@@ -93,40 +98,40 @@ function LogItem({ logData }: LogItemProps): JSX.Element {
const handleCopyJSON = (): void => {
setCopy(JSON.stringify(logData, null, 2));
notifications.success({
message: 'Copied to clipboard',
});
};
return (
<Container>
<div style={{ maxWidth: '100%' }}>
<div>
<div>
{'{'}
<div style={{ marginLeft: '0.5rem' }}>
<LogGeneralField
fieldKey="log"
fieldValue={flattenLogData.body as never}
/>
{flattenLogData.stream && (
<LogContainer>
<>
<LogGeneralField fieldKey="log" fieldValue={flattenLogData.body} />
{flattenLogData.stream && (
<LogGeneralField fieldKey="stream" fieldValue={flattenLogData.stream} />
)}
<LogGeneralField
fieldKey="stream"
fieldValue={flattenLogData.stream as never}
fieldKey="timestamp"
fieldValue={dayjs((flattenLogData.timestamp as never) / 1e6).format()}
/>
)}
<LogGeneralField
fieldKey="timestamp"
fieldValue={dayjs((flattenLogData.timestamp as never) / 1e6).format()}
/>
</div>
</>
</LogContainer>
{'}'}
</div>
<div>
{map(selected, (field) => {
return isValidLogField(flattenLogData[field.name] as never) ? (
{map(selected, (field) =>
isValidLogField(flattenLogData[field.name] as never) ? (
<LogSelectedField
key={field.name}
fieldKey={field.name}
fieldValue={flattenLogData[field.name] as never}
/>
) : null;
})}
) : null,
)}
</div>
</div>
<Divider style={{ padding: 0, margin: '0.4rem 0', opacity: 0.5 }} />
@@ -154,4 +159,4 @@ function LogItem({ logData }: LogItemProps): JSX.Element {
);
}
export default LogItem;
export default ListLogView;
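A minimal usage sketch of the renamed component (nothing about the `ILog` shape beyond what this diff shows is assumed):

// Illustrative sketch only.
import React from 'react';
import { ILog } from 'types/api/logs/log';
import ListLogView from './ListLogView';

function Example({ log }: { log: ILog }): JSX.Element {
  return <ListLogView logData={log} />;
}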

View File

@@ -1,4 +1,4 @@
import { Card } from 'antd';
import { Card, Typography } from 'antd';
import styled, { keyframes } from 'styled-components';
const fadeInAnimation = keyframes`
@@ -16,3 +16,20 @@ export const Container = styled(Card)`
animation-duration: 0.2s;
animation-timing-function: ease-in;
`;
export const Text = styled(Typography.Text)`
&&& {
min-width: 1.5rem;
white-space: nowrap;
}
`;
export const TextContainer = styled.div`
display: flex;
overflow: hidden;
width: 100%;
`;
export const LogContainer = styled.div`
margin-left: 0.5rem;
`;

View File

@@ -0,0 +1,5 @@
export const rawLineStyle: React.CSSProperties = {
marginBottom: 0,
fontFamily: "'Fira Code', monospace",
fontWeight: 300,
};

View File

@@ -0,0 +1,48 @@
import { ExpandAltOutlined } from '@ant-design/icons';
import { Typography } from 'antd';
import dayjs from 'dayjs';
// hooks
import { useIsDarkMode } from 'hooks/useDarkMode';
import React, { useCallback, useMemo } from 'react';
// interfaces
import { ILog } from 'types/api/logs/log';
import { rawLineStyle } from './config';
// styles
import { ExpandIconWrapper, RawLogViewContainer } from './styles';
interface RawLogViewProps {
data: ILog;
linesPerRow: number;
onClickExpand: (log: ILog) => void;
}
function RawLogView(props: RawLogViewProps): JSX.Element {
const { data, linesPerRow, onClickExpand } = props;
const isDarkMode = useIsDarkMode();
const text = useMemo(
() => `${dayjs(data.timestamp / 1e6).format()} | ${data.body}`,
[data.timestamp, data.body],
);
const ellipsis = useMemo(() => ({ rows: linesPerRow }), [linesPerRow]);
const handleClickExpand = useCallback(() => {
onClickExpand(data);
}, [onClickExpand, data]);
return (
<RawLogViewContainer wrap={false} align="middle" $isDarkMode={isDarkMode}>
<ExpandIconWrapper flex="30px" onClick={handleClickExpand}>
<ExpandAltOutlined />
</ExpandIconWrapper>
<Typography.Paragraph style={rawLineStyle} ellipsis={ellipsis}>
{text}
</Typography.Paragraph>
</RawLogViewContainer>
);
}
export default RawLogView;
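Note the `data.timestamp / 1e6` above: log timestamps are stored as nanosecond epochs, and dividing by 1e6 yields the milliseconds dayjs expects. A usage sketch:

// Illustrative sketch only.
import React from 'react';
import { ILog } from 'types/api/logs/log';
import RawLogView from './RawLogView';

function Example({ log }: { log: ILog }): JSX.Element {
  // e.g. 1677240265000000000 ns → 1677240265000 ms
  return <RawLogView data={log} linesPerRow={2} onClickExpand={console.log} />;
}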

View File

@@ -0,0 +1,24 @@
import { blue } from '@ant-design/colors';
import { Col, Row } from 'antd';
import styled from 'styled-components';
export const RawLogViewContainer = styled(Row)<{ $isDarkMode: boolean }>`
width: 100%;
font-weight: 700;
font-size: 0.625rem;
line-height: 1.25rem;
transition: background-color 0.2s ease-in;
&:hover {
background-color: ${({ $isDarkMode }): string =>
$isDarkMode ? 'rgba(255,255,255,0.1)' : 'rgba(0, 0, 0, 0.1)'};
}
`;
export const ExpandIconWrapper = styled(Col)`
color: ${blue[6]};
padding: 0.25rem 0.375rem;
cursor: pointer;
font-size: 12px;
`;

View File

@@ -0,0 +1,12 @@
import { TableProps } from 'antd';
export const defaultCellStyle: React.CSSProperties = {
paddingTop: 4,
paddingBottom: 6,
paddingRight: 8,
paddingLeft: 8,
};
export const tableScroll: TableProps<Record<string, unknown>>['scroll'] = {
x: true,
};

View File

@@ -0,0 +1,106 @@
import { ExpandAltOutlined } from '@ant-design/icons';
import { Table, Typography } from 'antd';
import { ColumnsType, ColumnType } from 'antd/es/table';
import dayjs from 'dayjs';
// utils
import { FlatLogData } from 'lib/logs/flatLogData';
import React, { useMemo } from 'react';
import { IField } from 'types/api/logs/fields';
// interfaces
import { ILog } from 'types/api/logs/log';
// styles
import { ExpandIconWrapper } from '../RawLogView/styles';
// config
import { defaultCellStyle, tableScroll } from './config';
type ColumnTypeRender<T = unknown> = ReturnType<
NonNullable<ColumnType<T>['render']>
>;
type LogsTableViewProps = {
logs: ILog[];
fields: IField[];
linesPerRow: number;
onClickExpand: (log: ILog) => void;
};
function LogsTableView(props: LogsTableViewProps): JSX.Element {
const { logs, fields, linesPerRow, onClickExpand } = props;
const flattenLogData = useMemo(() => logs.map((log) => FlatLogData(log)), [
logs,
]);
const columns: ColumnsType<Record<string, unknown>> = useMemo(() => {
const fieldColumns: ColumnsType<Record<string, unknown>> = fields.map(
({ name }) => ({
title: name,
dataIndex: name,
key: name,
render: (field): ColumnTypeRender<Record<string, unknown>> => ({
props: {
style: defaultCellStyle,
},
children: (
<Typography.Paragraph ellipsis={{ rows: linesPerRow }}>
{field}
</Typography.Paragraph>
),
}),
}),
);
return [
{
title: '',
dataIndex: 'id',
key: 'expand',
// https://github.com/ant-design/ant-design/discussions/36886
render: (_, item): ColumnTypeRender<Record<string, unknown>> => ({
props: {
style: defaultCellStyle,
},
children: (
<ExpandIconWrapper
onClick={(): void => {
onClickExpand((item as unknown) as ILog);
}}
>
<ExpandAltOutlined />
</ExpandIconWrapper>
),
}),
},
{
title: 'Timestamp',
dataIndex: 'timestamp',
key: 'timestamp',
// https://github.com/ant-design/ant-design/discussions/36886
render: (field): ColumnTypeRender<Record<string, unknown>> => {
const date = dayjs(field / 1e6).format();
return {
props: {
style: defaultCellStyle,
},
children: <span>{date}</span>,
};
},
},
...fieldColumns,
];
}, [fields, linesPerRow, onClickExpand]);
return (
<Table
columns={columns}
dataSource={flattenLogData}
pagination={false}
rowKey="id"
bordered
scroll={tableScroll}
/>
);
}
export default LogsTableView;
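A usage sketch for the table view — `fields` decides which flattened log attributes become columns, alongside the fixed expand and Timestamp columns (the cast is only because `IField`'s full shape isn't visible in this diff):

// Illustrative sketch only — the field name is an assumption.
import React from 'react';
import { IField } from 'types/api/logs/fields';
import { ILog } from 'types/api/logs/log';
import LogsTableView from './LogsTableView';

function Example({ logs }: { logs: ILog[] }): JSX.Element {
  const fields = ([{ name: 'container_name' }] as unknown) as IField[];
  return (
    <LogsTableView
      logs={logs}
      fields={fields}
      linesPerRow={1}
      onClickExpand={console.log}
    />
  );
}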

Some files were not shown because too many files have changed in this diff.