Compare commits

...

159 Commits

Author SHA1 Message Date
Ankit Nayan
a118c3c8a1 adding frontend image tags 0.3.1 2021-06-08 22:36:17 +05:30
Ankit Nayan
9baf873521 Merge pull request #170 from SigNoz/fix-serviceMap-zoom
(Fix) - serviceMap zooms correctly
2021-06-07 20:58:53 +05:30
Ankit Nayan
12911db945 (Fix) - serviceMap zooms correctly 2021-06-07 20:58:02 +05:30
Yash Joshi
bd149f4364 fix: trace graph styles (#165)
- Prevent vertical shift on hover due to border
- Show faded traces
2021-06-07 20:51:57 +05:30
Anwesh Nayak
c69b9ae62a feat(docs): Update bug_report.md and add version section (#152)
* feat(docs): Update bug_report.md and add version section

* fix(docs): remove duplicate additional context error in markdown

Co-authored-by: anweshknayak <anweshnayak@Anweshs-MacBook-Air.local>
2021-06-07 20:50:26 +05:30
Ankit Anand
bc3f16d3de changed query-service image tag to 0.3.1 and druid image tag to 0.21.1-rc2 2021-06-07 17:55:26 +05:30
Ankit Nayan
61bbd5551b Merge pull request #168 from SigNoz/druid_permission_fix
Druid permission fix
2021-06-07 17:24:53 +05:30
Ankit Anand
286577d13d added new druid image to fix docker permission issue 2021-06-07 17:23:34 +05:30
Ankit Anand
dbd0701779 delete test folder for interface implementation 2021-06-07 16:59:40 +05:30
Ankit Anand
0c7a5ce3c7 added limit to serviceMapDependencies query to druid 2021-06-07 16:59:13 +05:30
Pranay Prateek
a92381df1b Merge pull request #167 from SigNoz/ankit01-oss-patch-1
Updated description
2021-06-07 12:32:35 +05:30
Ankit Anand
eb1509d385 Updated description 2021-06-07 12:27:22 +05:30
Ankit Nayan
34e33af290 Merge pull request #156 from SigNoz/check-antd-css
refactor: remove antd unused css
2021-06-05 19:08:18 +05:30
Ankit Nayan
c0004cd51c Merge pull request #157 from jyash97/fix/suspense-loader
fix: suspense for lazy loaded pages
2021-06-05 17:29:24 +05:30
Ankit Nayan
10bf545c65 Merge branch 'main' into fix/suspense-loader 2021-06-05 17:29:09 +05:30
Ankit Nayan
7d2bcf11c3 Merge pull request #150 from jyash97/fix/router-prop
fix: react-router prop
2021-06-05 16:59:55 +05:30
Yash Joshi
3ff7ace54e fix: suspense for lazy loaded pages
Add suspense to track the route splitted modules, this will make sure that the sidebar and navbar is not unmounted whenever modules start fetching.
2021-06-05 12:38:37 +05:30
Nidhi Tandon
abdfe6ccc5 chore: remove unused files 2021-06-05 11:50:25 +05:30
Ankit Nayan
aa398263fb Merge pull request #155 from SigNoz/installation-default-clickhouse
choose clickhouse on enter press
2021-06-05 11:50:12 +05:30
Ankit Anand
ace02486e0 choose clickhouse on enter press 2021-06-05 11:48:44 +05:30
Yash Joshi
b318ba6b2f fix: router prop name 2021-06-04 11:54:18 +05:30
Nidhi Tandon
de4be411f4 refactor: remove antd unused css 2021-06-03 21:25:29 +05:30
Ankit Nayan
362f264bae Installation changes for docker (#149)
* installation steps WIP

* changing install.sh

* fixes

* fixes

* fixes

* handled enter key press in setup_type

* fixes

* fixes

* fixes

Co-authored-by: Ankit Anand <cruxaki@gmail.com>
2021-06-03 20:54:41 +05:30
Pranay Prateek
e94d984cdb Merge pull request #148 from jyash97/patch-1
docs: update slack invite link
2021-06-03 17:24:02 +05:30
Yash Joshi
bf0267d579 docs: update slack invite link 2021-06-03 15:32:36 +05:30
Ankit Nayan
e4b3ea1f34 Merge pull request #145 from SigNoz/spanAggregatesAPI
added spansAggregate API implementation for clickhouse
2021-06-02 18:34:56 +05:30
Ankit Anand
4ee6d4b546 added spansAggregate API implementation for clickhouse 2021-06-02 18:34:03 +05:30
Ankit Anand
a7836c26d0 kubernetes configs updated 2021-06-02 11:45:00 +05:30
Ankit Anand
15eb5364d5 added healthcheck to druid router service and added it as dependency of query-service 2021-06-02 00:43:30 +05:30
Ankit Anand
47bf512a33 added healthcheck for query service to wait for router service 2021-06-01 17:17:12 +05:30
Ankit Anand
2776bfa311 added nginx config for gzip 2021-06-01 16:45:45 +05:30
Ankit Anand
8c7ac88f84 added STORAGE env variable to deployment templte 2021-06-01 16:23:19 +05:30
Ankit Anand
a08ad9e2cf changed image versions to 0.3.0 2021-06-01 16:22:50 +05:30
Ankit Anand
d312398f18 changed values for release 0.3.0 2021-06-01 15:57:21 +05:30
Ankit Nayan
d891c3e118 Merge pull request #144 from SigNoz/query_refactor
Query Service refactor to add interface for APIs
2021-06-01 15:17:19 +05:30
Ankit Anand
1e7b68203f added interface for spanAggregates API 2021-06-01 15:13:48 +05:30
Ankit Anand
3d152e23cd Merge branch 'main' into query_refactor 2021-06-01 11:50:27 +05:30
Ankit Nayan
47cf1eebf7 Merge pull request #143 from SigNoz/test-dropdown-fix
fix: add dark and compact theme css to index.css
2021-06-01 11:37:29 +05:30
Nidhi Tandon
6c84882dca fix: add css back to assets 2021-06-01 11:14:20 +05:30
Ankit Anand
a4424eca0e changed to port 8080 2021-06-01 10:00:48 +05:30
Ankit Anand
77992a59bc GetServices API sorted by p99 desc 2021-05-31 23:48:10 +05:30
Ankit Anand
3cbb071138 Merge branch 'main' into query_refactor 2021-05-31 22:33:47 +05:30
Ankit Nayan
9cd6e5cabe Merge pull request #140 from SigNoz/change-p90
feat: update p90 to p95
2021-05-31 22:01:29 +05:30
Nidhi Tandon
13bec63fca feat: update p90 to p95 2021-05-31 21:59:36 +05:30
Ankit Nayan
f2164a1a86 Merge pull request #138 from SigNoz/change-p90
feat: update response param p90 to p95 for /top_endpoints
2021-05-31 21:58:20 +05:30
Nidhi Tandon
8a4f58e77b Merge branch 'main' into change-p90
# Conflicts:
#	frontend/src/store/actions/metrics.ts
#	frontend/src/store/reducers/metrics.ts
2021-05-31 21:56:25 +05:30
Ankit Nayan
51a24673b9 Merge pull request #139 from SigNoz/update-zoom-px
feat: update zoom pixels based on screen width
2021-05-31 21:55:29 +05:30
Ankit Nayan
c94feb9af2 Merge pull request #136 from SigNoz/refactor-metrics-reducer
refactor(FE: Reducers): metrics reducers & actions
2021-05-31 21:54:06 +05:30
Nidhi Tandon
a8668d19a8 Merge branch 'main' into refactor-metrics-reducer
# Conflicts:
#	frontend/src/store/reducers/index.ts
2021-05-31 21:52:47 +05:30
Ankit Nayan
a8e81c9666 Merge pull request #133 from SigNoz/remove-bundle-analyzer
ci(FE): remove webpack bundle analyzer
2021-05-31 21:49:04 +05:30
Ankit Nayan
2eed75560d Merge pull request #130 from SigNoz/refactor-redux
refactor(FE: traceFilters): remove multiple reducers
2021-05-31 21:48:41 +05:30
Nidhi Tandon
8d6fb7f897 feat: update zoom pixels based on screen width 2021-05-31 21:45:03 +05:30
Nidhi Tandon
4cd0088029 fix: move traces actions to common action types 2021-05-31 21:26:27 +05:30
Nidhi Tandon
872c8adbbb feat: update response param p90 to p95 2021-05-31 21:06:39 +05:30
Ankit Anand
bba7344bae fixes for CH API implementations 2021-05-31 18:05:54 +05:30
Ankit Anand
51fe634566 More methods from interface implemented for ClickHouse 2021-05-31 11:14:11 +05:30
Nidhi Tandon
af58d085a0 feat(FE: Reducers): Combine Metrics reducers and refactor Metrics actions 2021-05-30 19:07:37 +05:30
Nidhi Tandon
5b9b344816 chore(FE): remove webpack bundle analyzer 2021-05-30 12:41:30 +05:30
Ankit Nayan
1caa07e0af Merge pull request #131 from SigNoz/gzip
ci: gzip bundle
2021-05-30 11:58:08 +05:30
Ankit Nayan
ae23cec8d6 Merge pull request #132 from SigNoz/enable-gzip-frontend
added gzip config to nginx conf file
2021-05-30 11:57:25 +05:30
Ankit Anand
5afc04f205 added gzip config to nginx conf file 2021-05-30 11:55:47 +05:30
Ankit Anand
6aed23ce66 clickhouse implementation WIP 2021-05-30 11:14:55 +05:30
Nidhi Tandon
007e2e7b78 ci: gzip bundle 2021-05-30 10:39:19 +05:30
Ankit Anand
762a3cdfcd dbOverview API with nullable string 2021-05-29 22:15:49 +05:30
Nidhi Tandon
308f8f8fed refactor(reducers): remove multiple reducers 2021-05-29 16:46:48 +05:30
Ankit Anand
588bf2b93a Merge branch 'main' into query_refactor 2021-05-29 16:37:46 +05:30
Ankit Anand
fff38b58d2 span search api working 2021-05-29 16:32:11 +05:30
Anwesh Nayak
cbd2036613 fix(script): add message to kill docker containers (#128)
* fix(script): add message to kill docker containers

* fix(script): add message to kill docker containers

* fix(script): add message to kill docker containers

Co-authored-by: anweshknayak <anweshnayak@Anweshs-MacBook-Air.local>
2021-05-29 13:13:39 +05:30
Ankit Nayan
7ef72d4147 Merge pull request #125 from anweshknayak/doc-fix
fix(doc): correct doc url for troubleshooting
2021-05-29 12:24:10 +05:30
anweshknayak
07af5c843a fix(doc): correct doc url for troubleshooting 2021-05-28 20:51:11 +05:30
Ankit Anand
e524ce5743 Merge branch 'main' into query_refactor 2021-05-28 11:43:38 +05:30
Ankit Nayan
24e1346521 Merge pull request #122 from SigNoz/reduce-bundle-size
refactor: remove unused lib and code
2021-05-27 13:35:56 +05:30
Ankit Anand
62e77613a6 sample API working in CH 2021-05-27 12:52:34 +05:30
Nidhi Tandon
56c0265660 refactor: remove unused lib and code 2021-05-26 20:38:18 +05:30
Ankit Nayan
91b1d08dff Merge pull request #119 from SigNoz/fix-endpoints-css
fix(css): end points overflow issue
2021-05-25 12:40:52 +05:30
Nidhi Tandon
239c2cb859 feat(css): add tooltip to button hover & fix css 2021-05-24 21:48:01 +05:30
Nidhi Tandon
4173258d0a fix: end points overflow issue 2021-05-23 17:04:47 +05:30
Ankit Nayan
1cbbdd8265 Merge pull request #118 from SigNoz/fix-api-call-twice
fix: call api with update value
2021-05-23 17:02:56 +05:30
Ankit Anand
433f3f3d94 clickhouse implementation WIP 2021-05-23 16:45:00 +05:30
Nidhi Tandon
fed23a6ab9 chore: add comments 2021-05-23 16:06:40 +05:30
Nidhi Tandon
b979c24cb4 refactor: remove unused prop 2021-05-23 15:43:38 +05:30
Nidhi Tandon
e4b41b1a27 feat: load data based on isLoaded flag 2021-05-23 15:40:48 +05:30
Nidhi Tandon
44495b7669 feat(ServiceMap): dispatch isLoaded route via context 2021-05-23 14:15:13 +05:30
Pranay Prateek
cc3133b2d6 Update README.md 2021-05-22 21:49:56 +05:30
Ankit Anand
9c83319143 interface working with druid APIs 2021-05-22 19:51:56 +05:30
Pranay Prateek
571c08c58e Update issue templates 2021-05-22 19:01:39 +05:30
Pranay Prateek
092cfc7804 Update issue templates 2021-05-22 18:38:31 +05:30
Nidhi Tandon
245050aac2 fix(ServiceMap): multiple api calls of date picker 2021-05-22 17:26:16 +05:30
Ankit Anand
606fa6591d added test folder for testing interface 2021-05-22 13:35:30 +05:30
Ankit Anand
55f7f56acf releasing v0.2.2 2021-05-18 16:55:49 +05:30
Ankit Nayan
e6b3a6c9db Merge pull request #107 from SigNoz/issues-106
Display upto 20 characters in name of service in ServiceMap
2021-05-17 18:03:32 +05:30
DIO
d6884cacdb Merge branch 'main' into issues-106 2021-05-17 17:43:04 +05:30
Ankit Nayan
bb155d2356 Merge pull request #109 from SigNoz/issues-93
Add default view in dropdown service-picker in ServiceMap
2021-05-17 17:23:26 +05:30
dhrubesh
c49ffd83a3 remove logs 2021-05-17 14:43:20 +05:30
dhrubesh
8a5178f0dc adds default view option 2021-05-17 14:42:39 +05:30
dhrubesh
057fba112b updates max length 2021-05-17 14:29:35 +05:30
Ankit Nayan
4c0b81b5c7 Merge pull request #108 from SigNoz/remove-zoom-post-stable
Removes abrupt zoom post becoming stable
2021-05-17 11:58:32 +05:30
dhrubesh
1d2f964a63 updates text color 2021-05-17 09:39:20 +05:30
dhrubesh
171fd714de removes zoom post becoming stable 2021-05-17 09:06:05 +05:30
Ankit Anand
789880fa07 changing constants for zoom-in for different screen sizes 2021-05-16 19:55:36 +05:30
Ankit Nayan
f25edf1e29 Merge pull request #102 from SigNoz/issue-92
Change time range in api call of Service Map to 1 min from latest
2021-05-16 19:47:45 +05:30
dhrubesh
c6e2e297d5 resolves conflicts 2021-05-16 18:44:26 +05:30
dhrubesh
2bc01e50bd Merge branch 'issue-92' of github.com-dhrubesh:SigNoz/signoz into issue-92 2021-05-16 18:36:18 +05:30
dhrubesh
38770809e3 handles route specific default value connected to localstorage 2021-05-16 18:35:50 +05:30
Ankit Nayan
9dd9f1133b Merge pull request #104 from SigNoz/issue-103-
Fixes multiple re-renders
2021-05-16 17:23:55 +05:30
Ankit Nayan
8b743f7803 Merge branch 'issue-92' into issue-103- 2021-05-16 17:23:44 +05:30
Ankit Nayan
868b7691b3 Merge pull request #105 from SigNoz/issue-95
Calculate zoom px based on screen size
2021-05-16 17:21:38 +05:30
Ankit Nayan
613e6ba5f9 Merge pull request #106 from SigNoz/issue-97
Adds tooltip on hover
2021-05-16 17:21:25 +05:30
dhrubesh
8fe2fe5aec adds a utility function to transform label 2021-05-16 15:50:32 +05:30
dhrubesh
55a7b5b1b3 adds tooltip on hover 2021-05-16 15:08:31 +05:30
dhrubesh
8b0abbec79 adds default options config by route 2021-05-15 23:24:53 +05:30
dhrubesh
24416ceabd adds width to Select 2021-05-15 19:50:16 +05:30
dhrubesh
2482e91348 calculate zoom px based on screen size 2021-05-15 18:23:29 +05:30
dhrubesh
fcc248ddf6 resets data to avoid multiple re-rendering for parallel apis 2021-05-15 15:18:30 +05:30
dhrubesh
3318ec8c38 removes 1day and adds 5mins 2021-05-15 14:59:47 +05:30
dhrubesh
a416767950 choose config based on routes 2021-05-13 20:07:48 +05:30
dhrubesh
173bd01e70 adds last 1min to store 2021-05-13 20:07:25 +05:30
dhrubesh
de4adeded5 creates 2 diff config for datepicker 2021-05-13 20:06:44 +05:30
Ankit Anand
674fb34115 updated readme 2021-05-11 21:41:42 +05:30
Shweta Bhave
9c74f0bae5 updated readme 2021-05-11 21:34:32 +05:30
Ankit Nayan
2999adc98f added nodes without any dependencies in serviceMap 2021-05-11 13:15:10 +05:30
Ankit Nayan
be7d8c3347 fixed default 4xxErrorRate injected to test 2021-05-10 17:02:58 +05:30
Ankit Nayan
41dd007380 increased speed of particles in serviceMap 2021-05-10 12:07:38 +05:30
Ankit Nayan
83eb73ee03 changing deployment options to 0.2.1 2021-05-10 10:46:16 +05:30
Ankit Nayan
5b2f985710 Merge pull request #91 from SigNoz/disable-options
Disables invalid CTA, updates options based on API payload
2021-05-10 00:10:04 +05:30
dhrubesh
e9c03c4d85 p90->p95 2021-05-10 00:05:49 +05:30
Ankit Nayan
d07e277220 Merge pull request #90 from SigNoz/service-map
Service map view
2021-05-10 00:02:07 +05:30
dhrubesh
9bcdb2ede6 removes set alert 2021-05-10 00:01:50 +05:30
dhrubesh
4bbc4eef1a 399 --> 380 2021-05-09 23:57:50 +05:30
dhrubesh
36ad8987dd cosmetic updates 2021-05-09 23:45:42 +05:30
dhrubesh
45f1c2ec11 removes hardcoding 2021-05-09 23:03:51 +05:30
dhrubesh
705279b6fd fixes zoom issue 2021-05-09 23:02:16 +05:30
dhrubesh
9ac2dece11 UX updates 2021-05-09 22:51:08 +05:30
dhrubesh
325ca434d4 adds height variant 2021-05-09 20:47:56 +05:30
dhrubesh
128d75a144 fixes zoom px and disabledNodeDrag 2021-05-09 19:30:16 +05:30
dhrubesh
45375fbd53 fixes edge case 2021-05-09 19:12:54 +05:30
dhrubesh
2d646c0655 adds hardcoded data 2021-05-09 18:59:49 +05:30
dhrubesh
6f12d06a32 adds selection of service and zoom into node feature 2021-05-09 18:27:37 +05:30
dhrubesh
bc02aa5eef calculate nodes size and color via RPS errorRate 2021-05-09 15:41:57 +05:30
dhrubesh
c7ed2daf4a initial set up with react-force-graph 2021-05-09 14:44:14 +05:30
Pranay Prateek
5e97dfa5fc updated twitter handle for SigNoz 2021-05-09 01:26:25 +05:30
Ankit Nayan
44666a4944 changed p90 to p95 in service overview api 2021-05-06 17:59:54 +05:30
Ankit Nayan
14f6a23f51 Merge branch 'main' of https://github.com/signoz/signoz into main 2021-05-06 17:36:16 +05:30
Ankit Nayan
050b57c72b added ability to query tags with isnotnull operator 2021-05-06 17:35:29 +05:30
Ankit Nayan
0f891ccb26 added kind as field in model for span search 2021-05-06 17:34:55 +05:30
Ankit Nayan
b3755325ba added kind as url param in parser for span search 2021-05-06 17:34:30 +05:30
Pranay Prateek
3014948f26 Merge pull request #85 from pranay01/main
updated badges on README
2021-05-06 17:18:28 +05:30
Pranay Prateek
1e1fc38c96 updated badges 2021-05-06 17:04:15 +05:30
Pranay Prateek
dad678a4c1 updated badges 2021-05-06 16:45:15 +05:30
Pranay Prateek
f91d8685e3 Merge pull request #84 from pranay01/main
updated docker pull badge
2021-05-06 13:18:13 +05:30
Pranay Prateek
50a2f3b6f9 updated docker pull badge 2021-05-06 13:16:41 +05:30
Pranay Prateek
97c7543557 Merge pull request #83 from pranay01/main
updated features section
2021-05-06 12:55:19 +05:30
Pranay Prateek
e4c8dcf3ca updated features section 2021-05-06 12:54:32 +05:30
Pranay Prateek
5a6158a2e5 Merge pull request #82 from pranay01/main
Updated ReadMe
2021-05-06 12:43:35 +05:30
Pranay Prateek
9936b3ab46 updated motivation section 2021-05-06 12:41:52 +05:30
Pranay Prateek
673d65db40 updated intro 2021-05-06 12:39:55 +05:30
Pranay Prateek
5e1592274c updated motivation and intro 2021-05-06 12:31:14 +05:30
Ankit Nayan
a50fd14ef2 fixed bug in External APIs error % mapping 2021-05-05 13:04:09 +05:30
Ankit Nayan
baedfa62d2 added service map api and 4xx rate in /services api 2021-05-05 00:03:57 +05:30
107 changed files with 26096 additions and 2289 deletions

.github/ISSUE_TEMPLATE/bug_report.md

@@ -0,0 +1,33 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
## Bug description
*Please describe.*
*If this affects the front-end, screenshots would be of great help.*
## Expected behavior
## How to reproduce
1.
2.
3.
## Version information
* **Signoz version**:
* **Browser version**:
* **Your OS and version**:
## Additional context
#### *Thank you* for your bug report; we love squashing them!


@@ -0,0 +1,27 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
## Is your feature request related to a problem?
*Please describe.*
## Describe the solution you'd like
## Describe alternatives you've considered
## Additional context
Add any other context or screenshots about the feature request here.
#### *Thank you* for your feature request; we love each and every one!


@@ -0,0 +1,33 @@
---
name: Performance issue report
about: Long response times, high resource usage? Ensuring that SigNoz is scalable is our top priority
title: ''
labels: ''
assignees: ''
---
## In what situation are you experiencing subpar performance?
*Please describe.*
## How to reproduce
1.
2.
3.
## Your Environment
- [ ] Linux
- [ ] Mac
- [ ] Windows
Please provide details of OS version etc.
## Additional context
#### *Thank you* for your performance issue report; we want SigNoz to be blazing fast!


@@ -1,6 +1,6 @@
# How to Contribute
You can always reach out to ankit@signoz.io to understand more about the repo and product. We are very responsive over email and [slack](https://signoz-community.slack.com/join/shared_invite/zt-kj26gm1u-Xe3CYxCu0bGXCrCqKipjOA#/).
You can always reach out to ankit@signoz.io to understand more about the repo and product. We are very responsive over email and [slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
- You can create a PR (Pull Request)
- If you find any bugs, please create an issue


@@ -4,17 +4,27 @@
<p align="center">Monitor your applications and troubleshoot problems in your deployed applications, an open-source alternative to DataDog, New Relic, etc.</p>
</p>
[![MIT](https://img.shields.io/badge/license-MIT-brightgreen)](LICENSE)
<p align="center">
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>
##
SigNoz is an opensource observability platform. SigNoz uses distributed tracing to gain visibility into your systems and powers data using [Kafka](https://kafka.apache.org/) (to handle high ingestion rate and backpressure) and [Apache Druid](https://druid.apache.org/) (Apache Druid is a high performance real-time analytics database), both proven in the industry to handle scale.
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
👉 You can see metrics like p99 latency, error rates for your services, external API calls and individual end points.
👉 You can find the root cause of the problem by going to the exact traces which are causing the problem and see detailed flamegraphs of individual request traces.
<!-- ![SigNoz Feature](https://signoz.io/img/readme_feature1.jpg) -->
![SigNoz Feature](https://res.cloudinary.com/dcv3epinx/image/upload/v1618904032/signoz-images/screenzy-1618904013729_clssvy.png)
### Features:
### 👇 Features:
- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
- Slowest endpoints in your application
@@ -22,16 +32,25 @@ SigNoz is an opensource observability platform. SigNoz uses distributed tracing
- Filter traces by service name, operation, latency, error, tags/annotations.
- Aggregate metrics on filtered traces. Eg, you can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.
- In-built workflows to reduce your efforts in detecting common issues like new deployment failures, 3rd party slow APIs, etc (Coming Soon)
- Anomaly Detection Framework (Coming Soon)
### Motivation:
### 🤓 Why SigNoz?
- SaaS vendors charge an insane amount to provide Application Monitoring. They often surprise you with huge month end bills without any transparency of data sent to them.
- Data privacy and compliance demands data to not leave the network boundary
- Highly scalable architecture
- No more magic happening in agents installed in your infra. You take control of sampling, uptime, configuration.
- Build modules over SigNoz to extend business specific capabilities
Being developers, we found it annoying to rely on closed source SaaS vendors for every small feature we wanted. Closed source vendors often surprise you with huge month end bills without any transparency.
We wanted to make a self-hosted & open source version of tools like DataDog, NewRelic for companies that have privacy and security concerns about having customer data going to third party services.
Being open source also gives you complete control of your configuration, sampling, uptimes. You can also build modules over SigNoz to extend business specific capabilities
### 👊🏻 Languages supported:
We support [OpenTelemetry](https://opentelemetry.io) as the library which you can use to instrument your applications. So any framework and language supported by OpenTelemetry is also supported by SigNoz. Some of the main supported languages are:
- Java
- Python
- NodeJS
- Go
You can find the complete list of languages here - https://opentelemetry.io/docs/
# Getting Started
@@ -41,9 +60,9 @@ We have a tiny-cluster setup and a standard setup to deploy using docker-compose
Follow the steps listed at https://signoz.io/docs/deployment/docker/.
The troubleshooting instructions at https://signoz.io/docs/deployment/docker/#troubleshooting may be helpful
## Deploy in Kubernetes using Helm.
## Deploy in Kubernetes using Helm
Below steps will install the SigNoz in platform namespace inside your k8s cluster.
Below steps will install the SigNoz in `platform` namespace inside your k8s cluster.
```console
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
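A minimal sketch of what the languages section above implies (not part of this diff): any OpenTelemetry-instrumented app can report to SigNoz by pointing its exporter at the collector. This assumes the stock OpenTelemetry Java agent and the OTLP gRPC port 4317 published by the ClickHouse docker-compose file later in this changeset; the jar names are placeholders.

```console
# Hypothetical: attach the OpenTelemetry Java agent to a JVM app and send
# traces to the collector's OTLP gRPC receiver (port 4317 in the compose file).
OTEL_RESOURCE_ATTRIBUTES="service.name=sample-app" \
OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317" \
  java -javaagent:./opentelemetry-javaagent.jar -jar app.jar
```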


@@ -0,0 +1,517 @@
<?xml version="1.0"?>
<yandex>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
</logger>
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<!-- For HTTPS and SSL over native protocol. -->
<!--
<https_port>8443</https_port>
<tcp_ssl_port>9440</tcp_ssl_port>
-->
<!-- Used with https_port and tcp_ssl_port. Full ssl options list: https://github.com/yandex/ClickHouse/blob/master/contrib/libpoco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
<!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
<dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
<verificationMode>none</verificationMode>
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
</server>
<client> <!-- Used for connecting to https dictionary source -->
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
<!-- Use for self-signed: <verificationMode>none</verificationMode> -->
<invalidCertificateHandler>
<!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
<name>RejectCertificateHandler</name>
</invalidCertificateHandler>
</client>
</openSSL>
<!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
<!--
<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
-->
<!-- Port for communication between replicas. Used for data exchange. -->
<interserver_http_port>9009</interserver_http_port>
<!-- Hostname that is used by other replicas to request this server.
If not specified, than it is determined analoguous to 'hostname -f' command.
This setting could be used to switch replication to another network interface.
-->
<!--
<interserver_http_host>example.yandex.ru</interserver_http_host>
-->
<!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
<listen_host>::</listen_host>
<!-- Same for hosts with disabled ipv6: -->
<!-- <listen_host>0.0.0.0</listen_host> -->
<!-- Default values - try listen localhost on ipv4 and ipv6: -->
<!-- <listen_host>0.0.0.0</listen_host> -->
<max_connections>4096</max_connections>
<keep_alive_timeout>3</keep_alive_timeout>
<!-- Maximum number of concurrent queries. -->
<max_concurrent_queries>100</max_concurrent_queries>
<!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
correct maximum value. -->
<!-- <max_open_files>262144</max_open_files> -->
<!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
In bytes. Cache is single for server. Memory is allocated only on demand.
Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
Uncompressed cache is advantageous only for very short queries and in rare cases.
-->
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
<!-- Approximate size of mark cache, used in tables of MergeTree family.
In bytes. Cache is single for server. Memory is allocated only on demand.
You should not lower this value.
-->
<mark_cache_size>5368709120</mark_cache_size>
<!-- Path to data directory, with trailing slash. -->
<path>/var/lib/clickhouse/</path>
<!-- Path to temporary data for processing hard queries. -->
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
<!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
<users_config>users.xml</users_config>
<!-- Default profile of settings.. -->
<default_profile>default</default_profile>
<!-- Default database. -->
<default_database>default</default_database>
<!-- Server time zone could be set here.
Time zone is used when converting between String and DateTime types,
when printing DateTime in text formats and parsing DateTime from text,
it is used in date and time related functions, if specific time zone was not passed as an argument.
Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
If not specified, system time zone at server startup is used.
Please note, that server could display time zone alias instead of specified name.
Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
-->
<!-- <timezone>Europe/Moscow</timezone> -->
<!-- You can specify umask here (see "man umask"). Server will apply it on startup.
Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
-->
<!-- <umask>022</umask> -->
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.yandex/reference_en.html#Distributed
-->
<remote_servers incl="clickhouse_remote_servers" >
<!-- Test only shard config for testing distributed storage -->
<test_shard_localhost>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_shard_localhost>
</remote_servers>
<!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
-->
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.yandex/reference_en.html#Data%20replication
-->
<zookeeper incl="zookeeper-servers" optional="true" />
<!-- Substitutions for parameters of replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.yandex/reference_en.html#Creating%20replicated%20tables
-->
<macros incl="macros" optional="true" />
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
<!-- Maximum session timeout, in seconds. Default: 3600. -->
<max_session_timeout>3600</max_session_timeout>
<!-- Default session timeout, in seconds. Default: 60. -->
<default_session_timeout>60</default_session_timeout>
<!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
<!--
interval - send every X second
root_path - prefix for keys
hostname_in_path - append hostname to root_path (default = true)
metrics - send data from table system.metrics
events - send data from table system.events
asynchronous_metrics - send data from table system.asynchronous_metrics
-->
<!--
<graphite>
<host>localhost</host>
<port>42000</port>
<timeout>0.1</timeout>
<interval>60</interval>
<root_path>one_min</root_path>
<hostname_in_path>true<hostname_in_path>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>true</asynchronous_metrics>
</graphite>
<graphite>
<host>localhost</host>
<port>42000</port>
<timeout>0.1</timeout>
<interval>1</interval>
<root_path>one_sec</root_path>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>false</asynchronous_metrics>
</graphite>
-->
<!-- Query log. Used only for queries with setting log_queries = 1. -->
<query_log>
<!-- What table to insert data. If table is not exist, it will be created.
When query log structure is changed after system update,
then old table will be renamed and new table will be created automatically.
-->
<database>system</database>
<table>query_log</table>
<!-- Interval of flushing data. -->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
<!-- Uncomment if use part_log
<part_log>
<database>system</database>
<table>part_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
-->
<!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
See https://clickhouse.yandex/reference_en.html#Internal%20dictionaries
-->
<!-- Path to file with region hierarchy. -->
<!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
<!-- Path to directory with files containing names of regions -->
<!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
<!-- Configuration of external dictionaries. See:
https://clickhouse.yandex/reference_en.html#External%20Dictionaries
-->
<dictionaries_config>*_dictionary.xml</dictionaries_config>
<!-- Uncomment if you want data to be compressed 30-100% better.
Don't do that if you just started using ClickHouse.
-->
<compression incl="clickhouse_compression">
<!--
<!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
<case>
<!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
<min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
<min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
<!- - What compression method to use. - ->
<method>zstd</method>
</case>
-->
</compression>
<!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
<distributed_ddl>
<!-- Path in ZooKeeper to queue with DDL queries -->
<path>/clickhouse/task_queue/ddl</path>
</distributed_ddl>
<!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
<!--
<merge_tree>
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
-->
<!-- Protection from accidental DROP.
If size of a MergeTree table is greater than max_table_size_to_drop (in bytes) than table could not be dropped with any DROP query.
If you want do delete one table and don't want to restart clickhouse-server, you could create special file <clickhouse-path>/flags/force_drop_table and make DROP once.
By default max_table_size_to_drop is 50GB, max_table_size_to_drop=0 allows to DROP any tables.
Uncomment to disable protection.
-->
<!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
<!-- Example of parameters for GraphiteMergeTree table engine -->
<graphite_rollup>
<!-- carbon -->
<pattern>
<regexp>^carbon\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>7776000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- collectd -->
<pattern>
<regexp>^collectd\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>10</precision>
</retention>
<retention>
<age>43200</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- high -->
<pattern>
<regexp>^high\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>10</precision>
</retention>
<retention>
<age>172800</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- medium -->
<pattern>
<regexp>^medium\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- low -->
<pattern>
<regexp>^low\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>600</precision>
</retention>
<retention>
<age>15552000</age>
<precision>1800</precision>
</retention>
<retention>
<age>31536000</age>
<precision>3600</precision>
</retention>
<retention>
<age>63072000</age>
<precision>21600</precision>
</retention>
<retention>
<age>126144000</age>
<precision>43200</precision>
</retention>
<retention>
<age>252288000</age>
<precision>86400</precision>
</retention>
<retention>
<age>315360000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- default -->
<default>
<function>any</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</default>
</graphite_rollup>
<!-- Directory in <clickhouse-path> containing schema files for various input formats.
The directory will be created if it doesn't exist.
-->
<format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
</yandex>


@@ -0,0 +1,97 @@
version: "2.4"

services:
  clickhouse:
    image: yandex/clickhouse-server
    expose:
      - 8123
      - 9000
    ports:
      - 9001:9000
      - 8123:8123
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3

  query-service:
    image: signoz/query-service:0.3.1
    container_name: query-service
    ports:
      - "8080:8080"
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - STORAGE=clickhouse
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
    depends_on:
      clickhouse:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.3.1
    container_name: frontend
    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3000:3000"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/otelcol:latest
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"   # pprof extension
      - "8887:8888"   # Prometheus metrics exposed by the agent
      - "14268:14268" # Jaeger receiver
      - "55678"       # OpenCensus receiver
      - "55680:55680" # OTLP HTTP/2.0 legacy port
      - "55681:55681" # OTLP HTTP/1.0 receiver
      - "4317:4317"   # OTLP GRPC receiver
      - "55679:55679" # zpages extension
      - "13133"       # health_check
    depends_on:
      clickhouse:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
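A quick way to exercise this file, sketched under the assumption that it is the docker/clickhouse-setup/docker-compose.yaml referenced by the install script later in this changeset:

```console
# Bring the ClickHouse-backed stack up, then probe the same endpoint the
# healthcheck above polls; ClickHouse answers "Ok." once it is ready.
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
curl -s http://localhost:8123/ping
# Frontend is published on port 3000 and query-service on 8080 (see the
# port mappings above).
```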


@@ -0,0 +1,27 @@
CREATE TABLE IF NOT EXISTS signoz_index (
  timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
  traceID String CODEC(ZSTD(1)),
  spanID String CODEC(ZSTD(1)),
  parentSpanID String CODEC(ZSTD(1)),
  serviceName LowCardinality(String) CODEC(ZSTD(1)),
  name LowCardinality(String) CODEC(ZSTD(1)),
  kind Int32 CODEC(ZSTD(1)),
  durationNano UInt64 CODEC(ZSTD(1)),
  tags Array(String) CODEC(ZSTD(1)),
  tagsKeys Array(String) CODEC(ZSTD(1)),
  tagsValues Array(String) CODEC(ZSTD(1)),
  statusCode Int64 CODEC(ZSTD(1)),
  references String CODEC(ZSTD(1)),
  externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
  externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
  component Nullable(String) CODEC(ZSTD(1)),
  dbSystem Nullable(String) CODEC(ZSTD(1)),
  dbName Nullable(String) CODEC(ZSTD(1)),
  dbOperation Nullable(String) CODEC(ZSTD(1)),
  peerService Nullable(String) CODEC(ZSTD(1)),
  INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
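A sketch of the kind of query this schema is built for, assuming clickhouse-client is installed on the host and using the 9001:9000 port mapping from the compose file above; the aggregation itself is illustrative, not part of this changeset:

```console
# Per-service p99 latency over the last 5 minutes, in milliseconds.
# Column names come from the DDL above; quantile() is a ClickHouse aggregate.
clickhouse-client --host 127.0.0.1 --port 9001 --query "
  SELECT serviceName,
         quantile(0.99)(durationNano) / 1e6 AS p99_ms
  FROM signoz_index
  WHERE timestamp > now() - INTERVAL 5 MINUTE
  GROUP BY serviceName
  ORDER BY p99_ms DESC"
```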


@@ -0,0 +1,39 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:

processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

extensions:
  health_check: {}
  zpages: {}

exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [batch]
      exporters: [clickhouse]
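The health_check and zpages extensions enabled above expose debug endpoints. A sketch of checking them from the host, using the port mappings in the compose file (zpages is published as 55679:55679; health_check's container port 13133 is only given an ephemeral host port, so map it explicitly for the second URL to work):

```console
# zpages: live, in-process snapshots of recent traces through the collector.
curl -s http://localhost:55679/debug/tracez
# health_check: reports collector availability (requires a fixed mapping
# such as "13133:13133" in the compose file).
curl -s http://localhost:13133/
```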


@@ -1,6 +1,16 @@
server {
    listen 3000;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
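One way to confirm the new gzip directives take effect once the frontend container picks up this config, a sketch assuming the 3000:3000 port mapping used throughout this changeset:

```console
# Request a compressed response and inspect the header; nginx compresses
# text/html by default as soon as "gzip on" is set.
curl -sI -H "Accept-Encoding: gzip" http://localhost:3000/ | grep -i content-encoding
# expected: content-encoding: gzip
```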


@@ -140,6 +140,11 @@ services:
env_file:
- environment_tiny/router
- environment_tiny/common
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
interval: 30s
timeout: 5s
retries: 5
flatten-processor:
image: signoz/flattener-processor:0.2.0
@@ -158,7 +163,7 @@ services:
query-service:
image: signoz.docker.scarf.sh/signoz/query-service:0.2.0
image: signoz.docker.scarf.sh/signoz/query-service:0.3.1
container_name: query-service
depends_on:
@@ -169,11 +174,15 @@ services:
environment:
- DruidClientUrl=http://router:8888
- DruidDatasource=flattened_spans
- STORAGE=druid
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
depends_on:
router:
condition: service_healthy
frontend:
image: signoz/frontend:0.2.1
image: signoz/frontend:0.3.1
container_name: frontend
depends_on:
@@ -183,7 +192,7 @@ services:
ports:
- "3000:3000"
volumes:
- ./nginx-config.conf:/etc/nginx/conf.d/default.conf
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
create-supervisor:
image: theithollow/hollowapp-blog:curl
@@ -260,5 +269,5 @@ services:
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ./locust-scripts:/locust
- ../common/locust-scripts:/locust


@@ -135,6 +135,11 @@ services:
- router
env_file:
- environment_small/router
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
interval: 30s
timeout: 5s
retries: 5
flatten-processor:
image: signoz/flattener-processor:0.2.0
@@ -153,7 +158,7 @@ services:
query-service:
image: signoz.docker.scarf.sh/signoz/query-service:0.2.0
image: signoz.docker.scarf.sh/signoz/query-service:0.3.1
container_name: query-service
depends_on:
@@ -164,11 +169,15 @@ services:
environment:
- DruidClientUrl=http://router:8888
- DruidDatasource=flattened_spans
- STORAGE=druid
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
depends_on:
router:
condition: service_healthy
frontend:
image: signoz/frontend:0.2.1
image: signoz/frontend:0.3.1
container_name: frontend
depends_on:


@@ -0,0 +1,26 @@
# For S3 storage
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
druid_storage_type=s3
druid_storage_bucket=solvzy-test3
druid_storage_baseKey=druid/segments
AWS_ACCESS_KEY_ID=AKIARKCF5OX3CMI3XRXC
AWS_SECRET_ACCESS_KEY=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
AWS_REGION=us-east-2
druid_indexer_logs_type=s3
druid_indexer_logs_s3Bucket=solvzy-test3
druid_indexer_logs_s3Prefix=druid/indexing-logs
# -----------------------------------------------------------
# For local storage
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
# druid_storage_type=local
# druid_storage_storageDirectory=/opt/data/segments
# druid_indexer_logs_type=file
# druid_indexer_logs_directory=/opt/data/indexing-logs


@@ -1,256 +0,0 @@
#!/bin/bash
set -o errexit
is_command_present() {
type "$1" >/dev/null 2>&1
}
is_mac() {
[[ $OSTYPE == darwin* ]]
}
check_k8s_setup() {
echo "Checking your k8s setup status"
if ! is_command_present kubectl; then
echo "Please install kubectl on your machine"
exit 1
else
if ! is_command_present jq; then
install_jq
fi
clusters=`kubectl config view -o json | jq -r '."current-context"'`
if [[ ! -n $clusters ]]; then
echo "Please setup a k8s cluster & config kubectl to connect to it"
exit 1
fi
k8s_minor_version=`kubectl version --short -o json | jq ."serverVersion.minor" | sed 's/[^0-9]*//g'`
# if [[ $k8s_minor_version < 18 ]]; then
# echo "+++++++++++ ERROR ++++++++++++++++++++++"
# echo "SigNoz deployments require Kubernetes >= v1.18. Found version: v1.$k8s_minor_version"
# echo "+++++++++++ ++++++++++++++++++++++++++++"
# exit 1
# fi;
fi
}
install_jq(){
if [ $package_manager == "brew" ]; then
brew install jq
elif [ $package_manager == "yum" ]; then
yum_cmd="sudo yum --assumeyes --quiet"
$yum_cmd install jq
else
apt_cmd="sudo apt-get --yes --quiet"
$apt_cmd update
$apt_cmd install jq
fi
}
check_os() {
if is_mac; then
package_manager="brew"
desired_os=1
os="Mac"
return
fi
os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"
case "$os_name" in
Ubuntu*)
desired_os=1
os="ubuntu"
package_manager="apt-get"
;;
Debian*)
desired_os=1
os="debian"
package_manager="apt-get"
;;
Red\ Hat*)
desired_os=1
os="red hat"
package_manager="yum"
;;
CentOS*)
desired_os=1
os="centos"
package_manager="yum"
;;
*)
desired_os=0
os="Not Found"
esac
}
echo_contact_support() {
echo "Please contact <support@signoz.io> with your OS details and version${1:-.}"
}
bye() { # Prints a friendly good bye message and exits the script.
set +o errexit
echo "Please share your email to receive support with the installation"
read -rp 'Email: ' email
while [[ $email == "" ]]
do
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "platform": "k8s", "k8s_minor_version": "'"$k8s_minor_version"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo -e "\nExiting for now. Bye! \U1F44B\n"
exit 1
}
deploy_app() {
kubectl apply -f "$install_dir/config-template"
kubectl apply -f "$install_dir"
}
wait_for_application_start() {
local timeout=$1
address=$custom_domain
if [[ "$ssl_enable" == "true" ]]; then
protocol="https"
else
protocol="http"
fi
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
if [[ $address == "" || $address == null ]]; then
address=`kubectl get ingress appsmith-ingress -o json | jq -r '.status.loadBalancer.ingress[0].ip'`
fi
status_code="$(curl -s -o /dev/null -w "%{http_code}" $protocol://$address/api/v1 || true)"
if [[ status_code -eq 401 ]]; then
break
else
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds...\r\c"
fi
((timeout--))
sleep 1
done
echo ""
}
echo -e "👋 Thank you for trying out SigNoz! "
echo ""
# Checking OS and assigning package manager
desired_os=0
os=""
echo -e "🕵️ Detecting your OS"
check_os
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
# Run bye if failure happens
trap bye EXIT
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "platform": "k8s", "k8s_minor_version": "'"$k8s_minor_version"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
# Check for kubernetes setup
check_k8s_setup
echo ""
echo "Deploy Appmisth on your cluster"
echo ""
deploy_app
wait_for_application_start 60
if [[ $status_code -ne 200 ]]; then
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "platform": "k8s", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"' } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
exit 1
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"} }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
echo "Your installation is complete!"
echo ""
echo "Your frontend is running on 'http://localhost:3000'."
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "Need help Getting Started?"
echo "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo ""
echo "Please share your email to receive support & updates about SigNoz!"
read -rp 'Email: ' email
while [[ $email == "" ]]
do
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "platform": "k8s" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
fi
echo -e "\nThank you!\n"


@@ -2,6 +2,16 @@
set -o errexit
# Regular Colors
Black='\033[0;30m' # Black
Red='\[\e[0;31m\]' # Red
Green='\033[0;32m' # Green
Yellow='\033[0;33m' # Yellow
Blue='\033[0;34m' # Blue
Purple='\033[0;35m' # Purple
Cyan='\033[0;36m' # Cyan
White='\033[0;37m' # White
NC='\033[0m' # No Color
is_command_present() {
type "$1" >/dev/null 2>&1
@@ -88,7 +98,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="80|443"
local ports_pattern="80|3000|8080"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -192,7 +202,7 @@ install_docker_compose() {
echo ""
fi
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -212,8 +222,7 @@ install_docker_compose() {
start_docker() {
echo "Starting Docker ..."
if [ $os == "Mac" ]
then
if [ $os = "Mac" ]; then
open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
else
if ! sudo systemctl is-active docker.service > /dev/null; then
@@ -231,16 +240,17 @@ wait_for_containers_start() {
if [[ status_code -eq 200 ]]; then
break
else
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
LEN_SUPERVISORS="${#SUPERVISORS}"
if [ $setup_type == 'druid' ]; then
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
LEN_SUPERVISORS="${#SUPERVISORS}"
if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
echo "No Supervisors found... Re-applying docker compose\n"
sudo docker-compose -f ./docker/docker-compose-tiny.yaml up -d
if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
echo -e "\n🟠 Supervisors taking time to start ⏳ ... let's wait for some more time ⏱️\n\n"
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d
fi
fi
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds...\r\c"
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
fi
((timeout--))
sleep 1
@@ -253,14 +263,18 @@ bye() { # Prints a friendly good bye message and exits the script.
if [ "$?" -ne 0 ]; then
set +o errexit
echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
echo "Please share your email to receive support with the installation"
echo -e "\n📨 Please share your email to receive support with the installation"
read -rp 'Email: ' email
while [[ $email == "" ]]
@@ -268,7 +282,7 @@ bye() { # Prints a friendly good bye message and exits the script.
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -294,17 +308,39 @@ echo ""
# Checking OS and assigning package manager
desired_os=0
os=""
echo -e "🕵️ Detecting your OS"
echo -e "Detecting your OS ..."
check_os
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
echo ""
echo -e "👉 ${RED}Two ways to go forward\n"
echo -e "${RED}1) ClickHouse as database (default)\n"
echo -e "${RED}2) Kafka + Druid setup to handle scale (recommended for production use)\n"
read -p "⚙️ Enter your preference (1/2):" choice_setup
while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
do
# echo $choice_setup
echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
read -p "⚙️ Enter your preference (1/2): " choice_setup
# echo $choice_setup
done
if [[ $choice_setup == "1" || $choice_setup == "" ]];then
setup_type='clickhouse'
else
setup_type='druid'
fi
echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
# Run bye if failure happens
trap bye EXIT
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -316,7 +352,7 @@ fi
if [[ $desired_os -eq 0 ]];then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -340,7 +376,7 @@ if ! is_command_present docker; then
echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
echo "https://docs.docker.com/docker-for-mac/install/"
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -358,43 +394,59 @@ if ! is_command_present docker-compose; then
install_docker_compose
fi
# if ! is_command_present docker-compose; then
# install_docker_machine
# docker-machine create -d virtualbox --virtualbox-memory 3584 signoz
# fi
start_docker
# sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
echo ""
echo "Pulling the latest container images for SigNoz. To run as sudo it will ask for system password."
sudo docker-compose -f ./docker/docker-compose-tiny.yaml pull
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
fi
echo ""
echo "Starting the SigNoz containers. It may take a few minute ..."
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
sudo docker-compose -f ./docker/docker-compose-tiny.yaml up --detach --remove-orphans || true
if [ $setup_type == 'clickhouse' ]; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
fi
wait_for_containers_start 60
echo ""
if [[ $status_code -ne 200 ]]; then
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
if [ $setup_type == 'clickhouse' ]; then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "data": "some_checks", "setup_type": "'"$setup_type"'" } }'
else
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"' } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"', "setup_type": "'"$setup_type"'" } }'
fi
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -408,7 +460,7 @@ if [[ $status_code -ne 200 ]]; then
exit 1
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"} }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"}, "setup_type": "'"$setup_type"'" }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -418,17 +470,25 @@ else
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
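# equivalent sketch with curl, posting the same PostHog capture payload:
#   curl -s -X POST -H "$HEADER" -d "$DATA" "$URL" > /dev/null 2>&1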
fi
echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
echo "Your installation is complete!"
echo ""
echo "Your frontend is running on 'http://localhost:3000'."
echo "🟢 Your installation is complete!"
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3000"
echo ""
if [ $setup_type == 'clickhouse' ]; then
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v"
else
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
fi
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "Need help Getting Started?"
echo "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "👉 Need help Getting Started?"
echo -e "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo ""
echo "Please share your email to receive support & updates about SigNoz!"
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
read -rp 'Email: ' email
while [[ $email == "" ]]
@@ -436,7 +496,7 @@ else
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -448,28 +508,4 @@ else
fi
echo -e "\nThank you!\n"
##### Changing default memory limit of docker ############
# # Check if memory is less and Confirm to increase size of docker machine
# # https://github.com/docker/machine/releases
# # On OS X
# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine && \
# chmod +x /usr/local/bin/docker-machine
# # On Linux
# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine &&
# chmod +x /tmp/docker-machine &&
# sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
# VBoxManage list vms
# docker-machine stop
# VBoxManage modifyvm default --cpus 2
# VBoxManage modifyvm default --memory 4096
# docker-machine start
# VBoxManage showvminfo default | grep Memory
# VBoxManage showvminfo default | grep CPU
echo -e "\n🙏 Thank you!\n"


@@ -5,64 +5,72 @@ metadata:
data:
supervisor-spec.json: |
{
"type": "kafka",
"dataSchema": {
"dataSource": "flattened_spans",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "StartTimeUnixNano",
"format": "nano"
},
"dimensionsSpec": {
"dimensions": [
"TraceId",
"SpanId",
"ParentSpanId",
"Name",
"ServiceName",
"References",
"Tags",
{
"type": "string",
"name": "TagsKeys",
"multiValueHandling": "ARRAY"
},
{
"type": "string",
"name": "TagsValues",
"multiValueHandling": "ARRAY"
},
{ "name": "DurationNano", "type": "Long" },
{ "name": "Kind", "type": "int" },
{ "name": "StatusCode", "type": "int" }
]
"type": "kafka",
"dataSchema": {
"dataSource": "flattened_spans",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "StartTimeUnixNano",
"format": "nano"
},
"dimensionsSpec": {
"dimensions": [
"TraceId",
"SpanId",
"ParentSpanId",
"Name",
"ServiceName",
"References",
"Tags",
"ExternalHttpMethod",
"ExternalHttpUrl",
"Component",
"DBSystem",
"DBName",
"DBOperation",
"PeerService",
{
"type": "string",
"name": "TagsKeys",
"multiValueHandling": "ARRAY"
},
{
"type": "string",
"name": "TagsValues",
"multiValueHandling": "ARRAY"
},
{ "name": "DurationNano", "type": "Long" },
{ "name": "Kind", "type": "int" },
{ "name": "StatusCode", "type": "int" }
]
}
}
},
"metricsSpec" : [
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": "NONE",
"rollup": false
}
},
"metricsSpec" : [
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": "NONE",
"rollup": false
}
},
"tuningConfig": {
"type": "kafka",
"reportParseExceptions": true
},
"ioConfig": {
"topic": "flattened_spans",
"replicas": 1,
"taskDuration": "PT20M",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "signoz-kafka:9092"
"tuningConfig": {
"type": "kafka",
"reportParseExceptions": true
},
"ioConfig": {
"topic": "flattened_spans",
"replicas": 1,
"taskDuration": "PT20M",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "signoz-kafka:9092"
}
}
}
}
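A supervisor spec like the one above is registered by POSTing it to the Druid router that the install script already polls; a sketch, assuming the spec has been saved locally as supervisor-spec.json:

    curl -X POST -H 'Content-Type: application/json' \
      -d @supervisor-spec.json \
      http://localhost:8888/druid/indexer/v1/supervisor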


@@ -8,14 +8,14 @@ metadata:
data:
otel-collector-config: |
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
otlp:
protocols:
grpc:
http:
processors:
batch:
send_batch_size: 1000
@@ -36,9 +36,16 @@ data:
health_check: {}
zpages: {}
exporters:
kafka:
kafka/traces:
brokers:
- signoz-kafka:9092
topic: 'otlp_spans'
protocol_version: 2.0.0
kafka/metrics:
brokers:
- signoz-kafka:9092
topic: 'otlp_metrics'
protocol_version: 2.0.0
service:
extensions: [health_check, zpages]
@@ -46,8 +53,8 @@ data:
traces:
receivers: [jaeger, otlp]
processors: [memory_limiter, batch, queued_retry]
exporters: [kafka]
exporters: [kafka/traces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [kafka]
exporters: [kafka/metrics]


@@ -13,9 +13,9 @@ dependencies:
version: 0.2.0
- name: query-service
repository: file://./signoz-charts/query-service
version: 0.2.0
version: 0.3.1
- name: frontend
repository: file://./signoz-charts/frontend
version: 0.2.1
digest: sha256:7ea89a82fabae53ff97cbdaddab0c9edf952a3d212237efc5897b32937d940fd
generated: "2021-05-02T23:16:58.998702+05:30"
version: 0.3.1
digest: sha256:ed5735a81c416a15b1e498f86a2ddb550ca0da9f5f445891561be0ef5d01b3b2
generated: "2021-06-08T22:35:14.109626+05:30"


@@ -15,12 +15,12 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.0
version: 0.2.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.2.0
appVersion: 0.2.2
dependencies:
- name: zookeeper
@@ -37,7 +37,7 @@ dependencies:
version: 0.2.0
- name: query-service
repository: "file://./signoz-charts/query-service"
version: 0.2.0
version: 0.3.1
- name: frontend
repository: "file://./signoz-charts/frontend"
version: 0.2.1
version: 0.3.1


@@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.2.1
version: 0.3.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.2.1
appVersion: 0.3.1


@@ -9,6 +9,16 @@ data:
server {
listen {{ .Values.service.port }};
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;
index index.html index.htm;


@@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.2.0
version: 0.3.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.2.0
appVersion: 0.3.1


@@ -36,7 +36,8 @@ spec:
value: {{ .Values.configVars.DruidClientUrl }}
- name: DruidDatasource
value: {{ .Values.configVars.DruidDatasource }}
- name: STORAGE
value: {{ .Values.configVars.STORAGE }}
# livenessProbe:
# httpGet:


@@ -16,6 +16,7 @@ fullnameOverride: ""
configVars:
DruidClientUrl: http://signoz-druid-router:8888
DruidDatasource: flattened_spans
STORAGE: druid
POSTHOG_API_KEY: "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w"


@@ -10,6 +10,9 @@ kafka:
zookeeperConnectionTimeoutMs: 6000
druid:
image:
tag: 0.21.1-rc2
configVars:
# To store data on local disks attached
@@ -45,3 +48,4 @@ query-service:
configVars:
DruidClientUrl: http://signoz-druid-router:8888
DruidDatasource: flattened_spans
STORAGE: druid


@@ -1,6 +1,15 @@
server {
listen 3000;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;

frontend/package-lock.json (generated, 21,590-line diff suppressed because it is too large)


@@ -12,9 +12,9 @@
"author": "",
"license": "ISC",
"dependencies": {
"@ant-design/icons": "^4.6.2",
"@auth0/auth0-react": "^1.2.0",
"@babel/core": "7.12.3",
"@material-ui/core": "^4.0.0",
"@pmmmwh/react-refresh-webpack-plugin": "0.4.2",
"@svgr/webpack": "5.4.0",
"@testing-library/jest-dom": "^5.11.4",
@@ -47,8 +47,6 @@
"chart.js": "^2.9.4",
"css-loader": "4.3.0",
"d3": "^6.2.0",
"d3-array": "^2.8.0",
"d3-ease": "^2.0.0",
"d3-flame-graph": "^3.1.1",
"d3-tip": "^0.9.1",
"dotenv": "8.2.0",
@@ -71,11 +69,7 @@
"jest-circus": "26.6.0",
"jest-resolve": "26.6.0",
"jest-watch-typeahead": "0.6.1",
"material-ui-chip-input": "^2.0.0-beta.2",
"mini-css-extract-plugin": "0.11.3",
"optimize-css-assets-webpack-plugin": "5.0.4",
"pnp-webpack-plugin": "1.6.4",
"postcss-flexbugs-fixes": "4.2.1",
"postcss-loader": "3.0.0",
"postcss-normalize": "8.0.1",
"postcss-preset-env": "6.7.0",
@@ -88,13 +82,13 @@
"react-css-theme-switcher": "^0.1.6",
"react-dev-utils": "^11.0.0",
"react-dom": "17.0.0",
"react-force-graph": "^1.41.0",
"react-graph-vis": "^1.0.5",
"react-modal": "^3.12.1",
"react-redux": "^7.2.2",
"react-refresh": "^0.8.3",
"react-router-dom": "^5.2.0",
"react-vis": "^1.11.7",
"recharts": "^1.8.5",
"redux": "^4.0.5",
"redux-thunk": "^2.3.0",
"resolve": "1.18.1",
@@ -140,6 +134,7 @@
"@babel/preset-typescript": "^7.12.17",
"autoprefixer": "^9.0.0",
"babel-plugin-styled-components": "^1.12.0",
"compression-webpack-plugin": "^8.0.0",
"copy-webpack-plugin": "^7.0.0",
"gulp": "^4.0.2",
"gulp-csso": "^4.0.1",


@@ -1,18 +1,8 @@
@import "~antd/dist/antd.dark.css";
@import "~antd/dist/antd.compact.css";
.ant-space-item {
margin-right: 0 !important;
}
/* #components-layout-demo-side .logo {
height: 32px;
margin: 16px;
background: rgba(255, 255, 255, 0.3);
}
.site-layout .site-layout-background {
background: #fff;
} */
.instrument-card{
border-radius: 4px;
background: #313131;


@@ -1,3 +1,3 @@
export enum LOCAL_STORAGE {
METRICS_TIME_IN_DURATION = "metricsTimeDuration",
METRICS_TIME_IN_DURATION = "metricsTimeDurations",
}
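The renamed key now stores a JSON object of per-route durations rather than a single duration string; a sketch of the persisted shape (the route keys shown are hypothetical):

    // hypothetical contents under LOCAL_STORAGE.METRICS_TIME_IN_DURATION
    const stored = { "/application": "5min", "/service-map": "1min" };
    localStorage.setItem("metricsTimeDurations", JSON.stringify(stored));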


@@ -1,17 +1,11 @@
import React, { Suspense } from "react";
import { Layout, Spin } from "antd";
import { Spin } from "antd";
import { useThemeSwitcher } from "react-css-theme-switcher";
import ROUTES from "Src/constants/routes";
import { IS_LOGGED_IN } from "Src/constants/auth";
import {
BrowserRouter as Router,
Route,
Switch,
Redirect,
} from "react-router-dom";
import { BrowserRouter, Route, Switch, Redirect } from "react-router-dom";
import SideNav from "./Nav/SideNav";
import TopNav from "./Nav/TopNav";
import BaseLayout from "./BaseLayout";
import {
ServiceMetrics,
ServiceMap,
@@ -23,8 +17,7 @@ import {
SettingsPage,
IntstrumentationPage,
} from "Src/pages";
const { Content, Footer } = Layout;
import { RouteProvider } from "./RouteProvider";
const App = () => {
const { status } = useThemeSwitcher();
@@ -34,47 +27,53 @@ const App = () => {
}
return (
<Router basename="/">
<Layout style={{ minHeight: "100vh" }}>
<SideNav />
<Layout className="site-layout">
<Content style={{ margin: "0 16px" }}>
<TopNav />
<Suspense fallback={<Spin size="large" />}>
<Switch>
<Route path={ROUTES.SIGN_UP} component={Signup} />
<Route path={ROUTES.SERVICE_METRICS} component={ServiceMetrics} />
<Route path={ROUTES.SERVICE_MAP} component={ServiceMap} />
<Route path={ROUTES.TRACES} exact component={TraceDetail} />
<Route path={ROUTES.TRACE_GRAPH} component={TraceGraph} />
<Route path={ROUTES.SETTINGS} exact component={SettingsPage} />
<Route
path={ROUTES.INSTRUMENTATION}
exact
component={IntstrumentationPage}
/>
<Route path={ROUTES.USAGE_EXPLORER} component={UsageExplorer} />
<Route path={ROUTES.APPLICATION} exact component={ServicesTable} />
<Route
path="/"
exact
render={() => {
return localStorage.getItem(IS_LOGGED_IN) === "yes" ? (
<Redirect to={ROUTES.APPLICATION} />
) : (
<Redirect to={ROUTES.SIGN_UP} />
);
}}
/>
</Switch>
</Suspense>
</Content>
<Footer style={{ textAlign: "center", fontSize: 10 }}>
SigNoz Inc. ©2020{" "}
</Footer>
</Layout>
</Layout>
</Router>
<BrowserRouter>
<Suspense fallback={<Spin size="large" />}>
<Route path={"/"}>
<Switch>
<RouteProvider>
<BaseLayout>
<Suspense fallback={<Spin size="large" />}>
<Route path={ROUTES.SIGN_UP} exact component={Signup} />
<Route path={ROUTES.APPLICATION} exact component={ServicesTable} />
<Route
path={ROUTES.SERVICE_METRICS}
exact
component={ServiceMetrics}
/>
<Route path={ROUTES.SERVICE_MAP} exact component={ServiceMap} />
<Route path={ROUTES.TRACES} exact component={TraceDetail} />
<Route path={ROUTES.TRACE_GRAPH} exact component={TraceGraph} />
<Route path={ROUTES.SETTINGS} exact component={SettingsPage} />
<Route
path={ROUTES.INSTRUMENTATION}
exact
component={IntstrumentationPage}
/>
<Route
path={ROUTES.USAGE_EXPLORER}
exact
component={UsageExplorer}
/>
<Route
path="/"
exact
render={() => {
return localStorage.getItem(IS_LOGGED_IN) === "yes" ? (
<Redirect to={ROUTES.APPLICATION} />
) : (
<Redirect to={ROUTES.SIGN_UP} />
);
}}
/>
</Suspense>
</BaseLayout>
</RouteProvider>
</Switch>
</Route>
</Suspense>
</BrowserRouter>
);
};


@@ -0,0 +1,39 @@
import React, { ReactNode, useEffect } from "react";
import { Layout } from "antd";
import SideNav from "./Nav/SideNav";
import TopNav from "./Nav/TopNav";
import { useLocation } from "react-router-dom";
import { useRoute } from "./RouteProvider";
const { Content, Footer } = Layout;
interface BaseLayoutProps {
children: ReactNode;
}
const BaseLayout: React.FC<BaseLayoutProps> = ({ children }) => {
const location = useLocation();
const { dispatch } = useRoute();
useEffect(() => {
dispatch({ type: "ROUTE_IS_LOADED", payload: location.pathname });
}, [location]);
return (
<Layout style={{ minHeight: "100vh" }}>
<SideNav />
<Layout className="site-layout">
<Content style={{ margin: "0 16px" }}>
<TopNav />
{children}
</Content>
<Footer style={{ textAlign: "center", fontSize: 10 }}>
SigNoz Inc. ©2020{" "}
</Footer>
</Layout>
</Layout>
);
};
export default BaseLayout;


@@ -6,7 +6,7 @@ import { RouteComponentProps } from "react-router-dom";
import styled from "styled-components";
import ROUTES from "Src/constants/routes";
import { metricItem } from "../../store/actions/metrics";
import { metricItem } from "../../store/actions/MetricsActions";
const ChartPopUpUnique = styled.div<{
ycoordinate: number;
@@ -55,7 +55,7 @@ class ErrorRateChart extends React.Component<ErrorRateChartProps> {
xcoordinate: 0,
ycoordinate: 0,
showpopUp: false,
firstpoint_ts: 0
firstpoint_ts: 0,
// graphInfo:{}
};
@@ -188,7 +188,7 @@ class ErrorRateChart extends React.Component<ErrorRateChartProps> {
ycoordinate={this.state.ycoordinate}
>
<PopUpElements onClick={this.gotoTracesHandler}>View Traces</PopUpElements>
<PopUpElements onClick={this.gotoAlertsHandler}>Set Alerts</PopUpElements>
{/* <PopUpElements onClick={this.gotoAlertsHandler}>Set Alerts</PopUpElements> */}
</ChartPopUpUnique>
);
} else return null;


@@ -1,11 +1,9 @@
import React from "react";
import { Line as ChartJSLine } from "react-chartjs-2";
import { ChartOptions } from "chart.js";
import { withRouter } from "react-router";
import { RouteComponentProps } from "react-router-dom";
import styled from "styled-components";
import { getOptions, borderColors } from "./graphConfig";
import { externalMetricsItem } from "../../store/actions/metrics";
import { externalMetricsItem } from "../../../store/actions/MetricsActions";
import { uniqBy, filter } from "lodash";
const theme = "dark";


@@ -2,7 +2,7 @@ import React from "react";
import { Bar, Line as ChartJSLine } from "react-chartjs-2";
import styled from "styled-components";
import { customMetricsItem } from "../../store/actions/metrics";
import { customMetricsItem } from "../../store/actions/MetricsActions";
const GenVisualizationWrapper = styled.div`
height: 160px;


@@ -4,9 +4,7 @@ import { ChartOptions } from "chart.js";
import { withRouter } from "react-router";
import { RouteComponentProps } from "react-router-dom";
import styled from "styled-components";
import ROUTES from "Src/constants/routes";
import { metricItem } from "../../store/actions/metrics";
import { metricItem } from "../../store/actions/MetricsActions";
const ChartPopUpUnique = styled.div<{
ycoordinate: number;
@@ -39,11 +37,8 @@ interface LatencyLineChartProps extends RouteComponentProps<any> {
popupClickHandler: Function;
}
interface LatencyLineChart {
chartRef: any;
}
class LatencyLineChart extends React.Component<LatencyLineChartProps> {
private chartRef: React.RefObject<HTMLElement>;
constructor(props: LatencyLineChartProps) {
super(props);
this.chartRef = React.createRef();
@@ -54,7 +49,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
ycoordinate: 0,
showpopUp: false,
firstpoint_ts: 0,
// graphInfo:{}
};
onClickhandler = async (e: any, event: any) => {
@@ -69,7 +63,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
ycoordinate: e.offsetY,
showpopUp: true,
firstpoint_ts: this.props.data[firstPoint._index].timestamp,
// graphInfo:{...event}
});
} else {
// if clicked outside of the graph line, then firstpoint is undefined -> close popup.
@@ -80,15 +73,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
}
};
gotoTracesHandler = (xc: any) => {
this.props.history.push(ROUTES.TRACES);
};
gotoAlertsHandler = () => {
this.props.history.push(ROUTES.SERVICE_MAP);
// PNOTE - Keeping service map for now, will replace with alerts when alert page is made
};
options_charts: ChartOptions = {
onClick: this.onClickhandler,
@@ -161,9 +145,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
xAxes: [
{
type: "time",
// time: {
// unit: 'second'
// },
distribution: "linear",
//'linear': data are spread according to their time (distances can vary)
// From https://www.chartjs.org/docs/latest/axes/cartesian/time.html
@@ -193,7 +174,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
>
View Traces
</PopUpElements>
<PopUpElements onClick={this.gotoAlertsHandler}>Set Alerts</PopUpElements>
</ChartPopUpUnique>
);
} else return null;
@@ -218,8 +198,8 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
borderWidth: 2,
},
{
label: "p90 Latency",
data: ndata.map((s) => s.p90 / 1000000), //converting latency from nano sec to ms
label: "p95 Latency",
data: ndata.map((s) => s.p95 / 1000000), //converting latency from nano sec to ms
pointRadius: 0.5,
borderColor: "rgba(227, 74, 51, 1.0)",
borderWidth: 2,
@@ -239,7 +219,7 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
<div>
{this.GraphTracePopUp()}
<div>
<div style={{textAlign: "center"}}>Application latency in ms</div>
<div style={{ textAlign: "center" }}>Application latency in ms</div>
<ChartJSLine
ref={this.chartRef}
data={data_chartJS}


@@ -5,7 +5,7 @@ import { withRouter } from "react-router";
import { RouteComponentProps } from "react-router-dom";
import styled from "styled-components";
import { metricItem } from "../../store/actions/metrics";
import { metricItem } from "../../store/actions/MetricsActions";
import ROUTES from "Src/constants/routes";
const ChartPopUpUnique = styled.div<{
@@ -177,7 +177,7 @@ class RequestRateChart extends React.Component<RequestRateChartProps> {
ycoordinate={this.state.ycoordinate}
>
<PopUpElements onClick={this.gotoTracesHandler}>View Traces</PopUpElements>
<PopUpElements onClick={this.gotoAlertsHandler}>Set Alerts</PopUpElements>
{/* <PopUpElements onClick={this.gotoAlertsHandler}>Set Alerts</PopUpElements> */}
</ChartPopUpUnique>
);
} else return null;


@@ -4,23 +4,24 @@ import { connect } from "react-redux";
import { useParams, RouteComponentProps } from "react-router-dom";
import { withRouter } from "react-router";
import ROUTES from "Src/constants/routes";
import { GlobalTime, updateTimeInterval } from "Src/store/actions";
import {
getServicesMetrics,
metricItem,
getTopEndpoints,
getDbOverViewMetrics,
getExternalMetrics,
externalMetricsAvgDurationItem,
externalErrCodeMetricsItem,
externalMetricsItem,
dbOverviewMetricsItem,
topEndpointListItem,
} from "../../store/actions/MetricsActions";
import {
getServicesMetrics,
getTopEndpoints,
getDbOverViewMetrics,
getExternalMetrics,
getExternalAvgDurationMetrics,
getExternalErrCodeMetrics,
topEndpointListItem,
GlobalTime,
updateTimeInterval,
} from "Src/store/actions";
} from "../../store/actions/MetricsActions";
import { StoreState } from "../../store/reducers";
import LatencyLineChart from "./LatencyLineChart";
import RequestRateChart from "./RequestRateChart";
@@ -223,13 +224,13 @@ const mapStateToProps = (
globalTime: GlobalTime;
} => {
return {
externalErrCodeMetrics: state.externalErrCodeMetrics,
serviceMetrics: state.serviceMetrics,
topEndpointsList: state.topEndpointsList,
externalMetrics: state.externalMetrics,
externalErrCodeMetrics: state.metricsData.externalErrCodeMetricsItem,
serviceMetrics: state.metricsData.metricItems,
topEndpointsList: state.metricsData.topEndpointListItem,
externalMetrics: state.metricsData.externalMetricsItem,
globalTime: state.globalTime,
dbOverviewMetrics: state.dbOverviewMetrics,
externalAvgDurationMetrics: state.externalAvgDurationMetrics,
dbOverviewMetrics: state.metricsData.dbOverviewMetricsItem,
externalAvgDurationMetrics: state.metricsData.externalMetricsAvgDurationItem,
};
};


@@ -1,17 +1,12 @@
import React, { useEffect, useState } from "react";
import { useLocation } from "react-router-dom";
import { NavLink } from "react-router-dom";
import { Button, Space, Spin, Table } from "antd";
import styled from "styled-components";
import { connect } from "react-redux";
import { SKIP_ONBOARDING } from "Src/constants/onboarding";
import ROUTES from "Src/constants/routes";
import {
getServicesList,
GlobalTime,
servicesListItem,
} from "../../store/actions";
import { getServicesList, GlobalTime } from "../../store/actions";
import { servicesListItem } from "../../store/actions/MetricsActions";
import { StoreState } from "../../store/reducers";
import { CustomModal } from "../../components/Modal";
@@ -75,7 +70,7 @@ const columns = [
key: "errorRate",
sorter: (a: any, b: any) => a.errorRate - b.errorRate,
// sortDirections: ['descend', 'ascend'],
render: (value: number) => (value).toFixed(2),
render: (value: number) => value.toFixed(2),
},
{
title: "Requests Per Second",
@@ -88,8 +83,6 @@ const columns = [
];
const _ServicesTable = (props: ServicesTableProps) => {
const search = useLocation().search;
const time_interval = new URLSearchParams(search).get("time");
const [initialDataFetch, setDataFetched] = useState(false);
const [errorObject, setErrorObject] = useState({
message: "",
@@ -210,7 +203,10 @@ const _ServicesTable = (props: ServicesTableProps) => {
const mapStateToProps = (
state: StoreState,
): { servicesList: servicesListItem[]; globalTime: GlobalTime } => {
return { servicesList: state.servicesList, globalTime: state.globalTime };
return {
servicesList: state.metricsData.serviceList,
globalTime: state.globalTime,
};
};
export const ServicesTable = connect(mapStateToProps, {


@@ -0,0 +1,12 @@
@media only screen and (min-width: 768px) {
.topEndpointsButton {
white-space: nowrap;
padding: 0;
}
.topEndpointsButton span {
text-overflow: ellipsis;
overflow: hidden;
max-width: 120px;
}
}


@@ -1,18 +1,22 @@
import React from "react";
import { Table, Button } from "antd";
import { Table, Button, Tooltip } from "antd";
import { connect } from "react-redux";
import styled from "styled-components";
import { useHistory, useParams } from "react-router-dom";
import { topEndpointListItem } from "../../store/actions/metrics";
import { topEndpointListItem } from "../../store/actions/MetricsActions";
import { METRICS_PAGE_QUERY_PARAM } from "Src/constants/query";
import { GlobalTime } from "Src/store/actions";
import { StoreState } from "Src/store/reducers";
import "./TopEndpointsTable.css";
const Wrapper = styled.div`
padding-top: 10px;
padding-bottom: 10px;
padding-left: 20px;
padding-right: 20px;
padding-left: 8px;
padding-right: 8px;
@media only screen and (max-width: 767px) {
padding: 0;
}
.ant-table table {
font-size: 12px;
}
@@ -22,6 +26,9 @@ const Wrapper = styled.div`
.ant-table-thead > tr > th {
padding: 10px;
}
.ant-table-column-sorters {
padding: 6px;
}
`;
interface TopEndpointsTableProps {
@@ -58,9 +65,15 @@ const _TopEndpointsTable = (props: TopEndpointsTableProps) => {
key: "name",
render: (text: string) => (
<Button type="link" onClick={() => handleOnClick(text)}>
{text}
</Button>
<Tooltip placement="topLeft" title={text}>
<Button
className="topEndpointsButton"
type="link"
onClick={() => handleOnClick(text)}
>
{text}
</Button>
</Tooltip>
),
},
{
@@ -72,10 +85,10 @@ const _TopEndpointsTable = (props: TopEndpointsTableProps) => {
render: (value: number) => (value / 1000000).toFixed(2),
},
{
title: "P90 (in ms)",
dataIndex: "p90",
key: "p90",
sorter: (a: any, b: any) => a.p90 - b.p90,
title: "P95 (in ms)",
dataIndex: "p95",
key: "p95",
sorter: (a: any, b: any) => a.p95 - b.p95,
// sortDirections: ['descend', 'ascend'],
render: (value: number) => (value / 1000000).toFixed(2),
},


@@ -1,27 +1,32 @@
import React, { useEffect, useState } from "react";
import { Select, Button, Space, Form } from "antd";
import { cloneDeep } from "lodash";
import { Select as DefaultSelect, Button, Space, Form } from "antd";
import styled from "styled-components";
import { withRouter } from "react-router";
import { getLocalStorageRouteKey } from "./utils";
import { RouteComponentProps, useLocation } from "react-router-dom";
import { connect } from "react-redux";
import ROUTES from "Src/constants/routes";
import CustomDateTimeModal from "./CustomDateTimeModal";
import { GlobalTime, updateTimeInterval } from "../../../store/actions";
import { StoreState } from "../../../store/reducers";
import FormItem from "antd/lib/form/FormItem";
import {
Options,
ServiceMapOptions,
DefaultOptionsBasedOnRoute,
} from "./config";
import { DateTimeRangeType } from "../../../store/actions";
import { METRICS_PAGE_QUERY_PARAM } from "Src/constants/query";
import { LOCAL_STORAGE } from "Src/constants/localStorage";
import moment from "moment";
const { Option } = Select;
const { Option } = DefaultSelect;
const DateTimeWrapper = styled.div`
margin-top: 20px;
justify-content: flex-end !important;
`;
const Select = styled(DefaultSelect)``;
interface DateTimeSelectorProps extends RouteComponentProps<any> {
currentpath?: string;
updateTimeInterval: Function;
@@ -32,21 +37,34 @@ interface DateTimeSelectorProps extends RouteComponentProps<any> {
This component is mounted all the time. Use an event listener to track changes.
*/
const _DateTimeSelector = (props: DateTimeSelectorProps) => {
const defaultTime = "30min";
const location = useLocation();
const LocalStorageRouteKey: string = getLocalStorageRouteKey(
location.pathname,
);
const timeDurationInLocalStorage =
JSON.parse(localStorage.getItem(LOCAL_STORAGE.METRICS_TIME_IN_DURATION)) ||
{};
const options =
location.pathname === ROUTES.SERVICE_MAP ? ServiceMapOptions : Options;
let defaultTime = DefaultOptionsBasedOnRoute[LocalStorageRouteKey]
? DefaultOptionsBasedOnRoute[LocalStorageRouteKey]
: DefaultOptionsBasedOnRoute.default;
if (timeDurationInLocalStorage[LocalStorageRouteKey]) {
defaultTime = timeDurationInLocalStorage[LocalStorageRouteKey];
}
const [currentLocalStorageRouteKey, setCurrentLocalStorageRouteKey] = useState(
LocalStorageRouteKey,
);
const [customDTPickerVisible, setCustomDTPickerVisible] = useState(false);
const [timeInterval, setTimeInterval] = useState(defaultTime);
const [startTime, setStartTime] = useState<moment.Moment | null>(null);
const [endTime, setEndTime] = useState<moment.Moment | null>(null);
const [refreshButtonHidden, setRefreshButtonHidden] = useState(false);
const [refreshText, setRefreshText] = useState("");
const [refreshButtonClick, setRefreshButtoClick] = useState(0);
const [refreshButtonClick, setRefreshButtonClick] = useState(0);
const [form_dtselector] = Form.useForm();
const location = useLocation();
const updateTimeOnQueryParamChange = () => {
const timeDurationInLocalStorage = localStorage.getItem(
LOCAL_STORAGE.METRICS_TIME_IN_DURATION,
);
const urlParams = new URLSearchParams(location.search);
const intervalInQueryParam = urlParams.get(METRICS_PAGE_QUERY_PARAM.interval);
const startTimeString = urlParams.get(METRICS_PAGE_QUERY_PARAM.startTime);
@@ -62,36 +80,46 @@ const _DateTimeSelector = (props: DateTimeSelectorProps) => {
const startTime = moment(Number(startTimeString));
const endTime = moment(Number(endTimeString));
setCustomTime(startTime, endTime, true);
} else if (currentLocalStorageRouteKey !== LocalStorageRouteKey) {
setMetricsTimeInterval(defaultTime);
setCurrentLocalStorageRouteKey(LocalStorageRouteKey);
}
// first pref: handle intervalInQueryParam
else if (intervalInQueryParam) {
window.localStorage.setItem(
LOCAL_STORAGE.METRICS_TIME_IN_DURATION,
intervalInQueryParam,
);
setMetricsTimeInterval(intervalInQueryParam);
} else if (timeDurationInLocalStorage) {
setMetricsTimeInterval(timeDurationInLocalStorage);
}
};
const setToLocalStorage = (val: string) => {
let timeDurationInLocalStorageObj = cloneDeep(timeDurationInLocalStorage);
if (timeDurationInLocalStorageObj) {
timeDurationInLocalStorageObj[LocalStorageRouteKey] = val;
} else {
timeDurationInLocalStorageObj = {
[LocalStorageRouteKey]: val,
};
}
window.localStorage.setItem(
LOCAL_STORAGE.METRICS_TIME_IN_DURATION,
JSON.stringify(timeDurationInLocalStorageObj),
);
};
useEffect(() => {
setMetricsTimeInterval(defaultTime);
}, []);
// On URL Change
useEffect(() => {
updateTimeOnQueryParamChange();
}, [location]);
//On mount
useEffect(() => {
updateTimeOnQueryParamChange();
}, []);
const setMetricsTimeInterval = (value: string) => {
props.updateTimeInterval(value);
setTimeInterval(value);
setEndTime(null);
setStartTime(null);
window.localStorage.setItem(LOCAL_STORAGE.METRICS_TIME_IN_DURATION, value);
setToLocalStorage(value);
};
const setCustomTime = (
startTime: moment.Moment,
@@ -173,7 +201,7 @@ const _DateTimeSelector = (props: DateTimeSelectorProps) => {
};
const handleRefresh = () => {
setRefreshButtoClick(refreshButtonClick + 1);
setRefreshButtonClick(refreshButtonClick + 1);
setMetricsTimeInterval(timeInterval);
};
@@ -187,15 +215,6 @@ const _DateTimeSelector = (props: DateTimeSelectorProps) => {
};
}, [props.location, refreshButtonClick]);
const options = [
{ value: "custom", label: "Custom" },
{ value: "15min", label: "Last 15 min" },
{ value: "30min", label: "Last 30 min" },
{ value: "1hr", label: "Last 1 hour" },
{ value: "6hr", label: "Last 6 hour" },
{ value: "1day", label: "Last 1 day" },
{ value: "1week", label: "Last 1 week" },
];
if (props.location.pathname.startsWith(ROUTES.USAGE_EXPLORER)) {
return null;
} else {
@@ -205,6 +224,7 @@ const _DateTimeSelector = (props: DateTimeSelectorProps) => {
"YYYY/MM/DD HH:mm",
)}`
: timeInterval;
return (
<DateTimeWrapper>
<Space style={{ float: "right", display: "block" }}>
@@ -256,8 +276,10 @@ const mapStateToProps = (state: StoreState): { globalTime: GlobalTime } => {
return { globalTime: state.globalTime };
};
export const DateTimeSelector = connect(mapStateToProps, {
updateTimeInterval: updateTimeInterval,
})(_DateTimeSelector);
export const DateTimeSelector = withRouter(
connect(mapStateToProps, {
updateTimeInterval: updateTimeInterval,
})(_DateTimeSelector),
);
export default withRouter(DateTimeSelector);
export default DateTimeSelector;


@@ -0,0 +1,24 @@
import ROUTES from "Src/constants/routes";
export const Options = [
{ value: "5min", label: "Last 5 min" },
{ value: "15min", label: "Last 15 min" },
{ value: "30min", label: "Last 30 min" },
{ value: "1hr", label: "Last 1 hour" },
{ value: "6hr", label: "Last 6 hour" },
{ value: "1day", label: "Last 1 day" },
{ value: "1week", label: "Last 1 week" },
{ value: "custom", label: "Custom" },
];
export const ServiceMapOptions = [
{ value: "1min", label: "Last 1 min" },
{ value: "5min", label: "Last 5 min" },
];
export const DefaultOptionsBasedOnRoute = {
[ROUTES.SERVICE_MAP]: ServiceMapOptions[0].value,
[ROUTES.APPLICATION]: Options[0].value,
[ROUTES.SERVICE_METRICS]: Options[2].value,
default: Options[2].value,
};


@@ -0,0 +1,18 @@
import ROUTES from "Src/constants/routes";
export const getLocalStorageRouteKey = (pathName: string) => {
let localStorageKey = "";
const pathNameSplit = pathName.split("/");
if (!pathNameSplit[2]) {
localStorageKey = pathName;
} else {
Object.keys(ROUTES).forEach((key) => {
if (ROUTES[key].indexOf(":") > -1) {
if (ROUTES[key].indexOf(pathNameSplit[1]) > -1) {
localStorageKey = ROUTES[key];
}
}
});
}
return localStorageKey;
};
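A usage sketch (the concrete ROUTES values are assumptions):

    getLocalStorageRouteKey("/service-map");   // "/service-map" (no second path segment)
    getLocalStorageRouteKey("/traces/abc123"); // a parameterized entry such as "/traces/:id"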


@@ -0,0 +1,83 @@
import React, { useContext, createContext, ReactNode, Dispatch } from "react";
import ROUTES from "Src/constants/routes";
type State = {
[key: string]: {
route: string;
isLoaded: boolean;
};
};
enum ActionTypes {
UPDATE_IS_LOADED = "ROUTE_IS_LOADED",
}
type Action = {
type: ActionTypes;
payload: string;
};
interface ContextType {
state: State;
dispatch: Dispatch<Action>;
}
const RouteContext = createContext<ContextType | null>(null);
interface RouteProviderProps {
children: ReactNode;
}
interface RouteObj {
[key: string]: {
route: string;
isLoaded: boolean;
};
}
const updateLocation = (state: State, action: Action): State => {
if (action.type === ActionTypes.UPDATE_IS_LOADED) {
/*
Update the isLoaded property in routes obj
if the route matches the current pathname
Why: see this issue https://github.com/SigNoz/signoz/issues/110
To avoid calling the APIs twice from the Date picker,
we only call them once the route has changed
*/
Object.keys(ROUTES).map((items) => {
state[items].isLoaded = state[items].route === action.payload;
});
return {
...state,
};
}
return {
...state,
};
};
const getInitialState = () => {
const routes: RouteObj = {};
Object.keys(ROUTES).map((items) => {
routes[items] = {
route: `${ROUTES[items]}`,
isLoaded: false,
};
});
return routes;
};
const RouteProvider: React.FC<RouteProviderProps> = ({ children }) => {
const [state, dispatch] = React.useReducer(updateLocation, getInitialState());
const value = { state, dispatch };
return <RouteContext.Provider value={value}>{children}</RouteContext.Provider>;
};
const useRoute = (): ContextType => {
const context = useContext(RouteContext);
if (!context) {
throw new Error("useRoute must be used within a RouteProvider");
}
return context as ContextType;
};
export { RouteProvider, useRoute };


@@ -0,0 +1,75 @@
import React, { useState } from "react";
import { servicesItem } from "Src/store/actions";
import { InfoCircleOutlined } from "@ant-design/icons";
import { Select } from "antd";
import styled from "styled-components";
const { Option } = Select;
import { cloneDeep } from "lodash";
const Container = styled.div`
margin-top: 12px;
display: flex;
.info {
display:flex;
font-family: Roboto;
margin-left: auto;
margin-right: 12px;
color: #4F4F4F;
font-size: 14px;
.anticon-info-circle {
margin-top: 22px;
margin-right: 18px;
}
}
`;
interface SelectServiceProps {
services: servicesItem[];
zoomToService: (arg0: string) => void;
zoomToDefault: () => void;
}
const defaultOption = {
serviceName: "Default"
};
const SelectService = (props: SelectServiceProps) => {
const [selectedVal, setSelectedVal] = useState<string>(defaultOption.serviceName);
const { zoomToService, zoomToDefault } = props;
const services = cloneDeep(props.services);
services.unshift(defaultOption)
const handleSelect = (value: string) => {
if(value === defaultOption.serviceName){
zoomToDefault()
} else {
zoomToService(value);
}
setSelectedVal(value);
};
return (
<Container>
<Select
style={{ width: 270, marginBottom: "56px" }}
placeholder="Select a service"
onChange={handleSelect}
value={selectedVal}
>
{services.map(({ serviceName }) => (
<Option key={serviceName} value={serviceName}>
{serviceName}
</Option>
))}
</Select>
<div className='info'>
<InfoCircleOutlined />
<div>
<div>-> Size of circles is proportional to the number of requests served by each node </div>
<div>-> Click on node name to reposition the node</div>
</div>
</div>
</Container>
);
};
export default SelectService;


@@ -1,71 +0,0 @@
import React from "react";
// import {useState} from "react";
import Graph from "react-graph-vis";
// import { graphEvents } from "react-graph-vis";
//PNOTE - types of react-graph-vis defined in typings folder.
//How is it imported directly?
// type definition for service graph - https://github.com/crubier/react-graph-vis/issues/80
// Set shapes - https://visjs.github.io/vis-network/docs/network/nodes.html#
// https://github.com/crubier/react-graph-vis/issues/93
const graph = {
nodes: [
{
id: 1,
label: "Catalogue",
shape: "box",
color: "green",
border: "black",
size: 100,
},
{ id: 2, label: "Users", shape: "box", color: "#FFFF00" },
{ id: 3, label: "Payment App", shape: "box", color: "#FB7E81" },
{ id: 4, label: "My Sql", shape: "box", size: 10, color: "#7BE141" },
{ id: 5, label: "Redis-db", shape: "box", color: "#6E6EFD" },
],
edges: [
{ from: 1, to: 2, color: { color: "red" }, size: { size: 20 } },
{ from: 2, to: 3, color: { color: "red" } },
{ from: 1, to: 3, color: { color: "red" } },
{ from: 3, to: 4, color: { color: "red" } },
{ from: 3, to: 5, color: { color: "red" } },
],
};
const options = {
layout: {
hierarchical: true,
},
edges: {
color: "#000000",
},
height: "500px",
};
// const events = {
// select: function(event:any) { //PNOTE - TO DO - Get rid of any type
// var { nodes, edges } = event;
// }
// };
const ServiceGraph = () => {
// const [network, setNetwork] = useState(null);
return (
<React.Fragment>
<div> Updated Service Graph module coming soon..</div>
<Graph
graph={graph}
options={options}
// events={events}
// getNetwork={network => {
// // if you want access to vis.js network api you can set the state in a parent component using this property
// }}
/>
</React.Fragment>
);
};
export default ServiceGraph;


@@ -1,14 +1,156 @@
import React from "react";
import ServiceGraph from "./ServiceGraph";
import React, { useEffect, useRef } from "react";
import { connect } from "react-redux";
import { RouteComponentProps } from "react-router-dom";
import {
GlobalTime,
serviceMapStore,
getServiceMapItems,
getDetailedServiceMapItems,
} from "Src/store/actions";
import { Spin } from "antd";
import styled from "styled-components";
import { StoreState } from "../../store/reducers";
const ServiceMap = () => {
import { getZoomPx, getGraphData, getTooltip, transformLabel } from "./utils";
import SelectService from "./SelectService";
import { ForceGraph2D } from "react-force-graph";
import { useRoute } from "../RouteProvider";
const Container = styled.div`
.force-graph-container .graph-tooltip {
background: black;
padding: 1px;
.keyval {
display: flex;
.key {
margin-right: 4px;
}
.val {
margin-left: auto;
}
}
}
`;
interface ServiceMapProps extends RouteComponentProps<any> {
serviceMap: serviceMapStore;
globalTime: GlobalTime;
getServiceMapItems: Function;
getDetailedServiceMapItems: Function;
}
interface graphNode {
id: string;
group: number;
}
interface graphLink {
source: string;
target: string;
value: number;
}
export interface graphDataType {
nodes: graphNode[];
links: graphLink[];
}
const ServiceMap = (props: ServiceMapProps) => {
const fgRef = useRef();
const { state } = useRoute();
const {
getDetailedServiceMapItems,
getServiceMapItems,
globalTime,
serviceMap,
} = props;
useEffect(() => {
/*
Call the apis only when the route is loaded.
Check this issue: https://github.com/SigNoz/signoz/issues/110
*/
if (state.SERVICE_MAP.isLoaded) {
getServiceMapItems(globalTime);
getDetailedServiceMapItems(globalTime);
}
}, [globalTime]);
useEffect(() => {
fgRef.current && fgRef.current.d3Force("charge").strength(-400);
});
if (!serviceMap.items.length || !serviceMap.services.length) {
return <Spin />;
}
const zoomToService = (value: string) => {
fgRef.current && fgRef.current.zoomToFit(700, getZoomPx(), (e) => e.id === value);
};
const zoomToDefault = () => {
fgRef.current && fgRef.current.zoomToFit(100, 120);
};
const { nodes, links } = getGraphData(serviceMap);
const graphData = { nodes, links };
return (
<div>
{" "}
Service Map module coming soon...
{/* <ServiceGraph /> */}
</div>
<Container>
<SelectService
services={serviceMap.services}
zoomToService={zoomToService}
zoomToDefault={zoomToDefault}
/>
<ForceGraph2D
ref={fgRef}
cooldownTicks={100}
graphData={graphData}
nodeLabel={getTooltip}
linkAutoColorBy={(d) => d.target}
linkDirectionalParticles="value"
linkDirectionalParticleSpeed={(d) => d.value}
nodeCanvasObject={(node, ctx, globalScale) => {
const label = transformLabel(node.id);
const fontSize = node.fontSize;
ctx.font = `${fontSize}px Roboto`;
const width = node.width;
ctx.fillStyle = node.color;
ctx.beginPath();
ctx.arc(node.x, node.y, width, 0, 2 * Math.PI, false);
ctx.fill();
ctx.textAlign = "center";
ctx.textBaseline = "middle";
ctx.fillStyle = "#646464";
ctx.fillText(label, node.x, node.y);
}}
onNodeClick={(node) => {
const tooltip = document.querySelector(".graph-tooltip");
if (tooltip && node) {
tooltip.innerHTML = getTooltip(node);
}
}}
nodePointerAreaPaint={(node, color, ctx) => {
ctx.fillStyle = color;
ctx.beginPath();
ctx.arc(node.x, node.y, 5, 0, 2 * Math.PI, false);
ctx.fill();
}}
/>
</Container>
);
};
export default ServiceMap;
const mapStateToProps = (
state: StoreState,
): {
serviceMap: serviceMapStore;
globalTime: GlobalTime;
} => {
return {
serviceMap: state.serviceMap,
globalTime: state.globalTime,
};
};
export default connect(mapStateToProps, {
getServiceMapItems: getServiceMapItems,
getDetailedServiceMapItems: getDetailedServiceMapItems,
})(ServiceMap);


@@ -0,0 +1 @@
export { default } from "./ServiceMap";


@@ -0,0 +1,119 @@
import { uniqBy, uniq, maxBy, cloneDeep, find } from "lodash";
import { serviceMapStore } from "Src/store/actions";
import { graphDataType } from "./ServiceMap";
const MIN_WIDTH = 10;
const MAX_WIDTH = 20;
const DEFAULT_FONT_SIZE = 6;
export const getDimensions = (num: number, highest: number) => {
const percentage = (num / highest) * 100;
const width = (percentage * (MAX_WIDTH - MIN_WIDTH)) / 100 + MIN_WIDTH;
const fontSize = DEFAULT_FONT_SIZE;
return {
fontSize,
width,
};
};
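// worked example: getDimensions(50, 100) gives percentage = 50,
// width = (50 * (20 - 10)) / 100 + 10 = 15, fontSize = 6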
export const getGraphData = (serviceMap: serviceMapStore): graphDataType => {
const { items, services } = serviceMap;
const highestCallCount = maxBy(items, (e) => e.callCount).callCount;
const highestCallRate = maxBy(services, (e) => e.callRate).callRate;
const divNum = Number(
String(1).padEnd(highestCallCount.toString().length, "0"),
);
const links = cloneDeep(items).map((node) => {
const { parent, child, callCount } = node;
return {
source: parent,
target: child,
value: (100 - callCount / divNum) * 0.03,
};
});
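// worked example: highestCallCount = 275 gives divNum = Number("1".padEnd(3, "0")) = 100,
// so that link's value is (100 - 275 / 100) * 0.03, roughly 2.92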
const uniqParent = uniqBy(cloneDeep(items), "parent").map((e) => e.parent);
const uniqChild = uniqBy(cloneDeep(items), "child").map((e) => e.child);
const uniqNodes = uniq([...uniqParent, ...uniqChild]);
const nodes = uniqNodes.map((node, i) => {
const service = find(services, (service) => service.serviceName === node);
let color = "#88CEA5";
if (!service) {
return {
id: node,
group: i + 1,
fontSize: DEFAULT_FONT_SIZE,
width: MIN_WIDTH,
color,
nodeVal: MIN_WIDTH,
callRate: 0,
errorRate: 0,
p99: 0,
};
}
if (service.errorRate > 0) {
color = "#F98989";
} else if (service.fourXXRate > 0) {
color = "#F9DA7B";
}
const { fontSize, width } = getDimensions(service.callRate, highestCallRate);
return {
id: node,
group: i + 1,
fontSize,
width,
color,
nodeVal: width,
callRate: service.callRate.toFixed(2),
errorRate: service.errorRate,
p99: service.p99,
};
});
return {
nodes,
links,
};
};
export const getZoomPx = (): number => {
const width = window.screen.width;
if (width < 1400) {
return 190;
} else if (width >= 1400 && width < 1700) {
return 380;
} else if (width >= 1700) {
return 470;
}
return 190;
};
export const getTooltip = (node: {
p99: number;
errorRate: number;
callRate: number;
id: string;
}) => {
return `<div style="color:#333333;padding:12px;background: white;border-radius: 2px;">
<div style="font-weight:bold; margin-bottom:16px;">${node.id}</div>
<div class="keyval">
<div class="key">P99 latency:</div>
<div class="val">${node.p99 / 1000000}ms</div>
</div>
<div class="keyval">
<div class="key">Request:</div>
<div class="val">${node.callRate}/sec</div>
</div>
<div class="keyval">
<div class="key">Error Rate:</div>
<div class="val">${node.errorRate}%</div>
</div>
</div>`;
};
export const transformLabel = (label: string) => {
const MAX_LENGTH = 13;
const MAX_SHOW = 10;
if (label.length > MAX_LENGTH) {
return `${label.slice(0, MAX_SHOW)}...`;
}
return label;
};
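// e.g. transformLabel("frontend-service") returns "frontend-s..." (16 chars > 13, keep first 10)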


@@ -2,15 +2,11 @@ import React, { useState, useEffect } from "react";
import GenericVisualizations from "../Metrics/GenericVisualization";
import { Select, Card, Space, Form } from "antd";
import { connect } from "react-redux";
import { StoreState } from "../../store/reducers";
import {
customMetricsItem,
getFilteredTraceMetrics,
GlobalTime,
TraceFilters,
} from "../../store/actions";
import { GlobalTime, TraceFilters } from "../../store/actions";
import { useRoute } from "../RouteProvider";
import { getFilteredTraceMetrics } from "../../store/actions/MetricsActions";
import { customMetricsItem } from "../../store/actions/MetricsActions";
const { Option } = Select;
const entity = [
@@ -48,10 +44,10 @@ const aggregation_options = [
{
linked_entity: "duration",
default_selected: { title: "p99", dataindex: "p99" },
// options_available: [ {title:'Avg', dataindex:'avg'}, {title:'Max', dataindex:'max'},{title:'Min', dataindex:'min'}, {title:'p50', dataindex:'p50'},{title:'p90', dataindex:'p90'}, {title:'p95', dataindex:'p95'}]
// options_available: [ {title:'Avg', dataindex:'avg'}, {title:'Max', dataindex:'max'},{title:'Min', dataindex:'min'}, {title:'p50', dataindex:'p50'},{title:'p95', dataindex:'p95'}, {title:'p99', dataindex:'p99'}]
options_available: [
{ title: "p50", dataindex: "p50" },
{ title: "p90", dataindex: "p90" },
{ title: "p95", dataindex: "p95" },
{ title: "p99", dataindex: "p99" },
],
},
@@ -80,7 +76,10 @@ interface TraceCustomVisualizationsProps {
const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
const [selectedEntity, setSelectedEntity] = useState("calls");
const [selectedAggOption, setSelectedAggOption] = useState("count");
const [selectedStep, setSelectedStep] = useState("60");
const { state } = useRoute();
const [form] = Form.useForm();
const selectedStep = "60";
// Step should be multiples of 60, 60 -> 1 min
useEffect(() => {
@@ -109,21 +108,18 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
minTime: props.globalTime.minTime - 15 * 60 * 1000000000,
maxTime: props.globalTime.maxTime + 15 * 60 * 1000000000,
};
props.getFilteredTraceMetrics(request_string, plusMinus15);
/*
Call the apis only when the route is loaded.
Check this issue: https://github.com/SigNoz/signoz/issues/110
*/
if (state.TRACES.isLoaded) {
props.getFilteredTraceMetrics(request_string, plusMinus15);
}
}, [selectedEntity, selectedAggOption, props.traceFilters, props.globalTime]);
// Custom metrics API is called when time, trace filters, selected entity, or agg option changes
const [form] = Form.useForm();
function handleChange(value: string) {
// console.log(value);
}
function handleFinish(value: string) {
// console.log(value);
}
// PNOTE - Can also use 'coordinate' option in antd Select for implementing this - https://ant.design/components/select/
const handleFormValuesChange = (changedValues: any) => {
const formFieldName = Object.keys(changedValues)[0];
@@ -152,11 +148,9 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
return (
<Card>
{/* <Space direction="vertical"> */}
<div>Custom Visualizations</div>
<Form
form={form}
onFinish={handleFinish}
onValuesChange={handleFormValuesChange}
initialValues={{
agg_options: "Count",
@@ -189,7 +183,7 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
</Form.Item>
<Form.Item name="chart_style">
<Select style={{ width: 120 }} onChange={handleChange} allowClear>
<Select style={{ width: 120 }} allowClear>
<Option value="line">Line Chart</Option>
<Option value="bar">Bar Chart</Option>
<Option value="area">Area Chart</Option>
@@ -197,7 +191,7 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
</Form.Item>
<Form.Item name="interval">
<Select style={{ width: 120 }} onChange={handleChange} allowClear>
<Select style={{ width: 120 }} allowClear>
<Option value="1m">1 min</Option>
<Option value="5m">5 min</Option>
<Option value="30m">30 min</Option>
@@ -206,7 +200,7 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
{/* Need heading for each option */}
<Form.Item name="group_by">
<Select style={{ width: 120 }} onChange={handleChange} allowClear>
<Select style={{ width: 120 }} allowClear>
<Option value="none">Group By</Option>
<Option value="status">Status Code</Option>
<Option value="protocol">Protocol</Option>
@@ -229,7 +223,7 @@ const mapStateToProps = (
traceFilters: TraceFilters;
} => {
return {
filteredTraceMetrics: state.filteredTraceMetrics,
filteredTraceMetrics: state.metricsData.customMetricsItem,
globalTime: state.globalTime,
traceFilters: state.traceFilters,
};
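The isLoaded guard above, repeated below in TraceFilter and UsageExplorer, assumes a RouteProvider that records whether each route has finished its initial load (see issue #110). The provider itself is not part of this diff; a minimal sketch of the shape these components rely on, with hypothetical route keys:

import React, { createContext, useContext } from "react";

// Sketch only: the real implementation lives in ../RouteProvider and may differ.
type RouteState = Record<string, { isLoaded: boolean }>;

const RouteContext = createContext<{ state: RouteState }>({
state: { TRACES: { isLoaded: false }, USAGE_EXPLORER: { isLoaded: false } },
});

export const useRoute = () => useContext(RouteContext);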

View File

@@ -18,6 +18,7 @@ import FormItem from "antd/lib/form/FormItem";
import api, { apiV1 } from "../../api";
import { useLocation } from "react-router-dom";
import { METRICS_PAGE_QUERY_PARAM } from "Src/constants/query";
import { useRoute } from "../RouteProvider";
const { Option } = Select;
@@ -45,6 +46,7 @@ const _TraceFilter = (props: TraceFilterProps) => {
const [tagKeyOptions, setTagKeyOptions] = useState<TagKeyOptionItem[]>([]);
const location = useLocation();
const urlParams = new URLSearchParams(location.search.split("?")[1]);
const { state } = useRoute();
useEffect(() => {
handleApplyFilterForm({
@@ -122,7 +124,13 @@ const _TraceFilter = (props: TraceFilterProps) => {
"&tags=" +
encodeURIComponent(JSON.stringify(props.traceFilters.tags));
props.fetchTraces(props.globalTime, request_string);
/*
Call the apis only when the route is loaded.
Check this issue: https://github.com/SigNoz/signoz/issues/110
*/
if (state.TRACES.isLoaded) {
props.fetchTraces(props.globalTime, request_string);
}
}, [props.traceFilters, props.globalTime]);
useEffect(() => {

View File

@@ -36,10 +36,16 @@
stroke-linecap: round;
stroke-linejoin: round;
}
/* Prevent text vertical shift on hover */
.d3-flame-graph-label {
border: 1px dotted transparent;
cursor: pointer;
}
/* Transparency simulates sub pixel border https://stackoverflow.com/questions/13891177/css-border-less-than-1px */
.d3-flame-graph-label:hover {
border: 1px dotted;
border-color: rgba(255, 255, 255, 0.75);
}
/*
@@ -47,3 +53,7 @@
border: 1px solid;
border-color: rgba(255, 255, 255, 0.75);
} */
.fade:not(.show) {
opacity: 0.5;
}

View File

@@ -13,7 +13,6 @@ import "./TraceGraph.css";
import { spanToTreeUtil } from "../../utils/spanToTree";
import { fetchTraceItem, spansWSameTraceIDResponse } from "../../store/actions";
import { StoreState } from "../../store/reducers";
import { TraceGraphColumn } from "./TraceGraphColumn";
import SelectedSpanDetails from "./SelectedSpanDetails";
interface TraceGraphProps {
@@ -71,11 +70,7 @@ const _TraceGraph = (props: TraceGraphProps) => {
return (
<Row gutter={{ xs: 8, sm: 16, md: 24, lg: 32 }}>
{/*<Col md={8} sm={24}>*/}
{/* <TraceGraphColumn />*/}
{/*</Col>*/}
<Col md={24} sm={24}>
{/* <Card style={{ width: 640 }}> */}
<Space direction="vertical" size="middle" style={{ width: "100%" }}>
<Card bodyStyle={{ padding: 80 }} style={{ height: 320 }}>
<div

View File

@@ -1,18 +1,19 @@
import React, { useEffect, useMemo, useState } from "react";
import React, { useEffect, useState } from "react";
import { Bar } from "react-chartjs-2";
import { Card, Form, Select, Space } from "antd";
import { Card, Select, Space } from "antd";
import { connect } from "react-redux";
import {
getServicesList,
getUsageData,
GlobalTime,
servicesListItem,
usageDataItem,
} from "../../store/actions";
import { StoreState } from "../../store/reducers";
import moment from "moment";
import { isOnboardingSkipped } from "../../utils/app";
import { useRoute } from "../RouteProvider";
import { servicesListItem } from "../../store/actions/MetricsActions";
const { Option } = Select;
interface UsageExplorerProps {
@@ -56,6 +57,8 @@ const _UsageExplorer = (props: UsageExplorerProps) => {
const [selectedInterval, setSelectedInterval] = useState(interval[2]);
const [selectedService, setSelectedService] = useState<string>("");
const { state } = useRoute();
useEffect(() => {
if (selectedTime && selectedInterval) {
const maxTime = new Date().getTime() * 1000000;
@@ -71,7 +74,13 @@ const _UsageExplorer = (props: UsageExplorerProps) => {
}, [selectedTime, selectedInterval, selectedService]);
useEffect(() => {
props.getServicesList(props.globalTime);
/*
Call the apis only when the route is loaded.
Check this issue: https://github.com/SigNoz/signoz/issues/110
*/
if (state.USAGE_EXPLORER.isLoaded) {
props.getServicesList(props.globalTime);
}
}, []);
const data = {
@@ -203,7 +212,7 @@ const mapStateToProps = (
totalCount: totalCount,
usageData: state.usageDate,
globalTime: state.globalTime,
servicesList: state.servicesList,
servicesList: state.metricsData.serviceList,
};
};

View File

@@ -0,0 +1,3 @@
export * from "./metricsInterfaces";
export * from "./metricsActionTypes";
export * from "./metricsActions";

View File

@@ -0,0 +1,32 @@
import {
externalErrCodeMetricsActions,
externalMetricsAvgDurationAction,
getDbOverViewMetricsAction,
getExternalMetricsAction,
getFilteredTraceMetricsAction,
getServiceMetricsAction,
getServicesListAction,
getTopEndpointsAction,
} from "./metricsInterfaces";
export enum MetricsActionTypes {
updateInput = "UPDATE_INPUT",
getServicesList = "GET_SERVICE_LIST",
getServiceMetrics = "GET_SERVICE_METRICS",
getAvgDurationMetrics = "GET_AVG_DURATION_METRICS",
getErrCodeMetrics = "GET_ERR_CODE_METRICS",
getDbOverviewMetrics = "GET_DB_OVERVIEW_METRICS",
getExternalMetrics = "GET_EXTERNAL_METRICS",
getTopEndpoints = "GET_TOP_ENDPOINTS",
getFilteredTraceMetrics = "GET_FILTERED_TRACE_METRICS",
}
export type MetricsActions =
| getServicesListAction
| getServiceMetricsAction
| getTopEndpointsAction
| getFilteredTraceMetricsAction
| getExternalMetricsAction
| externalErrCodeMetricsActions
| getDbOverViewMetricsAction
| externalMetricsAvgDurationAction;
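The MetricsActions union is a discriminated union, so TypeScript can narrow the payload from action.type. A sketch of what that enables (note the shipped metricsReducer below still types its action loosely as { type: string; payload: any }, so this narrowing is possible, not what the PR does):

import { MetricsActions, MetricsActionTypes } from "./metricsActionTypes";

const describe = (action: MetricsActions): string => {
if (action.type === MetricsActionTypes.getServicesList) {
// payload is narrowed to servicesListItem[] here
return action.payload.map((s) => s.serviceName).join(", ");
}
return action.type;
};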

View File

@@ -0,0 +1,190 @@
import { Dispatch } from "redux";
import api, { apiV1 } from "../../../api";
import { GlobalTime } from "../global";
import { toUTCEpoch } from "../../../utils/timeUtils";
import { MetricsActionTypes } from "./metricsActionTypes";
import * as MetricsInterfaces from "./metricsInterfaces";
export const getServicesList = (globalTime: GlobalTime) => {
return async (dispatch: Dispatch) => {
let request_string =
"/services?start=" + globalTime.minTime + "&end=" + globalTime.maxTime;
const response = await api.get<MetricsInterfaces.servicesListItem[]>(
apiV1 + request_string,
);
dispatch<MetricsInterfaces.getServicesListAction>({
type: MetricsActionTypes.getServicesList,
payload: response.data,
//PNOTE - response.data in the axios response has the actual API response
});
};
};
export const getDbOverViewMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/dbOverview?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<MetricsInterfaces.dbOverviewMetricsItem[]>(
apiV1 + request_string,
);
dispatch<MetricsInterfaces.getDbOverViewMetricsAction>({
type: MetricsActionTypes.getDbOverviewMetrics,
payload: response.data,
});
};
};
export const getExternalMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/external?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<MetricsInterfaces.externalMetricsItem[]>(
apiV1 + request_string,
);
dispatch<MetricsInterfaces.getExternalMetricsAction>({
type: MetricsActionTypes.getExternalMetrics,
payload: response.data,
});
};
};
export const getExternalAvgDurationMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/externalAvgDuration?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<
MetricsInterfaces.externalMetricsAvgDurationItem[]
>(apiV1 + request_string);
dispatch<MetricsInterfaces.externalMetricsAvgDurationAction>({
type: MetricsActionTypes.getAvgDurationMetrics,
payload: response.data,
});
};
};
export const getExternalErrCodeMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/externalErrors?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<
MetricsInterfaces.externalErrCodeMetricsItem[]
>(apiV1 + request_string);
dispatch<MetricsInterfaces.externalErrCodeMetricsActions>({
type: MetricsActionTypes.getErrCodeMetrics,
payload: response.data,
});
};
};
export const getServicesMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/overview?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<MetricsInterfaces.metricItem[]>(
apiV1 + request_string,
);
dispatch<MetricsInterfaces.getServiceMetricsAction>({
type: MetricsActionTypes.getServiceMetrics,
payload: response.data,
//PNOTE - response.data in the axios response has the actual API response
});
};
};
export const getTopEndpoints = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/top_endpoints?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime;
const response = await api.get<MetricsInterfaces.topEndpointListItem[]>(
apiV1 + request_string,
);
dispatch<MetricsInterfaces.getTopEndpointsAction>({
type: MetricsActionTypes.getTopEndpoints,
payload: response.data,
//PNOTE - response.data in the axios response has the actual API response
});
};
};
export const getFilteredTraceMetrics = (
filter_params: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/spans/aggregates?start=" +
toUTCEpoch(globalTime.minTime) +
"&end=" +
toUTCEpoch(globalTime.maxTime) +
"&" +
filter_params;
const response = await api.get<MetricsInterfaces.customMetricsItem[]>(
apiV1 + request_string,
);
dispatch<MetricsInterfaces.getFilteredTraceMetricsAction>({
type: MetricsActionTypes.getFilteredTraceMetrics,
payload: response.data,
//PNOTE - response.data in the axios response has the actual API response
});
};
};
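Each creator above is a redux-thunk: it returns an async function that performs the GET and then dispatches a typed action. A minimal usage sketch, assuming a store wired with redux-thunk (import paths are illustrative):

import { createStore, applyMiddleware } from "redux";
import thunk from "redux-thunk";
import reducers from "../../reducers";
import { getServicesList } from "./metricsActions";

const store = createStore(reducers, applyMiddleware(thunk));

const now = Date.now() * 1000000; // ns, matching GlobalTime usage elsewhere
// Cast needed because the plain Dispatch type doesn't model thunks.
// Fetches /services for the window, then GET_SERVICE_LIST replaces
// state.metricsData.serviceList with the response.
(store.dispatch as any)(getServicesList({ minTime: now - 15 * 60 * 1e9, maxTime: now }));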

View File

@@ -0,0 +1,98 @@
import { MetricsActionTypes } from "./metricsActionTypes";
export interface servicesListItem {
serviceName: string;
p99: number;
avgDuration: number;
numCalls: number;
callRate: number;
numErrors: number;
errorRate: number;
}
export interface metricItem {
timestamp: number;
p50: number;
p95: number;
p99: number;
numCalls: number;
callRate: number;
numErrors: number;
errorRate: number;
}
export interface externalMetricsAvgDurationItem {
avgDuration: number;
timestamp: number;
}
export interface externalErrCodeMetricsItem {
externalHttpUrl: string;
numCalls: number;
timestamp: number;
callRate: number;
}
export interface topEndpointListItem {
p50: number;
p95: number;
p99: number;
numCalls: number;
name: string;
}
export interface externalMetricsItem {
avgDuration: number;
callRate: number;
externalHttpUrl: string;
numCalls: number;
timestamp: number;
}
export interface dbOverviewMetricsItem {
avgDuration: number;
callRate: number;
dbSystem: string;
numCalls: number;
timestamp: number;
}
export interface customMetricsItem {
timestamp: number;
value: number;
}
export interface getServicesListAction {
type: MetricsActionTypes.getServicesList;
payload: servicesListItem[];
}
export interface externalErrCodeMetricsActions {
type: MetricsActionTypes.getErrCodeMetrics;
payload: externalErrCodeMetricsItem[];
}
export interface externalMetricsAvgDurationAction {
type: MetricsActionTypes.getAvgDurationMetrics;
payload: externalMetricsAvgDurationItem[];
}
export interface getServiceMetricsAction {
type: MetricsActionTypes.getServiceMetrics;
payload: metricItem[];
}
export interface getExternalMetricsAction {
type: MetricsActionTypes.getExternalMetrics;
payload: externalMetricsItem[];
}
export interface getDbOverViewMetricsAction {
type: MetricsActionTypes.getDbOverviewMetrics;
payload: dbOverviewMetricsItem[];
}
export interface getTopEndpointsAction {
type: MetricsActionTypes.getTopEndpoints;
payload: topEndpointListItem[];
}
export interface getFilteredTraceMetricsAction {
type: MetricsActionTypes.getFilteredTraceMetrics;
payload: customMetricsItem[];
}

View File

@@ -23,6 +23,15 @@ export const updateTimeInterval = (
// set directly based on that. Assuming datetimeRange values are in ms, and minTime is 0th element
switch (interval) {
case "1min":
maxTime = Date.now() * 1000000; // in nano sec
minTime = (Date.now() - 1 * 60 * 1000) * 1000000;
break;
case "5min":
maxTime = Date.now() * 1000000; // in nano sec
minTime = (Date.now() - 5 * 60 * 1000) * 1000000;
break;
case "15min":
maxTime = Date.now() * 1000000; // in nano sec
minTime = (Date.now() - 15 * 60 * 1000) * 1000000;

View File

@@ -1,6 +1,7 @@
export * from "./types";
export * from "./traceFilters";
export * from "./serviceMap";
export * from "./traces";
export * from "./metrics";
export * from "./MetricsActions";
export * from "./usage";
export * from "./global";

View File

@@ -1,277 +0,0 @@
import { Dispatch } from "redux";
import api, { apiV1 } from "../../api";
import { GlobalTime } from "./global";
import { ActionTypes } from "./types";
import { Token } from "../../utils/token";
import { toUTCEpoch } from "../../utils/timeUtils";
export interface servicesListItem {
serviceName: string;
p99: number;
avgDuration: number;
numCalls: number;
callRate: number;
numErrors: number;
errorRate: number;
}
export interface metricItem {
timestamp: number;
p50: number;
p90: number;
p99: number;
numCalls: number;
callRate: number;
numErrors: number;
errorRate: number;
}
export interface externalMetricsAvgDurationItem {
avgDuration: number;
timestamp: number;
}
export interface externalErrCodeMetricsItem {
errorRate: number;
externalHttpUrl: string;
numErrors: number;
timestamp: number;
}
export interface topEndpointListItem {
p50: number;
p90: number;
p99: number;
numCalls: number;
name: string;
}
export interface externalMetricsItem {
avgDuration: number;
callRate: number;
externalHttpUrl: string;
numCalls: number;
timestamp: number;
}
export interface dbOverviewMetricsItem {
avgDuration: number;
callRate: number;
dbSystem: string;
numCalls: number;
timestamp: number;
}
export interface customMetricsItem {
timestamp: number;
value: number;
}
export interface getServicesListAction {
type: ActionTypes.getServicesList;
payload: servicesListItem[];
}
export interface externalErrCodeMetricsActions {
type: ActionTypes.getErrCodeMetrics;
payload: externalErrCodeMetricsItem[];
}
export interface externalMetricsAvgDurationAction {
type: ActionTypes.getAvgDurationMetrics;
payload: externalMetricsAvgDurationItem[];
}
export interface getServiceMetricsAction {
type: ActionTypes.getServiceMetrics;
payload: metricItem[];
}
export interface getExternalMetricsAction {
type: ActionTypes.getExternalMetrics;
payload: externalMetricsItem[];
}
export interface getDbOverViewMetricsAction {
type: ActionTypes.getDbOverviewMetrics;
payload: dbOverviewMetricsItem[];
}
export interface getTopEndpointsAction {
type: ActionTypes.getTopEndpoints;
payload: topEndpointListItem[];
}
export interface getFilteredTraceMetricsAction {
type: ActionTypes.getFilteredTraceMetrics;
payload: customMetricsItem[];
}
export const getServicesList = (globalTime: GlobalTime) => {
return async (dispatch: Dispatch) => {
let request_string =
"/services?start=" + globalTime.minTime + "&end=" + globalTime.maxTime;
const response = await api.get<servicesListItem[]>(apiV1 + request_string);
dispatch<getServicesListAction>({
type: ActionTypes.getServicesList,
payload: response.data,
//PNOTE - response.data in the axios response has the actual API response
});
};
};
export const getDbOverViewMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/dbOverview?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<dbOverviewMetricsItem[]>(
apiV1 + request_string,
);
dispatch<getDbOverViewMetricsAction>({
type: ActionTypes.getDbOverviewMetrics,
payload: response.data,
});
};
};
export const getExternalMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/external?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<externalMetricsItem[]>(apiV1 + request_string);
dispatch<getExternalMetricsAction>({
type: ActionTypes.getExternalMetrics,
payload: response.data,
});
};
};
export const getExternalAvgDurationMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/externalAvgDuration?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<externalMetricsAvgDurationItem[]>(
apiV1 + request_string,
);
dispatch<externalMetricsAvgDurationAction>({
type: ActionTypes.getAvgDurationMetrics,
payload: response.data,
});
};
};
export const getExternalErrCodeMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/externalErrors?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<externalErrCodeMetricsItem[]>(
apiV1 + request_string,
);
dispatch<externalErrCodeMetricsActions>({
type: ActionTypes.getErrCodeMetrics,
payload: response.data,
});
};
};
export const getServicesMetrics = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/overview?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime +
"&step=60";
const response = await api.get<metricItem[]>(apiV1 + request_string);
dispatch<getServiceMetricsAction>({
type: ActionTypes.getServiceMetrics,
payload: response.data,
//PNOTE - response.data in the axios response has the actual API response
});
};
};
export const getTopEndpoints = (
serviceName: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/service/top_endpoints?service=" +
serviceName +
"&start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime;
const response = await api.get<topEndpointListItem[]>(apiV1 + request_string);
dispatch<getTopEndpointsAction>({
type: ActionTypes.getTopEndpoints,
payload: response.data,
//PNOTE - response.data in the axios response has the actual API response
});
};
};
export const getFilteredTraceMetrics = (
filter_params: string,
globalTime: GlobalTime,
) => {
return async (dispatch: Dispatch) => {
let request_string =
"/spans/aggregates?start=" +
toUTCEpoch(globalTime.minTime) +
"&end=" +
toUTCEpoch(globalTime.maxTime) +
"&" +
filter_params;
const response = await api.get<customMetricsItem[]>(apiV1 + request_string);
dispatch<getFilteredTraceMetricsAction>({
type: ActionTypes.getFilteredTraceMetrics,
payload: response.data,
//PNOTE - response.data in the axios response has the actual API response
});
};
};

View File

@@ -0,0 +1,78 @@
import { Dispatch } from "redux";
import api, { apiV1 } from "../../api";
import { GlobalTime } from "./global";
import { ActionTypes } from "./types";
export interface serviceMapStore {
items: servicesMapItem[];
services: servicesItem[];
}
export interface servicesItem {
serviceName: string;
p99: number;
avgDuration: number;
numCalls: number;
callRate: number;
numErrors: number;
errorRate: number;
num4XX: number;
fourXXRate: number;
}
export interface servicesMapItem {
parent: string;
child: string;
callCount: number;
}
export interface serviceMapItemAction {
type: ActionTypes.getServiceMapItems;
payload: servicesMapItem[];
}
export interface servicesAction {
type: ActionTypes.getServices;
payload: servicesItem[];
}
export const getServiceMapItems = (globalTime: GlobalTime) => {
return async (dispatch: Dispatch) => {
dispatch<serviceMapItemAction>({
type: ActionTypes.getServiceMapItems,
payload: [],
});
let request_string =
"/serviceMapDependencies?start=" +
globalTime.minTime +
"&end=" +
globalTime.maxTime;
const response = await api.get<servicesMapItem[]>(apiV1 + request_string);
dispatch<serviceMapItemAction>({
type: ActionTypes.getServiceMapItems,
payload: response.data,
});
};
};
export const getDetailedServiceMapItems = (globalTime: GlobalTime) => {
return async (dispatch: Dispatch) => {
dispatch<servicesAction>({
type: ActionTypes.getServices,
payload: [],
});
let request_string =
"/services?start=" + globalTime.minTime + "&end=" + globalTime.maxTime;
const response = await api.get<servicesItem[]>(apiV1 + request_string);
dispatch<servicesAction>({
type: ActionTypes.getServices,
payload: response.data,
});
};
};
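Both thunks dispatch an empty payload before fetching, so the service map clears immediately instead of showing the previous time range while the request is in flight. The resulting sequence for getServiceMapItems, roughly:

// 1. dispatch GET_SERVICE_MAP_ITEMS with []   -> serviceMap.items resets
// 2. await GET /serviceMapDependencies?...
// 3. dispatch GET_SERVICE_MAP_ITEMS with data -> serviceMap.items populated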

View File

@@ -32,16 +32,4 @@ export const updateTraceFilters = (traceFilters: TraceFilters) => {
};
};
export interface updateInputTagAction {
type: ActionTypes.updateInput;
payload: string;
}
export const updateInputTag = (Input: string) => {
return {
type: ActionTypes.updateInput,
payload: Input,
};
};
//named export when you want to export multiple functions from the same file

View File

@@ -1,33 +1,18 @@
import { FetchTracesAction, FetchTraceItemAction } from "./traces";
import { updateTraceFiltersAction, updateInputTagAction } from "./traceFilters";
import {
getServicesListAction,
getServiceMetricsAction,
externalErrCodeMetricsActions,
externalMetricsAvgDurationAction,
getExternalMetricsAction,
getTopEndpointsAction,
getFilteredTraceMetricsAction,
getDbOverViewMetricsAction,
} from "./metrics";
import { serviceMapItemAction, servicesAction } from "./serviceMap";
import { getUsageDataAction } from "./usage";
import { updateTimeIntervalAction } from "./global";
export enum ActionTypes {
updateTraceFilters = "UPDATE_TRACES_FILTER",
updateInput = "UPDATE_INPUT",
updateTimeInterval = "UPDATE_TIME_INTERVAL",
getServiceMapItems = "GET_SERVICE_MAP_ITEMS",
getServices = "GET_SERVICES",
getUsageData = "GET_USAGE_DATE",
fetchTraces = "FETCH_TRACES",
fetchTraceItem = "FETCH_TRACE_ITEM",
getServicesList = "GET_SERVICE_LIST",
getServiceMetrics = "GET_SERVICE_METRICS",
getAvgDurationMetrics = "GET_AVG_DURATION_METRICS",
getErrCodeMetrics = "GET_ERR_CODE_METRICS",
getDbOverviewMetrics = "GET_DB_OVERVIEW_METRICS",
getExternalMetrics = "GET_EXTERNAL_METRICS",
getTopEndpoints = "GET_TOP_ENDPOINTS",
getUsageData = "GET_USAGE_DATE",
updateTimeInterval = "UPDATE_TIME_INTERVAL",
getFilteredTraceMetrics = "GET_FILTERED_TRACE_METRICS",
}
export type Action =
@@ -35,13 +20,7 @@ export type Action =
| FetchTracesAction
| updateTraceFiltersAction
| updateInputTagAction
| getServicesListAction
| getServiceMetricsAction
| getTopEndpointsAction
| getUsageDataAction
| updateTimeIntervalAction
| getFilteredTraceMetricsAction
| getExternalMetricsAction
| externalErrCodeMetricsActions
| getDbOverViewMetricsAction
| externalMetricsAvgDurationAction;
| servicesAction
| serviceMapItemAction;

View File

@@ -2,7 +2,6 @@ import { Dispatch } from "redux";
import api, { apiV1 } from "../../api";
import { ActionTypes } from "./types";
import { GlobalTime } from "./global";
import { toUTCEpoch } from "../../utils/timeUtils";
export interface usageDataItem {

View File

@@ -2,64 +2,36 @@ import { combineReducers } from "redux";
import {
traceResponseNew,
spansWSameTraceIDResponse,
servicesListItem,
metricItem,
topEndpointListItem,
externalMetricsItem,
externalMetricsAvgDurationItem,
usageDataItem,
GlobalTime,
externalErrCodeMetricsItem,
customMetricsItem,
serviceMapStore,
TraceFilters,
} from "../actions";
import { updateGlobalTimeReducer } from "./global";
import {
filteredTraceMetricsReducer,
serviceMetricsReducer,
externalErrCodeMetricsReducer,
serviceTableReducer,
topEndpointsReducer,
dbOverviewMetricsReducer,
externalMetricsReducer,
externalAvgDurationMetricsReducer,
} from "./metrics";
import { traceFiltersReducer, inputsReducer } from "./traceFilters";
import { MetricsInitialState, metricsReducer } from "./metrics";
import TraceFilterReducer from "./traceFilters";
import { traceItemReducer, tracesReducer } from "./traces";
import { usageDataReducer } from "./usage";
import { ServiceMapReducer } from "./serviceMap";
export interface StoreState {
metricsData: MetricsInitialState;
traceFilters: TraceFilters;
inputTag: string;
traces: traceResponseNew;
traceItem: spansWSameTraceIDResponse;
servicesList: servicesListItem[];
serviceMetrics: metricItem[];
topEndpointsList: topEndpointListItem[];
externalMetrics: externalMetricsItem[];
dbOverviewMetrics: externalMetricsItem[];
externalAvgDurationMetrics: externalMetricsAvgDurationItem[];
externalErrCodeMetrics: externalErrCodeMetricsItem[];
usageDate: usageDataItem[];
globalTime: GlobalTime;
filteredTraceMetrics: customMetricsItem[];
serviceMap: serviceMapStore;
}
const reducers = combineReducers<StoreState>({
traceFilters: traceFiltersReducer,
inputTag: inputsReducer,
traceFilters: TraceFilterReducer,
traces: tracesReducer,
traceItem: traceItemReducer,
servicesList: serviceTableReducer,
serviceMetrics: serviceMetricsReducer,
dbOverviewMetrics: dbOverviewMetricsReducer,
topEndpointsList: topEndpointsReducer,
externalAvgDurationMetrics: externalAvgDurationMetricsReducer,
externalMetrics: externalMetricsReducer,
externalErrCodeMetrics: externalErrCodeMetricsReducer,
usageDate: usageDataReducer,
globalTime: updateGlobalTimeReducer,
filteredTraceMetrics: filteredTraceMetricsReducer,
metricsData: metricsReducer,
serviceMap: ServiceMapReducer,
});
export default reducers;

View File

@@ -1,6 +1,4 @@
import {
ActionTypes,
Action,
servicesListItem,
metricItem,
topEndpointListItem,
@@ -9,10 +7,21 @@ import {
externalMetricsItem,
dbOverviewMetricsItem,
externalMetricsAvgDurationItem,
} from "../actions";
} from "../actions/MetricsActions";
import { MetricsActionTypes as ActionTypes } from "../actions/MetricsActions/metricsActionTypes";
export const serviceTableReducer = (
state: servicesListItem[] = [
export type MetricsInitialState = {
serviceList?: servicesListItem[];
metricItems?: metricItem[];
topEndpointListItem?: topEndpointListItem[];
externalMetricsAvgDurationItem?: externalMetricsAvgDurationItem[];
externalErrCodeMetricsItem?: externalErrCodeMetricsItem[];
externalMetricsItem?: externalMetricsItem[];
dbOverviewMetricsItem?: dbOverviewMetricsItem[];
customMetricsItem?: customMetricsItem[];
};
export const metricsInitialState: MetricsInitialState = {
serviceList: [
{
serviceName: "",
p99: 0,
@@ -23,22 +32,11 @@ export const serviceTableReducer = (
errorRate: 0,
},
],
action: Action,
) => {
switch (action.type) {
case ActionTypes.getServicesList:
return action.payload;
default:
return state;
}
};
export const serviceMetricsReducer = (
state: metricItem[] = [
metricItems: [
{
timestamp: 0,
p50: 0,
p90: 0,
p95: 0,
p99: 0,
numCalls: 0,
callRate: 0.0,
@@ -46,49 +44,22 @@ export const serviceMetricsReducer = (
errorRate: 0,
},
],
action: Action,
) => {
switch (action.type) {
case ActionTypes.getServiceMetrics:
return action.payload;
default:
return state;
}
};
export const topEndpointsReducer = (
state: topEndpointListItem[] = [
{ p50: 0, p90: 0, p99: 0, numCalls: 0, name: "" },
topEndpointListItem: [
{
p50: 0,
p95: 0,
p99: 0,
numCalls: 0,
name: "",
},
],
action: Action,
) => {
switch (action.type) {
case ActionTypes.getTopEndpoints:
return action.payload;
default:
return state;
}
};
export const externalAvgDurationMetricsReducer = (
state: externalMetricsAvgDurationItem[] = [
externalMetricsAvgDurationItem: [
{
avgDuration: 0,
timestamp: 0,
},
],
action: Action,
) => {
switch (action.type) {
case ActionTypes.getAvgDurationMetrics:
return action.payload;
default:
return state;
}
};
export const externalErrCodeMetricsReducer = (
state: externalErrCodeMetricsItem[] = [
externalErrCodeMetricsItem: [
{
callRate: 0,
externalHttpUrl: "",
@@ -96,18 +67,7 @@ export const externalErrCodeMetricsReducer = (
timestamp: 0,
},
],
action: Action,
) => {
switch (action.type) {
case ActionTypes.getErrCodeMetrics:
return action.payload;
default:
return state;
}
};
export const externalMetricsReducer = (
state: externalMetricsItem[] = [
externalMetricsItem: [
{
avgDuration: 0,
callRate: 0,
@@ -116,18 +76,7 @@ export const externalMetricsReducer = (
timestamp: 0,
},
],
action: Action,
) => {
switch (action.type) {
case ActionTypes.getExternalMetrics:
return action.payload;
default:
return state;
}
};
export const dbOverviewMetricsReducer = (
state: dbOverviewMetricsItem[] = [
dbOverviewMetricsItem: [
{
avgDuration: 0,
callRate: 0,
@@ -136,24 +85,68 @@ export const dbOverviewMetricsReducer = (
timestamp: 0,
},
],
action: Action,
) => {
switch (action.type) {
case ActionTypes.getDbOverviewMetrics:
return action.payload;
default:
return state;
}
customMetricsItem: [
{
timestamp: 0,
value: 0,
},
],
};
export const filteredTraceMetricsReducer = (
state: customMetricsItem[] = [{ timestamp: 0, value: 0 }],
action: Action,
type ActionType = {
type: string;
payload: any;
};
export const metricsReducer = (
state: MetricsInitialState = metricsInitialState,
action: ActionType,
) => {
switch (action.type) {
case ActionTypes.getFilteredTraceMetrics:
return action.payload;
return {
...state,
customMetricsItem: action.payload,
};
case ActionTypes.getServiceMetrics:
return {
...state,
metricItems: action.payload,
};
case ActionTypes.getDbOverviewMetrics:
return {
...state,
dbOverviewMetricsItem: action.payload,
};
case ActionTypes.getExternalMetrics:
return {
...state,
externalMetricsItem: action.payload,
};
case ActionTypes.getTopEndpoints:
return {
...state,
topEndpointListItem: action.payload,
};
case ActionTypes.getErrCodeMetrics:
return {
...state,
externalErrCodeMetricsItem: action.payload,
};
case ActionTypes.getAvgDurationMetrics:
return {
...state,
externalMetricsAvgDurationItem: action.payload,
};
case ActionTypes.getServicesList:
return {
...state,
serviceList: action.payload,
};
default:
return state;
return {
...state,
};
}
};
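The consolidated reducer can be exercised in isolation; a sketch (import paths assumed):

import { metricsReducer, metricsInitialState } from "./metrics";
import { MetricsActionTypes } from "../actions/MetricsActions/metricsActionTypes";

const next = metricsReducer(metricsInitialState, {
type: MetricsActionTypes.getServicesList,
payload: [{ serviceName: "frontend", p99: 0, avgDuration: 0, numCalls: 0, callRate: 0, numErrors: 0, errorRate: 0 }],
});
// Only serviceList is replaced; every other slice keeps its default.
console.log(next.serviceList && next.serviceList[0].serviceName); // "frontend"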

View File

@@ -0,0 +1,23 @@
import { ActionTypes, Action, serviceMapStore } from "../actions";
const initialState: serviceMapStore = {
items: [],
services: [],
};
export const ServiceMapReducer = (state = initialState, action: Action) => {
switch (action.type) {
case ActionTypes.getServiceMapItems:
return {
...state,
items: action.payload,
};
case ActionTypes.getServices:
return {
...state,
services: action.payload,
};
default:
return state;
}
};

View File

@@ -1,19 +1,17 @@
import {
ActionTypes,
TraceFilters,
updateInputTagAction,
updateTraceFiltersAction,
} from "../actions";
import { ActionTypes, TraceFilters } from "../actions";
export const traceFiltersReducer = (
state: TraceFilters = {
service: "",
tags: [],
operation: "",
latency: { min: "", max: "" },
},
action: updateTraceFiltersAction,
) => {
type ACTION = {
type: ActionTypes;
payload: TraceFilters;
};
const initialState: TraceFilters = {
service: "",
tags: [],
operation: "",
latency: { min: "", max: "" },
};
const TraceFilterReducer = (state = initialState, action: ACTION) => {
switch (action.type) {
case ActionTypes.updateTraceFilters:
return action.payload;
@@ -22,14 +20,4 @@ export const traceFiltersReducer = (
}
};
export const inputsReducer = (
state: string = "",
action: updateInputTagAction,
) => {
switch (action.type) {
case ActionTypes.updateInput:
return action.payload;
default:
return state;
}
};
export default TraceFilterReducer;

View File

@@ -1,18 +0,0 @@
// dark-theme.less
@import "~antd/lib/style/color/colorPalette.less";
@import "~antd/dist/antd.less";
@import "~antd/lib/style/themes/dark.less";
// @primary-color: #00adb5;
// @border-radius-base: 4px;
// @component-background: #303030;
// @body-background: #303030;
// @popover-background: #303030;
// @border-color-base: #6f6c6c;
// @border-color-split: #424242;
// @table-header-sort-active-bg: #424242;
// @card-skeleton-bg: #424242;
// @skeleton-color: #424242;
// @table-header-sort-active-bg: #424242;

View File

@@ -1,9 +0,0 @@
/* light-theme.less */
@import "~antd/lib/style/color/colorPalette.less";
@import "~antd/dist/antd.less";
@import "~antd/lib/style/themes/default.less";
/* These are shared variables that can be extracted to their own file */
@primary-color: #00adb5;
@border-radius-base: 4px;

View File

@@ -2,6 +2,7 @@
const { resolve } = require("path");
const HtmlWebpackPlugin = require("html-webpack-plugin");
console.log(resolve(__dirname, "./src/"));
module.exports = {
mode: "development",
devtool: "source-map",
@@ -53,7 +54,9 @@ module.exports = {
},
],
},
plugins: [new HtmlWebpackPlugin({ template: "src/index.html.ejs" })],
plugins: [
new HtmlWebpackPlugin({ template: "src/index.html.ejs" }),
],
performance: {
hints: false,
},

View File

@@ -2,6 +2,7 @@
const { resolve } = require("path");
const HtmlWebpackPlugin = require("html-webpack-plugin");
const CopyPlugin = require("copy-webpack-plugin");
const CompressionPlugin = require("compression-webpack-plugin");
module.exports = {
mode: "production",
@@ -44,6 +45,9 @@ module.exports = {
],
},
plugins: [
new CompressionPlugin({
exclude: /\.map$/
}),
new HtmlWebpackPlugin({ template: "src/index.html.ejs" }),
new CopyPlugin({
patterns: [{ from: resolve(__dirname, "public/"), to: "." }],

File diff suppressed because it is too large

node_modules/.yarn-integrity (generated, vendored, new file)
View File

@@ -0,0 +1,10 @@
{
"systemParams": "darwin-x64-83",
"modulesFolders": [],
"flags": [],
"linkedModules": [],
"topLevelPatterns": [],
"lockfileEntries": {},
"files": [],
"artifacts": {}
}

Binary file not shown.

View File

@@ -0,0 +1,124 @@
package clickhouseReader
import (
"time"
"github.com/jmoiron/sqlx"
)
type Encoding string
const (
// EncodingJSON is used for spans encoded as JSON.
EncodingJSON Encoding = "json"
// EncodingProto is used for spans encoded as Protobuf.
EncodingProto Encoding = "protobuf"
)
const (
defaultDatasource string = "tcp://localhost:9000"
defaultOperationsTable string = "signoz_operations"
defaultIndexTable string = "signoz_index"
defaultSpansTable string = "signoz_spans"
defaultArchiveSpansTable string = "signoz_archive_spans"
defaultWriteBatchDelay time.Duration = 5 * time.Second
defaultWriteBatchSize int = 10000
defaultEncoding Encoding = EncodingJSON
)
const (
suffixEnabled = ".enabled"
suffixDatasource = ".datasource"
suffixOperationsTable = ".operations-table"
suffixIndexTable = ".index-table"
suffixSpansTable = ".spans-table"
suffixWriteBatchDelay = ".write-batch-delay"
suffixWriteBatchSize = ".write-batch-size"
suffixEncoding = ".encoding"
)
// namespaceConfig is ClickHouse's internal configuration data
type namespaceConfig struct {
namespace string
Enabled bool
Datasource string
OperationsTable string
IndexTable string
SpansTable string
WriteBatchDelay time.Duration
WriteBatchSize int
Encoding Encoding
Connector Connector
}
// Connector defines how to connect to the database
type Connector func(cfg *namespaceConfig) (*sqlx.DB, error)
func defaultConnector(cfg *namespaceConfig) (*sqlx.DB, error) {
db, err := sqlx.Open("clickhouse", cfg.Datasource)
if err != nil {
return nil, err
}
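// Ping eagerly so a bad datasource fails at startup instead of on the first query.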
if err := db.Ping(); err != nil {
return nil, err
}
return db, nil
}
// Options store storage plugin related configs
type Options struct {
primary *namespaceConfig
others map[string]*namespaceConfig
}
// NewOptions creates a new Options struct.
func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...string) *Options {
if datasource == "" {
datasource = defaultDatasource
}
options := &Options{
primary: &namespaceConfig{
namespace: primaryNamespace,
Enabled: true,
Datasource: datasource,
OperationsTable: defaultOperationsTable,
IndexTable: defaultIndexTable,
SpansTable: defaultSpansTable,
WriteBatchDelay: defaultWriteBatchDelay,
WriteBatchSize: defaultWriteBatchSize,
Encoding: defaultEncoding,
Connector: defaultConnector,
},
others: make(map[string]*namespaceConfig, len(otherNamespaces)),
}
for _, namespace := range otherNamespaces {
if namespace == archiveNamespace {
options.others[namespace] = &namespaceConfig{
namespace: namespace,
Datasource: datasource,
OperationsTable: "",
IndexTable: "",
SpansTable: defaultArchiveSpansTable,
WriteBatchDelay: defaultWriteBatchDelay,
WriteBatchSize: defaultWriteBatchSize,
Encoding: defaultEncoding,
Connector: defaultConnector,
}
} else {
options.others[namespace] = &namespaceConfig{namespace: namespace}
}
}
return options
}
// getPrimary returns the primary namespace configuration
func (opt *Options) getPrimary() *namespaceConfig {
return opt.primary
}

View File

@@ -0,0 +1,714 @@
package clickhouseReader
import (
"context"
"errors"
"fmt"
"os"
"strconv"
"time"
_ "github.com/ClickHouse/clickhouse-go"
"github.com/jmoiron/sqlx"
"go.signoz.io/query-service/model"
"go.uber.org/zap"
)
const (
primaryNamespace = "clickhouse"
archiveNamespace = "clickhouse-archive"
minTimespanForProgressiveSearch = time.Hour
minTimespanForProgressiveSearchMargin = time.Minute
maxProgressiveSteps = 4
)
var (
ErrNoOperationsTable = errors.New("no operations table supplied")
ErrNoIndexTable = errors.New("no index table supplied")
ErrStartTimeRequired = errors.New("start time is required for search queries")
)
// ClickHouseReader reads spans from ClickHouse
type ClickHouseReader struct {
db *sqlx.DB
operationsTable string
indexTable string
spansTable string
}
// NewReader returns a ClickHouseReader for the database
func NewReader() *ClickHouseReader {
datasource := os.Getenv("ClickHouseUrl")
options := NewOptions(datasource, primaryNamespace, archiveNamespace)
db, err := initialize(options)
if err != nil {
zap.S().Error(err)
}
return &ClickHouseReader{
db: db,
operationsTable: options.primary.OperationsTable,
indexTable: options.primary.IndexTable,
spansTable: options.primary.SpansTable,
}
}
func initialize(options *Options) (*sqlx.DB, error) {
db, err := connect(options.getPrimary())
if err != nil {
return nil, fmt.Errorf("error connecting to primary db: %v", err)
}
return db, nil
}
func connect(cfg *namespaceConfig) (*sqlx.DB, error) {
if cfg.Encoding != EncodingJSON && cfg.Encoding != EncodingProto {
return nil, fmt.Errorf("unknown encoding %q, supported: %q, %q", cfg.Encoding, EncodingJSON, EncodingProto)
}
return cfg.Connector(cfg)
}
func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceItem, error) {
if r.indexTable == "" {
return nil, ErrNoIndexTable
}
serviceItems := []model.ServiceItem{}
query := fmt.Sprintf("SELECT serviceName, quantile(0.99)(durationNano) as p99, avg(durationNano) as avgDuration, count(*) as numCalls FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' GROUP BY serviceName ORDER BY p99 DESC", r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
err := r.db.Select(&serviceItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
////////////////// Below block gets 5xx of services
serviceErrorItems := []model.ServiceItem{}
query = fmt.Sprintf("SELECT serviceName, count(*) as numErrors FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND statusCode>=500 GROUP BY serviceName", r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
err = r.db.Select(&serviceErrorItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
m5xx := make(map[string]int)
for j := range serviceErrorItems {
m5xx[serviceErrorItems[j].ServiceName] = serviceErrorItems[j].NumErrors
}
///////////////////////////////////////////
////////////////// Below block gets 4xx of services
service4xxItems := []model.ServiceItem{}
query = fmt.Sprintf("SELECT serviceName, count(*) as num4xx FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND statusCode>=400 AND statusCode<500 GROUP BY serviceName", r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
err = r.db.Select(&service4xxItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
m4xx := make(map[string]int)
for j := range service4xxItems {
// fix: 4xx counts belong in m4xx (previously written into m5xx by mistake)
m4xx[service4xxItems[j].ServiceName] = service4xxItems[j].Num4XX
}
for i := range serviceItems {
if val, ok := m5xx[serviceItems[i].ServiceName]; ok {
serviceItems[i].NumErrors = val
}
if val, ok := m4xx[serviceItems[i].ServiceName]; ok {
serviceItems[i].Num4XX = val
}
serviceItems[i].CallRate = float32(serviceItems[i].NumCalls) / float32(queryParams.Period)
serviceItems[i].FourXXRate = float32(serviceItems[i].Num4XX) / float32(queryParams.Period)
serviceItems[i].ErrorRate = float32(serviceItems[i].NumErrors) / float32(queryParams.Period)
}
return &serviceItems, nil
}
func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error) {
serviceOverviewItems := []model.ServiceOverviewItem{}
query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, quantile(0.99)(durationNano) as p99, quantile(0.95)(durationNano) as p95,quantile(0.50)(durationNano) as p50, count(*) as numCalls FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND serviceName='%s' GROUP BY time ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)
err := r.db.Select(&serviceOverviewItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
serviceErrorItems := []model.ServiceErrorItem{}
query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, count(*) as numErrors FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND serviceName='%s' AND statusCode>=500 GROUP BY time ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)
err = r.db.Select(&serviceErrorItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
m := make(map[int64]int)
for j := range serviceErrorItems {
timeObj, _ := time.Parse(time.RFC3339Nano, serviceErrorItems[j].Time)
m[int64(timeObj.UnixNano())] = serviceErrorItems[j].NumErrors
}
for i := range serviceOverviewItems {
timeObj, _ := time.Parse(time.RFC3339Nano, serviceOverviewItems[i].Time)
serviceOverviewItems[i].Timestamp = int64(timeObj.UnixNano())
serviceOverviewItems[i].Time = ""
if val, ok := m[serviceOverviewItems[i].Timestamp]; ok {
serviceOverviewItems[i].NumErrors = val
}
serviceOverviewItems[i].ErrorRate = float32(serviceOverviewItems[i].NumErrors) * 100 / float32(serviceOverviewItems[i].NumCalls)
serviceOverviewItems[i].CallRate = float32(serviceOverviewItems[i].NumCalls) / float32(queryParams.StepSeconds)
}
return &serviceOverviewItems, nil
}
func (r *ClickHouseReader) SearchSpans(ctx context.Context, queryParams *model.SpanSearchParams) (*[]model.SearchSpansResult, error) {
query := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, kind, durationNano, tagsKeys, tagsValues FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable)
args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)}
if len(queryParams.ServiceName) != 0 {
query = query + " AND serviceName = ?"
args = append(args, queryParams.ServiceName)
}
if len(queryParams.OperationName) != 0 {
query = query + " AND name = ?"
args = append(args, queryParams.OperationName)
}
if len(queryParams.Kind) != 0 {
query = query + " AND kind = ?"
args = append(args, queryParams.Kind)
}
if len(queryParams.MinDuration) != 0 {
query = query + " AND durationNano >= ?"
args = append(args, queryParams.MinDuration)
}
if len(queryParams.MaxDuration) != 0 {
query = query + " AND durationNano <= ?"
args = append(args, queryParams.MaxDuration)
}
for _, item := range queryParams.Tags {
if item.Key == "error" && item.Value == "true" {
query = query + " AND ( has(tags, 'error:true') OR statusCode>=500)"
continue
}
if item.Operator == "equals" {
query = query + " AND has(tags, ?)"
args = append(args, fmt.Sprintf("%s:%s", item.Key, item.Value))
} else if item.Operator == "contains" {
query = query + " AND tagsValues[indexOf(tagsKeys, ?)] ILIKE ?"
args = append(args, item.Key)
args = append(args, fmt.Sprintf("%%%s%%", item.Value))
} else if item.Operator == "isnotnull" {
query = query + " AND has(tagsKeys, ?)"
args = append(args, item.Key)
} else {
return nil, fmt.Errorf("Tag Operator %s not supported", item.Operator)
}
}
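// The query caps results at the 100 most recent matching spans.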
query = query + " ORDER BY timestamp DESC LIMIT 100"
var searchScanReponses []model.SearchSpanReponseItem
err := r.db.Select(&searchScanReponses, query, args...)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
searchSpansResult := []model.SearchSpansResult{
model.SearchSpansResult{
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues"},
Events: make([][]interface{}, len(searchScanReponses)),
},
}
for i, item := range searchScanReponses {
spanEvents := item.GetValues()
searchSpansResult[0].Events[i] = spanEvents
}
return &searchSpansResult, nil
}
func (r *ClickHouseReader) GetServiceDBOverview(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceDBOverviewItem, error) {
var serviceDBOverviewItems []model.ServiceDBOverviewItem
query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration, count(1) as numCalls, dbSystem FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND dbName IS NOT NULL GROUP BY time, dbSystem ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
err := r.db.Select(&serviceDBOverviewItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
for i := range serviceDBOverviewItems {
timeObj, _ := time.Parse(time.RFC3339Nano, serviceDBOverviewItems[i].Time)
serviceDBOverviewItems[i].Timestamp = int64(timeObj.UnixNano())
serviceDBOverviewItems[i].Time = ""
serviceDBOverviewItems[i].CallRate = float32(serviceDBOverviewItems[i].NumCalls) / float32(queryParams.StepSeconds)
}
if serviceDBOverviewItems == nil {
serviceDBOverviewItems = []model.ServiceDBOverviewItem{}
}
return &serviceDBOverviewItems, nil
}
func (r *ClickHouseReader) GetServiceExternalAvgDuration(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
var serviceExternalItems []model.ServiceExternalItem
query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND externalHttpUrl IS NOT NULL GROUP BY time ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
err := r.db.Select(&serviceExternalItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
for i := range serviceExternalItems {
timeObj, _ := time.Parse(time.RFC3339Nano, serviceExternalItems[i].Time)
serviceExternalItems[i].Timestamp = int64(timeObj.UnixNano())
serviceExternalItems[i].Time = ""
serviceExternalItems[i].CallRate = float32(serviceExternalItems[i].NumCalls) / float32(queryParams.StepSeconds)
}
if serviceExternalItems == nil {
serviceExternalItems = []model.ServiceExternalItem{}
}
return &serviceExternalItems, nil
}
func (r *ClickHouseReader) GetServiceExternalErrors(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
var serviceExternalErrorItems []model.ServiceExternalItem
query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration, count(1) as numCalls, externalHttpUrl FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND externalHttpUrl IS NOT NULL AND statusCode >= 500 GROUP BY time, externalHttpUrl ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
err := r.db.Select(&serviceExternalErrorItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
var serviceExternalTotalItems []model.ServiceExternalItem
queryTotal := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration, count(1) as numCalls, externalHttpUrl FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND externalHttpUrl IS NOT NULL GROUP BY time, externalHttpUrl ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
errTotal := r.db.Select(&serviceExternalTotalItems, queryTotal)
if errTotal != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
m := make(map[string]int)
for j := range serviceExternalErrorItems {
timeObj, _ := time.Parse(time.RFC3339Nano, serviceExternalErrorItems[j].Time)
m[strconv.FormatInt(timeObj.UnixNano(), 10)+"-"+serviceExternalErrorItems[j].ExternalHttpUrl] = serviceExternalErrorItems[j].NumCalls
}
for i := range serviceExternalTotalItems {
timeObj, _ := time.Parse(time.RFC3339Nano, serviceExternalTotalItems[i].Time)
serviceExternalTotalItems[i].Timestamp = int64(timeObj.UnixNano())
serviceExternalTotalItems[i].Time = ""
// serviceExternalTotalItems[i].CallRate = float32(serviceExternalTotalItems[i].NumCalls) / float32(queryParams.StepSeconds)
if val, ok := m[strconv.FormatInt(serviceExternalTotalItems[i].Timestamp, 10)+"-"+serviceExternalTotalItems[i].ExternalHttpUrl]; ok {
serviceExternalTotalItems[i].NumErrors = val
serviceExternalTotalItems[i].ErrorRate = float32(serviceExternalTotalItems[i].NumErrors) * 100 / float32(serviceExternalTotalItems[i].NumCalls)
}
serviceExternalTotalItems[i].CallRate = 0
serviceExternalTotalItems[i].NumCalls = 0
}
if serviceExternalTotalItems == nil {
serviceExternalTotalItems = []model.ServiceExternalItem{}
}
return &serviceExternalTotalItems, nil
}
func (r *ClickHouseReader) GetServiceExternal(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
var serviceExternalItems []model.ServiceExternalItem
query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration, count(1) as numCalls, externalHttpUrl FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND externalHttpUrl IS NOT NULL GROUP BY time, externalHttpUrl ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
err := r.db.Select(&serviceExternalItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
for i := range serviceExternalItems {
timeObj, _ := time.Parse(time.RFC3339Nano, serviceExternalItems[i].Time)
serviceExternalItems[i].Timestamp = int64(timeObj.UnixNano())
serviceExternalItems[i].Time = ""
serviceExternalItems[i].CallRate = float32(serviceExternalItems[i].NumCalls) / float32(queryParams.StepSeconds)
}
if serviceExternalItems == nil {
serviceExternalItems = []model.ServiceExternalItem{}
}
return &serviceExternalItems, nil
}
func (r *ClickHouseReader) GetTopEndpoints(ctx context.Context, queryParams *model.GetTopEndpointsParams) (*[]model.TopEndpointsItem, error) {
var topEndpointsItems []model.TopEndpointsItem
query := fmt.Sprintf("SELECT quantile(0.5)(durationNano) as p50, quantile(0.95)(durationNano) as p95, quantile(0.99)(durationNano) as p99, COUNT(1) as numCalls, name FROM %s WHERE timestamp >= '%s' AND timestamp <= '%s' AND kind='2' and serviceName='%s' GROUP BY name", r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)
err := r.db.Select(&topEndpointsItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
if topEndpointsItems == nil {
topEndpointsItems = []model.TopEndpointsItem{}
}
return &topEndpointsItems, nil
}
func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetUsageParams) (*[]model.UsageItem, error) {
var usageItems []model.UsageItem
var query string
if len(queryParams.ServiceName) != 0 {
query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d HOUR) as time, count(1) as count FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' GROUP BY time ORDER BY time ASC", queryParams.StepHour, r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
} else {
query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d HOUR) as time, count(1) as count FROM %s WHERE timestamp>='%s' AND timestamp<='%s' GROUP BY time ORDER BY time ASC", queryParams.StepHour, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
}
err := r.db.Select(&usageItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
for i := range usageItems {
timeObj, _ := time.Parse(time.RFC3339Nano, usageItems[i].Time)
usageItems[i].Timestamp = int64(timeObj.UnixNano())
usageItems[i].Time = ""
}
if usageItems == nil {
usageItems = []model.UsageItem{}
}
return &usageItems, nil
}
func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, error) {
services := []string{}
query := fmt.Sprintf(`SELECT DISTINCT serviceName FROM %s WHERE toDate(timestamp) > now() - INTERVAL 1 DAY`, r.indexTable)
err := r.db.Select(&services, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
return &services, nil
}
func (r *ClickHouseReader) GetTags(ctx context.Context, serviceName string) (*[]model.TagItem, error) {
tagItems := []model.TagItem{}
query := fmt.Sprintf(`SELECT DISTINCT arrayJoin(tagsKeys) as tagKeys FROM %s WHERE serviceName='%s' AND toDate(timestamp) > now() - INTERVAL 1 DAY`, r.indexTable, serviceName)
err := r.db.Select(&tagItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
return &tagItems, nil
}
func (r *ClickHouseReader) GetOperations(ctx context.Context, serviceName string) (*[]string, error) {
operations := []string{}
query := fmt.Sprintf(`SELECT DISTINCT(name) FROM %s WHERE serviceName='%s' AND toDate(timestamp) > now() - INTERVAL 1 DAY`, r.indexTable, serviceName)
err := r.db.Select(&operations, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
return &operations, nil
}
func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string) (*[]model.SearchSpansResult, error) {
var searchSpanResponses []model.SearchSpanReponseItem
query := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, kind, durationNano, tagsKeys, tagsValues, references FROM %s WHERE traceID='%s'", r.indexTable, traceId)
err := r.db.Select(&searchSpanResponses, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
searchSpansResult := []model.SearchSpansResult{
{
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References"},
Events: make([][]interface{}, len(searchSpanResponses)),
},
}
for i, item := range searchSpanResponses {
spanEvents := item.GetValues()
searchSpansResult[0].Events[i] = spanEvents
}
return &searchSpansResult, nil
}
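SearchTraces returns a single columnar SearchSpansResult: a fixed Columns header plus one positionally aligned Events row per span. A consumption sketch, assuming GetValues emits values in the same order as the Columns slice (as the loop above implies); Reader here is the storage interface introduced later in this diff, and the function would need the context and fmt imports:
func printSpanIDs(ctx context.Context, r Reader, traceID string) error {
res, err := r.SearchTraces(ctx, traceID)
if err != nil {
return err
}
for _, ev := range (*res)[0].Events {
fmt.Println(ev[1]) // index 1 is "SpanId" per the Columns slice
}
return nil
}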
func (r *ClickHouseReader) GetServiceMapDependencies(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) {
serviceMapDependencyItems := []model.ServiceMapDependencyItem{}
query := fmt.Sprintf(`SELECT spanID, parentSpanID, serviceName FROM %s WHERE timestamp>='%s' AND timestamp<='%s'`, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
err := r.db.Select(&serviceMapDependencyItems, query)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
serviceMap := make(map[string]*model.ServiceMapDependencyResponseItem)
spanId2ServiceNameMap := make(map[string]string)
for i := range serviceMapDependencyItems {
spanId2ServiceNameMap[serviceMapDependencyItems[i].SpanId] = serviceMapDependencyItems[i].ServiceName
}
for i := range serviceMapDependencyItems {
parent2childServiceName := spanId2ServiceNameMap[serviceMapDependencyItems[i].ParentSpanId] + "-" + spanId2ServiceNameMap[serviceMapDependencyItems[i].SpanId]
if _, ok := serviceMap[parent2childServiceName]; !ok {
serviceMap[parent2childServiceName] = &model.ServiceMapDependencyResponseItem{
Parent: spanId2ServiceNameMap[serviceMapDependencyItems[i].ParentSpanId],
Child: spanId2ServiceNameMap[serviceMapDependencyItems[i].SpanId],
CallCount: 1,
}
} else {
serviceMap[parent2childServiceName].CallCount++
}
}
retMe := make([]model.ServiceMapDependencyResponseItem, 0, len(serviceMap))
for _, dependency := range serviceMap {
if dependency.Parent == "" {
continue
}
retMe = append(retMe, *dependency)
}
return &retMe, nil
}
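The dependency aggregation above is a two-pass map build: first index spanID to serviceName, then count each parent-child service pair, dropping edges whose parent span was never seen. A runnable toy version of the same idea (span IDs and service names are made up for illustration):
package main
import "fmt"
type span struct{ id, parentID, service string }
func main() {
spans := []span{
{"a", "", "frontend"},
{"b", "a", "cart"},
{"c", "a", "cart"},
}
svcBySpan := map[string]string{}
for _, s := range spans {
svcBySpan[s.id] = s.service
}
calls := map[string]int{}
for _, s := range spans {
calls[svcBySpan[s.parentID]+"-"+s.service]++
}
fmt.Println(calls) // map[-frontend:1 frontend-cart:2]; the real code skips the "" parent
}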
func (r *ClickHouseReader) SearchSpansAggregate(ctx context.Context, queryParams *model.SpanSearchAggregatesParams) ([]model.SpanSearchAggregatesResponseItem, error) {
spanSearchAggregatesResponseItems := []model.SpanSearchAggregatesResponseItem{}
aggregationQuery := ""
if queryParams.Dimension == "duration" {
switch queryParams.AggregationOption {
case "p50":
aggregationQuery = " quantile(0.50)(durationNano) as value "
case "p95":
aggregationQuery = " quantile(0.95)(durationNano) as value "
case "p99":
aggregationQuery = " quantile(0.99)(durationNano) as value "
}
} else if queryParams.Dimension == "calls" {
aggregationQuery = " count(*) as value "
}
query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregationQuery, r.indexTable)
args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)}
if len(queryParams.ServiceName) != 0 {
query = query + " AND serviceName = ?"
args = append(args, queryParams.ServiceName)
}
if len(queryParams.OperationName) != 0 {
query = query + " AND name = ?"
args = append(args, queryParams.OperationName)
}
if len(queryParams.Kind) != 0 {
query = query + " AND kind = ?"
args = append(args, queryParams.Kind)
}
if len(queryParams.MinDuration) != 0 {
query = query + " AND durationNano >= ?"
args = append(args, queryParams.MinDuration)
}
if len(queryParams.MaxDuration) != 0 {
query = query + " AND durationNano <= ?"
args = append(args, queryParams.MaxDuration)
}
for _, item := range queryParams.Tags {
if item.Key == "error" && item.Value == "true" {
query = query + " AND ( has(tags, 'error:true') OR statusCode>=500)"
continue
}
if item.Operator == "equals" {
query = query + " AND has(tags, ?)"
args = append(args, fmt.Sprintf("%s:%s", item.Key, item.Value))
} else if item.Operator == "contains" {
query = query + " AND tagsValues[indexOf(tagsKeys, ?)] ILIKE ?"
args = append(args, item.Key)
args = append(args, fmt.Sprintf("%%%s%%", item.Value))
} else if item.Operator == "isnotnull" {
query = query + " AND has(tagsKeys, ?)"
args = append(args, item.Key)
} else {
return nil, fmt.Errorf("Tag Operator %s not supported", item.Operator)
}
}
query = query + " GROUP BY time ORDER BY time"
err := r.db.Select(&spanSearchAggregatesResponseItems, query, args...)
zap.S().Info(query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return nil, fmt.Errorf("Error in processing sql query")
}
for i := range spanSearchAggregatesResponseItems {
timeObj, _ := time.Parse(time.RFC3339Nano, spanSearchAggregatesResponseItems[i].Time)
spanSearchAggregatesResponseItems[i].Timestamp = int64(timeObj.UnixNano())
spanSearchAggregatesResponseItems[i].Time = ""
if queryParams.AggregationOption == "rate_per_sec" {
spanSearchAggregatesResponseItems[i].Value = float32(spanSearchAggregatesResponseItems[i].Value) / float32(queryParams.StepSeconds)
}
}
return spanSearchAggregatesResponseItems, nil
}
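SearchSpansAggregate builds its WHERE clause incrementally: each optional filter appends a ? placeholder to the SQL and the matching value to args, so placeholders and values stay aligned by construction (unlike the fmt.Sprintf interpolation used elsewhere in this file). A standalone sketch of the pattern, with illustrative table and filter values:
package main
import "fmt"
func main() {
query := "SELECT count(*) as value FROM signoz_index WHERE timestamp >= ? AND timestamp <= ?"
args := []interface{}{"startNano", "endNano"}
serviceName := "frontend"
if len(serviceName) != 0 {
query = query + " AND serviceName = ?"
args = append(args, serviceName)
}
fmt.Println(query)
fmt.Println(args...)
}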

View File

@@ -0,0 +1,99 @@
package druidReader
import (
"context"
"os"
"go.signoz.io/query-service/druidQuery"
"go.signoz.io/query-service/godruid"
"go.signoz.io/query-service/model"
)
type DruidReader struct {
Client *godruid.Client
SqlClient *druidQuery.SqlClient
}
func NewReader() *DruidReader {
initialize()
druidClientUrl := os.Getenv("DruidClientUrl")
client := godruid.Client{
Url: druidClientUrl,
Debug: true,
}
sqlClient := druidQuery.SqlClient{
Url: druidClientUrl,
Debug: true,
}
return &DruidReader{
Client: &client,
SqlClient: &sqlClient,
}
}
func initialize() {
}
func (druid *DruidReader) GetServiceOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error) {
return druidQuery.GetServiceOverview(druid.SqlClient, query)
}
func (druid *DruidReader) GetServices(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceItem, error) {
return druidQuery.GetServices(druid.SqlClient, query)
}
func (druid *DruidReader) SearchSpans(ctx context.Context, query *model.SpanSearchParams) (*[]model.SearchSpansResult, error) {
return druidQuery.SearchSpans(druid.Client, query)
}
func (druid *DruidReader) GetServiceDBOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceDBOverviewItem, error) {
return druidQuery.GetServiceDBOverview(druid.SqlClient, query)
}
func (druid *DruidReader) GetServiceExternalAvgDuration(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
return druidQuery.GetServiceExternalAvgDuration(druid.SqlClient, query)
}
func (druid *DruidReader) GetServiceExternalErrors(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
return druidQuery.GetServiceExternalErrors(druid.SqlClient, query)
}
func (druid *DruidReader) GetServiceExternal(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
return druidQuery.GetServiceExternal(druid.SqlClient, query)
}
func (druid *DruidReader) GetTopEndpoints(ctx context.Context, query *model.GetTopEndpointsParams) (*[]model.TopEndpointsItem, error) {
return druidQuery.GetTopEndpoints(druid.SqlClient, query)
}
func (druid *DruidReader) GetUsage(ctx context.Context, query *model.GetUsageParams) (*[]model.UsageItem, error) {
return druidQuery.GetUsage(druid.SqlClient, query)
}
func (druid *DruidReader) GetOperations(ctx context.Context, serviceName string) (*[]string, error) {
return druidQuery.GetOperations(druid.SqlClient, serviceName)
}
func (druid *DruidReader) GetTags(ctx context.Context, serviceName string) (*[]model.TagItem, error) {
return druidQuery.GetTags(druid.SqlClient, serviceName)
}
func (druid *DruidReader) GetServicesList(ctx context.Context) (*[]string, error) {
return druidQuery.GetServicesList(druid.SqlClient)
}
func (druid *DruidReader) SearchTraces(ctx context.Context, traceId string) (*[]model.SearchSpansResult, error) {
return druidQuery.SearchTraces(druid.Client, traceId)
}
func (druid *DruidReader) GetServiceMapDependencies(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) {
return druidQuery.GetServiceMapDependencies(druid.SqlClient, query)
}
func (druid *DruidReader) SearchSpansAggregate(ctx context.Context, queryParams *model.SpanSearchAggregatesParams) ([]model.SpanSearchAggregatesResponseItem, error) {
return druidQuery.SearchSpansAggregate(druid.Client, queryParams)
}
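DruidReader is a thin adapter: every method delegates to the existing druidQuery package, and NewReader reads the Druid URL from the DruidClientUrl environment variable. A construction sketch (the URL is illustrative):
package main
import (
"os"
"go.signoz.io/query-service/app/druidReader"
)
func main() {
os.Setenv("DruidClientUrl", "http://localhost:8888") // illustrative endpoint
reader := druidReader.NewReader()                    // Client and SqlClient share this URL
_ = reader
}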

View File

@@ -1,14 +1,13 @@
package app
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/gorilla/mux"
"github.com/posthog/posthog-go"
"go.signoz.io/query-service/druidQuery"
"go.signoz.io/query-service/godruid"
"go.uber.org/zap"
)
@@ -23,17 +22,15 @@ type APIHandler struct {
// queryParser queryParser
basePath string
apiPrefix string
client *godruid.Client
sqlClient *druidQuery.SqlClient
reader *Reader
pc *posthog.Client
distinctId string
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(client *godruid.Client, sqlClient *druidQuery.SqlClient, pc *posthog.Client, distinctId string) *APIHandler {
func NewAPIHandler(reader *Reader, pc *posthog.Client, distinctId string) *APIHandler {
aH := &APIHandler{
client: client,
sqlClient: sqlClient,
reader: reader,
pc: pc,
distinctId: distinctId,
}
@@ -59,7 +56,7 @@ type structuredError struct {
func (aH *APIHandler) RegisterRoutes(router *mux.Router) {
router.HandleFunc("/api/v1/user", aH.user).Methods(http.MethodPost)
router.HandleFunc("/api/v1/get_percentiles", aH.getApplicationPercentiles).Methods(http.MethodGet)
// router.HandleFunc("/api/v1/get_percentiles", aH.getApplicationPercentiles).Methods(http.MethodGet)
router.HandleFunc("/api/v1/services", aH.getServices).Methods(http.MethodGet)
router.HandleFunc("/api/v1/services/list", aH.getServicesList).Methods(http.MethodGet)
router.HandleFunc("/api/v1/service/overview", aH.getServiceOverview).Methods(http.MethodGet)
@@ -74,6 +71,7 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router) {
router.HandleFunc("/api/v1/tags", aH.searchTags).Methods(http.MethodGet)
router.HandleFunc("/api/v1/traces/{traceId}", aH.searchTraces).Methods(http.MethodGet)
router.HandleFunc("/api/v1/usage", aH.getUsage).Methods(http.MethodGet)
router.HandleFunc("/api/v1/serviceMapDependencies", aH.serviceMapDependencies).Methods(http.MethodGet)
}
func (aH *APIHandler) user(w http.ResponseWriter, r *http.Request) {
@@ -114,7 +112,7 @@ func (aH *APIHandler) getOperations(w http.ResponseWriter, r *http.Request) {
return
}
result, err := druidQuery.GetOperations(aH.sqlClient, serviceName)
result, err := (*aH.reader).GetOperations(context.Background(), serviceName)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -125,7 +123,7 @@ func (aH *APIHandler) getOperations(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) getServicesList(w http.ResponseWriter, r *http.Request) {
result, err := druidQuery.GetServicesList(aH.sqlClient)
result, err := (*aH.reader).GetServicesList(context.Background())
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -138,7 +136,7 @@ func (aH *APIHandler) searchTags(w http.ResponseWriter, r *http.Request) {
serviceName := r.URL.Query().Get("service")
result, err := druidQuery.GetTags(aH.sqlClient, serviceName)
result, err := (*aH.reader).GetTags(context.Background(), serviceName)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -154,7 +152,8 @@ func (aH *APIHandler) getTopEndpoints(w http.ResponseWriter, r *http.Request) {
return
}
result, err := druidQuery.GetTopEndpoints(aH.sqlClient, query)
result, err := (*aH.reader).GetTopEndpoints(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -170,7 +169,7 @@ func (aH *APIHandler) getUsage(w http.ResponseWriter, r *http.Request) {
return
}
result, err := druidQuery.GetUsage(aH.sqlClient, query)
result, err := (*aH.reader).GetUsage(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -186,7 +185,8 @@ func (aH *APIHandler) getServiceDBOverview(w http.ResponseWriter, r *http.Reques
return
}
result, err := druidQuery.GetServiceDBOverview(aH.sqlClient, query)
result, err := (*aH.reader).GetServiceDBOverview(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -202,7 +202,7 @@ func (aH *APIHandler) getServiceExternal(w http.ResponseWriter, r *http.Request)
return
}
result, err := druidQuery.GetServiceExternal(aH.sqlClient, query)
result, err := (*aH.reader).GetServiceExternal(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -218,7 +218,7 @@ func (aH *APIHandler) GetServiceExternalAvgDuration(w http.ResponseWriter, r *ht
return
}
result, err := druidQuery.GetServiceExternalAvgDuration(aH.sqlClient, query)
result, err := (*aH.reader).GetServiceExternalAvgDuration(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -234,7 +234,7 @@ func (aH *APIHandler) getServiceExternalErrors(w http.ResponseWriter, r *http.Re
return
}
result, err := druidQuery.GetServiceExternalErrors(aH.sqlClient, query)
result, err := (*aH.reader).GetServiceExternalErrors(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -250,7 +250,7 @@ func (aH *APIHandler) getServiceOverview(w http.ResponseWriter, r *http.Request)
return
}
result, err := druidQuery.GetServiceOverview(aH.sqlClient, query)
result, err := (*aH.reader).GetServiceOverview(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -266,7 +266,7 @@ func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) {
return
}
result, err := druidQuery.GetServices(aH.sqlClient, query)
result, err := (*aH.reader).GetServices(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -280,12 +280,28 @@ func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) {
aH.writeJSON(w, r, result)
}
func (aH *APIHandler) serviceMapDependencies(w http.ResponseWriter, r *http.Request) {
query, err := parseGetServicesRequest(r)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
result, err := (*aH.reader).GetServiceMapDependencies(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
aH.writeJSON(w, r, result)
}
func (aH *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
traceId := vars["traceId"]
result, err := druidQuery.SearchTraces(aH.client, traceId)
result, err := (*aH.reader).SearchTraces(context.Background(), traceId)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -293,6 +309,7 @@ func (aH *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
aH.writeJSON(w, r, result)
}
func (aH *APIHandler) searchSpansAggregates(w http.ResponseWriter, r *http.Request) {
query, err := parseSearchSpanAggregatesRequest(r)
@@ -300,7 +317,7 @@ func (aH *APIHandler) searchSpansAggregates(w http.ResponseWriter, r *http.Reque
return
}
result, err := druidQuery.SearchSpansAggregate(aH.client, query)
result, err := (*aH.reader).SearchSpansAggregate(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -315,7 +332,9 @@ func (aH *APIHandler) searchSpans(w http.ResponseWriter, r *http.Request) {
return
}
result, err := druidQuery.SearchSpans(aH.client, query)
// result, err := druidQuery.SearchSpans(aH.client, query)
result, err := (*aH.reader).SearchSpans(context.Background(), query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
@@ -323,20 +342,20 @@ func (aH *APIHandler) searchSpans(w http.ResponseWriter, r *http.Request) {
aH.writeJSON(w, r, result)
}
func (aH *APIHandler) getApplicationPercentiles(w http.ResponseWriter, r *http.Request) {
// vars := mux.Vars(r)
// func (aH *APIHandler) getApplicationPercentiles(w http.ResponseWriter, r *http.Request) {
// // vars := mux.Vars(r)
query, err := parseApplicationPercentileRequest(r)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
// query, err := parseApplicationPercentileRequest(r)
// if aH.handleError(w, err, http.StatusBadRequest) {
// return
// }
result, err := druidQuery.GetApplicationPercentiles(aH.client, query)
if aH.handleError(w, err, http.StatusBadRequest) {
return
}
aH.writeJSON(w, r, result)
}
// result, err := (*aH.reader).GetApplicationPercentiles(context.Background(), query)
// if aH.handleError(w, err, http.StatusBadRequest) {
// return
// }
// aH.writeJSON(w, r, result)
// }
func (aH *APIHandler) handleError(w http.ResponseWriter, err error, statusCode int) bool {
if err == nil {

View File

@@ -0,0 +1,26 @@
package app
import (
"context"
"go.signoz.io/query-service/model"
)
type Reader interface {
GetServiceOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error)
GetServices(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceItem, error)
// GetApplicationPercentiles(ctx context.Context, query *model.ApplicationPercentileParams) ([]godruid.Timeseries, error)
SearchSpans(ctx context.Context, query *model.SpanSearchParams) (*[]model.SearchSpansResult, error)
GetServiceDBOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceDBOverviewItem, error)
GetServiceExternalAvgDuration(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error)
GetServiceExternalErrors(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error)
GetServiceExternal(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error)
GetTopEndpoints(ctx context.Context, query *model.GetTopEndpointsParams) (*[]model.TopEndpointsItem, error)
GetUsage(ctx context.Context, query *model.GetUsageParams) (*[]model.UsageItem, error)
GetOperations(ctx context.Context, serviceName string) (*[]string, error)
GetTags(ctx context.Context, serviceName string) (*[]model.TagItem, error)
GetServicesList(ctx context.Context) (*[]string, error)
SearchTraces(ctx context.Context, traceID string) (*[]model.SearchSpansResult, error)
GetServiceMapDependencies(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error)
SearchSpansAggregate(ctx context.Context, queryParams *model.SpanSearchAggregatesParams) ([]model.SpanSearchAggregatesResponseItem, error)
}
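Both backends are expected to satisfy this interface. A compile-time assertion in package app would catch any drift as methods are added; a sketch, assuming the exported reader types shown earlier in this diff:
var (
_ Reader = (*clickhouseReader.ClickHouseReader)(nil)
_ Reader = (*druidReader.DruidReader)(nil)
)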

View File

@@ -16,7 +16,7 @@ var allowedDimesions = []string{"calls", "duration"}
var allowedAggregations = map[string][]string{
"calls": []string{"count", "rate_per_sec"},
"duration": []string{"avg", "p50", "p90", "p99"},
"duration": []string{"avg", "p50", "p95", "p99"},
}
func parseGetTopEndpointsRequest(r *http.Request) (*model.GetTopEndpointsParams, error) {
@@ -38,6 +38,8 @@ func parseGetTopEndpointsRequest(r *http.Request) (*model.GetTopEndpointsParams,
StartTime: startTime.Format(time.RFC3339Nano),
EndTime: endTime.Format(time.RFC3339Nano),
ServiceName: serviceName,
Start: startTime,
End: endTime,
}
return &getTopEndpointsParams, nil
@@ -64,12 +66,16 @@ func parseGetUsageRequest(r *http.Request) (*model.GetUsageParams, error) {
}
serviceName := r.URL.Query().Get("service")
stepHour := stepInt / 3600
getUsageParams := model.GetUsageParams{
StartTime: startTime.Format(time.RFC3339Nano),
EndTime: endTime.Format(time.RFC3339Nano),
Start: startTime,
End: endTime,
ServiceName: serviceName,
Period: fmt.Sprintf("PT%dH", stepInt/3600),
Period: fmt.Sprintf("PT%dH", stepHour),
StepHour: stepHour,
}
return &getUsageParams, nil
@@ -101,7 +107,9 @@ func parseGetServiceExternalRequest(r *http.Request) (*model.GetServiceOverviewP
}
getServiceOverviewParams := model.GetServiceOverviewParams{
Start: startTime,
StartTime: startTime.Format(time.RFC3339Nano),
End: endTime,
EndTime: endTime.Format(time.RFC3339Nano),
ServiceName: serviceName,
Period: fmt.Sprintf("PT%dM", stepInt/60),
@@ -137,7 +145,9 @@ func parseGetServiceOverviewRequest(r *http.Request) (*model.GetServiceOverviewP
}
getServiceOverviewParams := model.GetServiceOverviewParams{
Start: startTime,
StartTime: startTime.Format(time.RFC3339Nano),
End: endTime,
EndTime: endTime.Format(time.RFC3339Nano),
ServiceName: serviceName,
Period: fmt.Sprintf("PT%dM", stepInt/60),
@@ -160,7 +170,9 @@ func parseGetServicesRequest(r *http.Request) (*model.GetServicesParams, error)
}
getServicesParams := model.GetServicesParams{
Start: startTime,
StartTime: startTime.Format(time.RFC3339Nano),
End: endTime,
EndTime: endTime.Format(time.RFC3339Nano),
Period: int(endTime.Unix() - startTime.Unix()),
}
@@ -222,6 +234,8 @@ func parseSearchSpanAggregatesRequest(r *http.Request) (*model.SpanSearchAggrega
}
params := &model.SpanSearchAggregatesParams{
Start: startTime,
End: endTime,
Intervals: fmt.Sprintf("%s/%s", startTimeStr, endTimeStr),
GranOrigin: startTimeStr,
GranPeriod: granPeriod,
@@ -241,6 +255,12 @@ func parseSearchSpanAggregatesRequest(r *http.Request) (*model.SpanSearchAggrega
zap.S().Debug("Operation Name: ", operationName)
}
kind := r.URL.Query().Get("kind")
if len(kind) != 0 {
params.Kind = kind
zap.S().Debug("Kind: ", kind)
}
minDuration, err := parseTimestamp("minDuration", r)
if err == nil {
params.MinDuration = *minDuration
@@ -277,6 +297,8 @@ func parseSpanSearchRequest(r *http.Request) (*model.SpanSearchParams, error) {
// fmt.Println(startTimeStr)
params := &model.SpanSearchParams{
Intervals: fmt.Sprintf("%s/%s", startTimeStr, endTimeStr),
Start: startTime,
End: endTime,
Limit: 100,
Order: "descending",
}
@@ -292,6 +314,12 @@ func parseSpanSearchRequest(r *http.Request) (*model.SpanSearchParams, error) {
zap.S().Debug("Operation Name: ", operationName)
}
kind := r.URL.Query().Get("kind")
if len(kind) != 0 {
params.Kind = kind
zap.S().Debug("Kind: ", kind)
}
minDuration, err := parseTimestamp("minDuration", r)
if err == nil {
params.MinDuration = *minDuration
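parseGetUsageRequest now derives StepHour and the Druid Period string from the same stepInt, so the two representations can no longer drift apart. A quick check of the arithmetic (the step value is illustrative):
package main
import "fmt"
func main() {
stepInt := 7200 // seconds, e.g. from the step query parameter
stepHour := stepInt / 3600
fmt.Printf("StepHour=%d Period=PT%dH\n", stepHour, stepHour) // StepHour=2 Period=PT2H
}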

View File

@@ -1,8 +1,10 @@
package app
import (
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/google/uuid"
@@ -11,16 +13,16 @@ import (
"github.com/posthog/posthog-go"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"go.signoz.io/query-service/druidQuery"
"go.signoz.io/query-service/godruid"
"go.signoz.io/query-service/app/clickhouseReader"
"go.signoz.io/query-service/app/druidReader"
"go.signoz.io/query-service/healthcheck"
"go.signoz.io/query-service/utils"
"go.uber.org/zap"
)
type ServerOptions struct {
HTTPHostPort string
DruidClientUrl string
HTTPHostPort string
// DruidClientUrl string
}
// Server runs HTTP, Mux and a grpc server
@@ -28,11 +30,10 @@ type Server struct {
// logger *zap.Logger
// querySvc *querysvc.QueryService
// queryOptions *QueryOptions
serverOptions *ServerOptions
// tracer opentracing.Tracer // TODO make part of flags.Service
conn net.Listener
serverOptions *ServerOptions
conn net.Listener
// grpcConn net.Listener
httpConn net.Listener
// grpcServer *grpc.Server
@@ -64,6 +65,11 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
// if err != nil {
// return nil, err
// }
httpServer, err := createHTTPServer()
if err != nil {
return nil, err
}
return &Server{
// logger: logger,
@@ -72,7 +78,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
// tracer: tracer,
// grpcServer: grpcServer,
serverOptions: serverOptions,
httpServer: createHTTPServer(serverOptions.DruidClientUrl),
httpServer: httpServer,
separatePorts: true,
// separatePorts: grpcPort != httpPort,
unavailableChannel: make(chan healthcheck.Status),
@@ -82,22 +88,25 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
var posthogClient posthog.Client
var distinctId string
func createHTTPServer(druidClientUrl string) *http.Server {
func createHTTPServer() (*http.Server, error) {
posthogClient = posthog.New("H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w")
distinctId = uuid.New().String()
client := godruid.Client{
Url: druidClientUrl,
Debug: true,
var reader Reader
storage := os.Getenv("STORAGE")
if storage == "druid" {
zap.S().Info("Using Apache Druid as datastore ...")
reader = druidReader.NewReader()
} else if storage == "clickhouse" {
zap.S().Info("Using ClickHouse as datastore ...")
reader = clickhouseReader.NewReader()
} else {
return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
}
sqlClient := druidQuery.SqlClient{
Url: druidClientUrl,
Debug: true,
}
apiHandler := NewAPIHandler(&client, &sqlClient, &posthogClient, distinctId)
apiHandler := NewAPIHandler(&reader, &posthogClient, distinctId)
r := NewRouter()
r.Use(analyticsMiddleware)
@@ -118,7 +127,7 @@ func createHTTPServer(druidClientUrl string) *http.Server {
return &http.Server{
Handler: handler,
}
}, nil
}
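createHTTPServer now picks the datastore from the STORAGE environment variable and fails fast on anything it does not recognize. A usage sketch from inside package app (values illustrative):
os.Setenv("STORAGE", "clickhouse")
srv, err := createHTTPServer()
if err != nil {
zap.S().Fatal(err) // unsupported STORAGE values land here
}
_ = srv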
func loggingMiddleware(next http.Handler) http.Handler {

View File

@@ -3,6 +3,7 @@ package druidQuery
import (
"encoding/json"
"fmt"
"strconv"
"time"
"go.signoz.io/query-service/constants"
@@ -10,77 +11,6 @@ import (
"go.uber.org/zap"
)
type ServiceItem struct {
ServiceName string `json:"serviceName"`
Percentile99 float32 `json:"p99"`
AvgDuration float32 `json:"avgDuration"`
NumCalls int `json:"numCalls"`
CallRate float32 `json:"callRate"`
NumErrors int `json:"numErrors"`
ErrorRate float32 `json:"errorRate"`
}
type ServiceListErrorItem struct {
ServiceName string `json:"serviceName"`
NumErrors int `json:"numErrors"`
}
type ServiceErrorItem struct {
Time string `json:"time,omitempty"`
Timestamp int64 `json:"timestamp"`
NumErrors int `json:"numErrors"`
}
type ServiceOverviewItem struct {
Time string `json:"time,omitempty"`
Timestamp int64 `json:"timestamp"`
Percentile50 float32 `json:"p50"`
Percentile90 float32 `json:"p90"`
Percentile99 float32 `json:"p99"`
NumCalls int `json:"numCalls"`
CallRate float32 `json:"callRate"`
NumErrors int `json:"numErrors"`
ErrorRate float32 `json:"errorRate"`
}
type ServiceExternalItem struct {
Time string `json:"time,omitempty"`
Timestamp int64 `json:"timestamp,omitempty"`
ExternalHttpUrl string `json:"externalHttpUrl,omitempty"`
AvgDuration float32 `json:"avgDuration,omitempty"`
NumCalls int `json:"numCalls,omitempty"`
CallRate float32 `json:"callRate,omitempty"`
NumErrors int `json:"numErrors"`
ErrorRate float32 `json:"errorRate"`
}
type ServiceDBOverviewItem struct {
Time string `json:"time,omitempty"`
Timestamp int64 `json:"timestamp,omitempty"`
DBSystem string `json:"dbSystem,omitempty"`
AvgDuration float32 `json:"avgDuration,omitempty"`
NumCalls int `json:"numCalls,omitempty"`
CallRate float32 `json:"callRate,omitempty"`
}
type UsageItem struct {
Time string `json:"time,omitempty"`
Timestamp int64 `json:"timestamp"`
Count int64 `json:"count"`
}
type TopEnpointsItem struct {
Percentile50 float32 `json:"p50"`
Percentile90 float32 `json:"p90"`
Percentile99 float32 `json:"p99"`
NumCalls int `json:"numCalls"`
Name string `json:"name"`
}
type TagItem struct {
TagKeys string `json:"tagKeys"`
TagCount int `json:"tagCount"`
}
func GetOperations(client *SqlClient, serviceName string) (*[]string, error) {
sqlQuery := fmt.Sprintf(`SELECT DISTINCT(Name) FROM %s WHERE ServiceName='%s' AND __time > CURRENT_TIMESTAMP - INTERVAL '1' DAY`, constants.DruidDatasource, serviceName)
@@ -112,7 +42,7 @@ func GetOperations(client *SqlClient, serviceName string) (*[]string, error) {
func GetServicesList(client *SqlClient) (*[]string, error) {
sqlQuery := fmt.Sprintf(`SELECT DISTINCT(ServiceName) FROM %s`, constants.DruidDatasource)
sqlQuery := fmt.Sprintf(`SELECT DISTINCT(ServiceName) FROM %s WHERE __time > CURRENT_TIMESTAMP - INTERVAL '1' DAY`, constants.DruidDatasource)
// zap.S().Debug(sqlQuery)
response, err := client.Query(sqlQuery, "array")
@@ -139,7 +69,7 @@ func GetServicesList(client *SqlClient) (*[]string, error) {
return &servicesListReponse, nil
}
func GetTags(client *SqlClient, serviceName string) (*[]TagItem, error) {
func GetTags(client *SqlClient, serviceName string) (*[]model.TagItem, error) {
var sqlQuery string
@@ -160,7 +90,7 @@ func GetTags(client *SqlClient, serviceName string) (*[]TagItem, error) {
// zap.S().Info(string(response))
res := new([]TagItem)
res := new([]model.TagItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
@@ -171,9 +101,9 @@ func GetTags(client *SqlClient, serviceName string) (*[]TagItem, error) {
return &tagResponse, nil
}
func GetTopEndpoints(client *SqlClient, query *model.GetTopEndpointsParams) (*[]TopEnpointsItem, error) {
func GetTopEndpoints(client *SqlClient, query *model.GetTopEndpointsParams) (*[]model.TopEndpointsItem, error) {
sqlQuery := fmt.Sprintf(`SELECT APPROX_QUANTILE_DS("QuantileDuration", 0.5) as p50, APPROX_QUANTILE_DS("QuantileDuration", 0.9) as p90, APPROX_QUANTILE_DS("QuantileDuration", 0.99) as p99, COUNT(SpanId) as numCalls, Name FROM "%s" WHERE "__time" >= '%s' AND "__time" <= '%s' AND "Kind"='2' and "ServiceName"='%s' GROUP BY Name`, constants.DruidDatasource, query.StartTime, query.EndTime, query.ServiceName)
sqlQuery := fmt.Sprintf(`SELECT APPROX_QUANTILE_DS("QuantileDuration", 0.5) as p50, APPROX_QUANTILE_DS("QuantileDuration", 0.95) as p95, APPROX_QUANTILE_DS("QuantileDuration", 0.99) as p99, COUNT(SpanId) as numCalls, Name FROM "%s" WHERE "__time" >= '%s' AND "__time" <= '%s' AND "Kind"='2' and "ServiceName"='%s' GROUP BY Name`, constants.DruidDatasource, query.StartTime, query.EndTime, query.ServiceName)
// zap.S().Debug(sqlQuery)
@@ -186,7 +116,7 @@ func GetTopEndpoints(client *SqlClient, query *model.GetTopEndpointsParams) (*[]
// zap.S().Info(string(response))
res := new([]TopEnpointsItem)
res := new([]model.TopEndpointsItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
@@ -197,7 +127,7 @@ func GetTopEndpoints(client *SqlClient, query *model.GetTopEndpointsParams) (*[]
return &topEnpointsResponse, nil
}
func GetUsage(client *SqlClient, query *model.GetUsageParams) (*[]UsageItem, error) {
func GetUsage(client *SqlClient, query *model.GetUsageParams) (*[]model.UsageItem, error) {
var sqlQuery string
@@ -220,7 +150,7 @@ func GetUsage(client *SqlClient, query *model.GetUsageParams) (*[]UsageItem, err
// zap.S().Info(string(response))
res := new([]UsageItem)
res := new([]model.UsageItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
@@ -237,7 +167,7 @@ func GetUsage(client *SqlClient, query *model.GetUsageParams) (*[]UsageItem, err
return &usageResponse, nil
}
func GetServiceExternalAvgDuration(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceExternalItem, error) {
func GetServiceExternalAvgDuration(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", AVG(DurationNano) as "avgDuration" FROM %s WHERE ServiceName='%s' AND Kind='3' AND ExternalHttpUrl != '' AND "__time" >= '%s' AND "__time" <= '%s'
GROUP BY TIME_FLOOR(__time, '%s')`, query.Period, constants.DruidDatasource, query.ServiceName, query.StartTime, query.EndTime, query.Period)
@@ -254,7 +184,7 @@ func GetServiceExternalAvgDuration(client *SqlClient, query *model.GetServiceOve
// responseStr := string(response)
// zap.S().Info(responseStr)
res := new([]ServiceExternalItem)
res := new([]model.ServiceExternalItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
@@ -273,7 +203,7 @@ func GetServiceExternalAvgDuration(client *SqlClient, query *model.GetServiceOve
return &servicesExternalResponse, nil
}
func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceExternalItem, error) {
func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", COUNT(SpanId) as "numCalls", ExternalHttpUrl as externalHttpUrl FROM %s WHERE ServiceName='%s' AND Kind='3' AND ExternalHttpUrl != '' AND StatusCode >= 500 AND "__time" >= '%s' AND "__time" <= '%s'
GROUP BY TIME_FLOOR(__time, '%s'), ExternalHttpUrl`, query.Period, constants.DruidDatasource, query.ServiceName, query.StartTime, query.EndTime, query.Period)
@@ -290,7 +220,7 @@ func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverview
// responseStr := string(response)
// zap.S().Info(responseStr)
res := new([]ServiceExternalItem)
res := new([]model.ServiceExternalItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
@@ -312,18 +242,18 @@ func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverview
// responseStr := string(response)
// zap.S().Info(responseStr)
resTotal := new([]ServiceExternalItem)
resTotal := new([]model.ServiceExternalItem)
err = json.Unmarshal(responseTotal, resTotal)
if err != nil {
zap.S().Error(err)
return nil, fmt.Errorf("Error in unmarshalling response from druid")
}
m := make(map[int64]int)
m := make(map[string]int)
for j := range *res {
timeObj, _ := time.Parse(time.RFC3339Nano, (*res)[j].Time)
m[int64(timeObj.UnixNano())] = (*res)[j].NumCalls
m[strconv.FormatInt(timeObj.UnixNano(), 10)+"-"+(*res)[j].ExternalHttpUrl] = (*res)[j].NumCalls
}
for i := range *resTotal {
@@ -332,7 +262,7 @@ func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverview
(*resTotal)[i].Time = ""
(*resTotal)[i].CallRate = float32((*resTotal)[i].NumCalls) / float32(query.StepSeconds)
if val, ok := m[(*resTotal)[i].Timestamp]; ok {
if val, ok := m[strconv.FormatInt((*resTotal)[i].Timestamp, 10)+"-"+(*resTotal)[i].ExternalHttpUrl]; ok {
(*resTotal)[i].NumErrors = val
(*resTotal)[i].ErrorRate = float32((*resTotal)[i].NumErrors) * 100 / float32((*resTotal)[i].NumCalls)
}
@@ -345,7 +275,7 @@ func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverview
return &servicesExternalResponse, nil
}
func GetServiceExternal(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceExternalItem, error) {
func GetServiceExternal(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", AVG(DurationNano) as "avgDuration", COUNT(SpanId) as "numCalls", ExternalHttpUrl as externalHttpUrl FROM %s WHERE ServiceName='%s' AND Kind='3' AND ExternalHttpUrl != ''
AND "__time" >= '%s' AND "__time" <= '%s'
@@ -363,7 +293,7 @@ func GetServiceExternal(client *SqlClient, query *model.GetServiceOverviewParams
// responseStr := string(response)
// zap.S().Info(responseStr)
res := new([]ServiceExternalItem)
res := new([]model.ServiceExternalItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
@@ -382,7 +312,7 @@ func GetServiceExternal(client *SqlClient, query *model.GetServiceOverviewParams
return &servicesExternalResponse, nil
}
func GetServiceDBOverview(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceDBOverviewItem, error) {
func GetServiceDBOverview(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceDBOverviewItem, error) {
sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", AVG(DurationNano) as "avgDuration", COUNT(SpanId) as "numCalls", DBSystem as "dbSystem" FROM %s WHERE ServiceName='%s' AND Kind='3' AND DBName IS NOT NULL
AND "__time" >= '%s' AND "__time" <= '%s'
@@ -400,7 +330,7 @@ func GetServiceDBOverview(client *SqlClient, query *model.GetServiceOverviewPara
// responseStr := string(response)
// zap.S().Info(responseStr)
res := new([]ServiceDBOverviewItem)
res := new([]model.ServiceDBOverviewItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
@@ -419,9 +349,9 @@ func GetServiceDBOverview(client *SqlClient, query *model.GetServiceOverviewPara
return &servicesDBOverviewResponse, nil
}
func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceOverviewItem, error) {
func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error) {
sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", APPROX_QUANTILE_DS("QuantileDuration", 0.5) as p50, APPROX_QUANTILE_DS("QuantileDuration", 0.9) as p90,
sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", APPROX_QUANTILE_DS("QuantileDuration", 0.5) as p50, APPROX_QUANTILE_DS("QuantileDuration", 0.95) as p95,
APPROX_QUANTILE_DS("QuantileDuration", 0.99) as p99, COUNT("SpanId") as "numCalls" FROM "%s" WHERE "__time" >= '%s' and "__time" <= '%s' and "Kind"='2' and "ServiceName"='%s' GROUP BY TIME_FLOOR(__time, '%s') `, query.Period, constants.DruidDatasource, query.StartTime, query.EndTime, query.ServiceName, query.Period)
// zap.S().Debug(sqlQuery)
@@ -435,7 +365,7 @@ func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams
// zap.S().Info(string(response))
res := new([]ServiceOverviewItem)
res := new([]model.ServiceOverviewItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
@@ -455,7 +385,7 @@ func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams
// zap.S().Info(string(response))
resError := new([]ServiceErrorItem)
resError := new([]model.ServiceErrorItem)
err = json.Unmarshal(responseError, resError)
if err != nil {
zap.S().Error(err)
@@ -485,7 +415,7 @@ func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams
return &servicesOverviewResponse, nil
}
func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceItem, error) {
func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]model.ServiceItem, error) {
sqlQuery := fmt.Sprintf(`SELECT APPROX_QUANTILE_DS("QuantileDuration", 0.99) as "p99", AVG("DurationNano") as "avgDuration", COUNT(SpanId) as numCalls, "ServiceName" as "serviceName" FROM %s WHERE "__time" >= '%s' and "__time" <= '%s' and "Kind"='2' GROUP BY "ServiceName" ORDER BY "p99" DESC`, constants.DruidDatasource, query.StartTime, query.EndTime)
@@ -500,13 +430,15 @@ func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceI
// zap.S().Info(string(response))
res := new([]ServiceItem)
res := new([]model.ServiceItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
return nil, fmt.Errorf("Error in unmarshalling response from druid")
}
////////////////// Below block gets 5xx of services
sqlQuery = fmt.Sprintf(`SELECT COUNT(SpanId) as numErrors, "ServiceName" as "serviceName" FROM %s WHERE "__time" >= '%s' and "__time" <= '%s' and "Kind"='2' and "StatusCode">=500 GROUP BY "ServiceName"`, constants.DruidDatasource, query.StartTime, query.EndTime)
responseError, err := client.Query(sqlQuery, "object")
@@ -520,7 +452,7 @@ func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceI
// zap.S().Info(string(response))
resError := new([]ServiceListErrorItem)
resError := new([]model.ServiceListErrorItem)
err = json.Unmarshal(responseError, resError)
if err != nil {
zap.S().Error(err)
@@ -533,12 +465,48 @@ func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceI
m[(*resError)[j].ServiceName] = (*resError)[j].NumErrors
}
///////////////////////////////////////////
////////////////// Below block gets 4xx of services
sqlQuery = fmt.Sprintf(`SELECT COUNT(SpanId) as num4xx, "ServiceName" as "serviceName" FROM %s WHERE "__time" >= '%s' and "__time" <= '%s' and "Kind"='2' and "StatusCode">=400 and "StatusCode" < 500 GROUP BY "ServiceName"`, constants.DruidDatasource, query.StartTime, query.EndTime)
response4xx, err := client.Query(sqlQuery, "object")
// zap.S().Debug(sqlQuery)
if err != nil {
zap.S().Error(query, err)
return nil, fmt.Errorf("Something went wrong in druid query")
}
// zap.S().Info(string(response))
res4xx := new([]model.ServiceListErrorItem)
err = json.Unmarshal(response4xx, res4xx)
if err != nil {
zap.S().Error(err)
return nil, fmt.Errorf("Error in unmarshalling response from druid")
}
m4xx := make(map[string]int)
for j := range *res4xx {
m4xx[(*res4xx)[j].ServiceName] = (*res4xx)[j].Num4xx
}
///////////////////////////////////////////
for i := range *res {
if val, ok := m[(*res)[i].ServiceName]; ok {
(*res)[i].NumErrors = val
}
if val, ok := m4xx[(*res)[i].ServiceName]; ok {
(*res)[i].Num4XX = val
}
(*res)[i].FourXXRate = float32((*res)[i].Num4XX) * 100 / float32((*res)[i].NumCalls)
(*res)[i].ErrorRate = float32((*res)[i].NumErrors) * 100 / float32((*res)[i].NumCalls)
(*res)[i].CallRate = float32((*res)[i].NumCalls) / float32(query.Period)
@@ -546,3 +514,58 @@ func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceI
servicesResponse := (*res)[1:]
return &servicesResponse, nil
}
func GetServiceMapDependencies(client *SqlClient, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) {
sqlQuery := fmt.Sprintf(`SELECT SpanId, ParentSpanId, ServiceName FROM %s WHERE "__time" >= '%s' AND "__time" <= '%s' ORDER BY __time DESC LIMIT 100000`, constants.DruidDatasource, query.StartTime, query.EndTime)
// zap.S().Debug(sqlQuery)
response, err := client.Query(sqlQuery, "object")
if err != nil {
zap.S().Error(query, err)
return nil, fmt.Errorf("Something went wrong in druid query")
}
// responseStr := string(response)
// zap.S().Info(responseStr)
res := new([]model.ServiceMapDependencyItem)
err = json.Unmarshal(response, res)
if err != nil {
zap.S().Error(err)
return nil, fmt.Errorf("Error in unmarshalling response from druid")
}
// resCount := len(*res)
// fmt.Println(resCount)
serviceMap := make(map[string]*model.ServiceMapDependencyResponseItem)
spanId2ServiceNameMap := make(map[string]string)
for i := range *res {
spanId2ServiceNameMap[(*res)[i].SpanId] = (*res)[i].ServiceName
}
for i := range *res {
parent2childServiceName := spanId2ServiceNameMap[(*res)[i].ParentSpanId] + "-" + spanId2ServiceNameMap[(*res)[i].SpanId]
if _, ok := serviceMap[parent2childServiceName]; !ok {
serviceMap[parent2childServiceName] = &model.ServiceMapDependencyResponseItem{
Parent: spanId2ServiceNameMap[(*res)[i].ParentSpanId],
Child: spanId2ServiceNameMap[(*res)[i].SpanId],
CallCount: 1,
}
} else {
serviceMap[parent2childServiceName].CallCount++
}
}
retMe := make([]model.ServiceMapDependencyResponseItem, 0, len(serviceMap))
for _, dependency := range serviceMap {
if dependency.Parent == "" {
continue
}
retMe = append(retMe, *dependency)
}
return &retMe, nil
}
