Compare commits

...

336 Commits
v0.7.4 ... v0.8

Author SHA1 Message Date
Prashant Shahi
1f2ec0d728 chore(release): 📌 pin versions: SigNoz 0.8.2, OtelCollector 0.45.1-0.3
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-15 01:47:04 +05:30
Srikanth Chekuri
4f12f8c85c fix: incorrect 5xx rate calculation (#1229) 2022-06-14 01:09:44 +05:30
Ankit Nayan
7b315c6766 Merge pull request #1246 from SigNoz/release/v0.8.1
Release/v0.8.1
2022-06-09 21:17:59 +05:30
Prashant Shahi
676fe892a5 chore(release): 📌 pin versions: OtelCollectors 0.45.1-0.2 and config changes
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-09 20:58:10 +05:30
Prashant Shahi
15260e0e14 Merge branch 'main' into release/v0.8.1 2022-06-09 17:24:12 +05:30
Prashant Shahi
ce7be6e7cd chore(release): 📌 pin versions: SigNoz 0.8.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-09 17:20:11 +05:30
palash-signoz
99d38860cb Merge pull request #1243 from pranshuchittora/pranshuchittora/feat/dashboard-save-rbac
feat(FE): save dashboard with RBAC permissions
2022-06-09 12:23:18 +05:30
Pranshu Chittora
1f4f281965 feat(FE): save dashboard with RBAC permissions 2022-06-09 12:04:47 +05:30
palash-signoz
4aa4bf9ea2 Merge pull request #1242 from pranshuchittora/pranshuchittora/feat/dashboard-edit-permission
feat(FE): dashboard edit permission based on RBAC
2022-06-08 23:25:21 +05:30
Pranshu Chittora
052eb25cff chore(FE): sidebar red dot styling 2022-06-08 23:15:48 +05:30
Pranshu Chittora
ce14638a63 feat(FE): dashboard edit permission based on RBAC 2022-06-08 22:57:34 +05:30
Prashant Shahi
fa142707dc chore(alertmanager): 🔧 use query-service internalport (#1241)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-08 16:03:48 +05:30
Amol Umbark
5ae4e05c96 HTTP listener for internal services (#1238)
* feat: added private http server to handle internal service requests
* feat: added private port default to constants
2022-06-08 12:22:25 +05:30
palash-signoz
b7d52b8fba fix: dashboard is updated (#1240)
* fix: dashboard is updated

* fix: redux is made empty when creating dashboard

Co-authored-by: Palash gupta <palash@signoz.io>
2022-06-08 11:50:41 +05:30
palash-signoz
1c90e62189 feat: dashboard layout is updated (#1221)
* feat: dashboard layout is updated

* feat: onClick is made fixed

* feat: layout is updated

* feat: layout is updated

* feat: layout is updated

* fix: memo is removed and grid layout component is refactored to use use query

* fix: saveDashboard is updated

* feat: layout is fixed

* fix: tsc error are fixed

* fix: delete widgets is updated

* fix: useMount once is added

* fix: useMount once is removed

* chore: removed the commented code

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-06-07 16:14:49 +05:30
palash-signoz
cfeb631a6e Merge pull request #1217 from palash-signoz/1215-signup
fix: button is disabled until condition is met
2022-06-07 16:02:40 +05:30
palash-signoz
4fc4ab0611 Merge branch 'develop' into 1215-signup 2022-06-01 18:16:08 +05:30
palash-signoz
b107902c31 Merge pull request #1220 from palash-signoz/1219-video-update
chore: video link is updated
2022-06-01 18:15:52 +05:30
palash-signoz
2d83afd0c4 Merge branch 'develop' into 1215-signup 2022-05-31 11:20:29 +05:30
palash-signoz
e641577e1c Merge branch 'develop' into 1219-video-update 2022-05-31 11:19:49 +05:30
Palash gupta
3e4b56e012 chore: video link is updated 2022-05-31 11:18:52 +05:30
palash-signoz
697fd1d1bf Merge pull request #1209 from palash-signoz/dashboard-layout-fix
fix: layout is updated
2022-05-30 22:31:13 +05:30
palash-signoz
21dbdb57da Merge branch 'develop' into dashboard-layout-fix 2022-05-30 22:21:17 +05:30
palash-signoz
3406bcaa5f Merge branch 'develop' into 1215-signup 2022-05-30 22:06:05 +05:30
Palash gupta
de0fd64a5e fix: button is disabled until condition is met 2022-05-30 22:04:45 +05:30
Pranay Prateek
c27c026e25 Update SECURITY.md 2022-05-30 17:22:47 +05:30
Pranay Prateek
0a4bc7e181 Update SECURITY.md 2022-05-30 17:22:20 +05:30
Pranay Prateek
b6cfe9d08e Update SECURITY.md 2022-05-30 17:14:01 +05:30
Pranay Prateek
b5b9f20b1f Update SECURITY.md 2022-05-30 17:13:34 +05:30
Pranay Prateek
25c6106bd6 Create SECURITY.md 2022-05-30 17:04:02 +05:30
Palash gupta
d5877337ec fix: layout is updated 2022-05-27 07:04:22 +05:30
Palash gupta
51e0972219 fix: layout is updated 2022-05-27 07:03:12 +05:30
palash-signoz
38c0bcf4ea fix: trace table is fixed (#1208) 2022-05-26 16:51:18 +05:30
palash-signoz
d863c2781a feat: dashboard layout is updated from widgets (#1207) 2022-05-26 15:09:59 +05:30
Prashant Shahi
642c6c5920 chore: TTL and S3 config related changes (#1201)
* fix: 🐛 convert TTL APIs to async

* chore: add archive support

* chore: update TTL async APIs according to new design

* chore: 🔥 clean removeTTL API

* fix: metrics s3 config

* feat: ttl async with polling (#1195)

* feat: ttl state message change and time unit language changes (#1197)

* test:  update tests for async TTL api

* feat: ttl message info icon (#1202)

* feat: ttl pr review changes

* chore: refactoring

Co-authored-by: makeavish <makeavish786@gmail.com>
Co-authored-by: Pranshu Chittora <pranshu@signoz.io>
Co-authored-by: palash-signoz <palash@signoz.io>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-05-25 18:19:44 +05:30
Prashant Shahi
f92e4798ce refactor: ⚰️ Remove deprecated flattener and Druid leftover files (#1194)
* refactor: ⚰️ Remove flattener from Makefile

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* refactor: ⚰️ Remove deprecated Druid leftover files

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-05-25 18:06:14 +05:30
Vishal Sharma
5d080f5564 fix: 🐛 convert TTL APIs to async #902 (#1173)
* fix: 🐛 convert TTL APIs to async

* chore: add archive support

* chore: update TTL async APIs according to new design

* chore: 🔥 clean removeTTL API

* fix: metrics s3 config

* test:  update tests for async TTL api

* chore: refactoring

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-05-25 16:55:30 +05:30
palash-signoz
eb9a8e3a97 feat: color is updated (#1198) 2022-05-25 10:48:53 +05:30
palash-signoz
4a13c524a3 chore: test result is added in the .gitignore (#1191)
* chore: test result is added in the .gitignore

* chore: cypress is removed from gitignore
2022-05-24 19:11:11 +05:30
palash-signoz
7c3edec3e6 Merge pull request #1190 from palash-signoz/react-version-resolution
fix: react version is made fixed
2022-05-24 18:37:07 +05:30
palash-signoz
199d6b6213 Merge branch 'develop' into react-version-resolution 2022-05-24 11:23:09 +05:30
palash-signoz
3d46abc1e9 Merge pull request #1189 from palash-signoz/1161-service-map
fix: handle the broken state in service map
2022-05-24 11:20:50 +05:30
palash-signoz
e6496ee67b Merge branch 'develop' into 1161-service-map 2022-05-23 16:05:09 +05:30
Pranay Prateek
fa6d5a7404 Merge branch 'develop' into react-version-resolution 2022-05-23 11:34:08 +05:30
Prashant Shahi
bd6153225f ci(build): 👷 Update build-pipeline workflow (#1187)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-23 11:33:33 +05:30
Palash gupta
bcceaf7937 fix: react version is made fixed 2022-05-22 21:25:12 +05:30
Palash gupta
4a287fd112 fix: handle the broken state 2022-05-22 20:59:35 +05:30
palash-signoz
8ec9cb2222 Merge pull request #1184 from pranshuchittora/pranshuchittora/fix/0.8.1/tsc
fix: ts typings and remove cypress types
2022-05-20 23:25:05 +05:30
palash-signoz
d3094e10bf Merge branch 'develop' into pranshuchittora/fix/0.8.1/tsc 2022-05-20 22:09:18 +05:30
Ankit Nayan
973ef56c09 Revert "feat: NODE_ENV is configured in the frontend" (#1186) 2022-05-20 18:08:20 +02:00
Prashant Shahi
26db6b5fcc Merge branch 'develop' into pranshuchittora/fix/0.8.1/tsc 2022-05-20 19:57:37 +05:30
Prashant Shahi
6e2afe1c78 fix(husky): 🚨 integrate is-ci and webpack-cli version bump (#1181)
* fix(husky): 🚨 integrate is-ci and webpack-cli version bump

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore(frontend-Dockerfile): 🚀 remove NODE_ENV

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-20 19:14:58 +05:30
Pranshu Chittora
0bcd9d8d98 fix: ts typings and remove cypress types 2022-05-20 18:36:46 +05:30
Ankit Nayan
be01bc9b82 Revert "fix: frontend/package.json & frontend/yarn.lock to reduce vulnerabilities (#1147)" (#1182)
This reverts commit 5a2ad9492c.
2022-05-20 14:08:12 +02:00
palash-signoz
5a2ad9492c fix: frontend/package.json & frontend/yarn.lock to reduce vulnerabilities (#1147)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-ANSIREGEX-1583908

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2022-05-20 13:38:22 +02:00
Ankit Nayan
747677d4b0 Merge pull request #1152 from palash-signoz/feat/playwright
feat: playwright is configured
2022-05-20 13:33:37 +02:00
palash-signoz
e7f49cf360 Merge pull request #1178 from palash-signoz/tag-improvement
fix: tag style is updated
2022-05-20 15:46:04 +05:30
palash-signoz
3ba519457a Merge pull request #1179 from palash-signoz/metrics-application-active-key
fix: getActiveKey is refactoring into switch
2022-05-20 15:45:49 +05:30
palash-signoz
8d6646afed Merge pull request #1180 from SigNoz/prashant/frontend-docker
chore(docker): 🚀 Update Dockerfile and .dockerignore files
2022-05-20 15:38:24 +05:30
Prashant Shahi
a4cfb44953 chore(docker): 🚀 Update Dockerfile and .dockerignore files
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-20 15:07:47 +05:30
Palash gupta
c77ad88f90 fix: getActiveKey is refactoring into switch 2022-05-20 14:12:13 +05:30
palash-signoz
914be6e4cf Merge pull request #1177 from palash-signoz/set-retention
fix: set retention query is fixed
2022-05-20 14:04:21 +05:30
Palash gupta
2e9e29eb38 fix: tag style is updated 2022-05-20 14:02:46 +05:30
Palash gupta
bbed3fda22 fix: set retention query is fixed 2022-05-20 13:39:23 +05:30
Palash gupta
cbaf9b009c fix: merge conflict resolved 2022-05-20 13:29:08 +05:30
Ankit Nayan
8471dc0c1b Merge pull request #1151 from SigNoz/feat/gh-bot
feat: signoz gh-bot integration
2022-05-20 09:38:27 +02:00
Ankit Nayan
49175b3784 Merge pull request #1176 from SigNoz/feat/analytics-check-response
feat: added status code of api calls
2022-05-20 09:32:07 +02:00
Ankit Nayan
961dc7e814 feat: added status code of api calls 2022-05-20 09:31:23 +02:00
palash-signoz
1315b43aad Merge pull request #1174 from palash-signoz/compress
fix: version is made exact
2022-05-20 12:55:37 +05:30
Pranshu Chittora
9e6d918d6a fix: removed comment of PR workflow 2022-05-20 12:37:33 +05:30
palash-signoz
5b5b19dd99 Merge pull request #1170 from palash-signoz/full-view
feat: full view is updated to use query
2022-05-20 11:55:10 +05:30
palash-signoz
4b8bd2e335 feat: tags are added in the sidebar (#1153)
* feat: tags are added in the sidebar

* chore: styles is updated
2022-05-20 11:43:23 +05:30
palash-signoz
7d2883df11 Merge pull request #1169 from palash-signoz/392-tab-persist
feat: Tabs selection persist when we refresh
2022-05-20 11:41:40 +05:30
Palash gupta
cb4e465a10 Merge branch 'develop' into compress 2022-05-20 10:55:18 +05:30
palash-signoz
b1ee56b2f2 Merge pull request #1171 from palash-signoz/tsc-fix-2
fix: tsc is fixed
2022-05-20 10:53:08 +05:30
Palash gupta
98dfcead5b fix: version is made exact 2022-05-20 10:48:23 +05:30
Palash gupta
3cc4fb9c30 fix: tsc is fixed 2022-05-19 23:15:12 +05:30
Palash gupta
83cb099aa6 feat: full view is updated to use query 2022-05-19 23:08:27 +05:30
Srikanth Chekuri
c480b3c563 Add section outlining ideal workflow for significant features/changes (#1111) 2022-05-19 22:24:59 +05:30
Palash gupta
f084637f84 fix: merge conflict removed 2022-05-19 21:29:21 +05:30
Palash gupta
9fd8d12cc0 feat: Tabs selection persist when we refresh 2022-05-19 21:25:03 +05:30
palash-signoz
22f9069a29 Merge pull request #1130 from palash-signoz/trigger-alerts-error-handling
fix: error handling is updated for the trigger alerts
2022-05-19 19:34:06 +05:30
Ankit Nayan
42269a7c78 Merge pull request #962 from SigNoz/ttl-plus
Add remove TTL api, and do not allow zero or negative TTL
2022-05-19 16:01:17 +02:00
palash-signoz
2c62a1c0f0 Merge pull request #1163 from palash-signoz/389-trace-left-panel
feat: tooltip is added and max width is configured in the left panel to show text ellipsis
2022-05-19 16:43:39 +05:30
palash-signoz
b3729e0b6c Merge pull request #1167 from palash-signoz/1112-errors
fix: route is updated
2022-05-19 16:42:55 +05:30
Palash gupta
696a6adc32 fix: route is updated 2022-05-19 16:40:41 +05:30
palash-signoz
d964b66bcc Merge pull request #1145 from palash-signoz/bug-double-org
bug: double org is fixed
2022-05-19 16:09:28 +05:30
palash-signoz
4a4ad7a3da Merge pull request #1119 from palash-signoz/logout
fix: logout the user if api is not successful
2022-05-19 16:09:14 +05:30
palash-signoz
03ef3d3bcd Merge pull request #1146 from palash-signoz/379-json-data
feat: dashboard error and loading state is removed from dashboard object
2022-05-19 15:29:45 +05:30
palash-signoz
d2913a2831 Merge pull request #1107 from palash-signoz/app-actions
chore: type is updated for thunk
2022-05-19 15:20:57 +05:30
palash-signoz
4ca3f1f945 Merge pull request #1133 from palash-signoz/develop-tsc-fix
fix: tsc is fix in cypress
2022-05-19 15:20:36 +05:30
palash-signoz
f2074f01e8 Merge pull request #1164 from palash-signoz/393-error-expection-tooltip
feat: tooltip is added in the error message and error type
2022-05-19 15:19:45 +05:30
palash-signoz
ffd5621f09 Merge pull request #1118 from palash-signoz/application-metrics-error-handling
fix: error is now handled and displayed as antd notification message in /application
2022-05-19 15:19:25 +05:30
Palash gupta
429e3bbd0d fix: logout is fixed 2022-05-19 13:43:54 +05:30
palash-signoz
3f37fe4d60 Merge pull request #1158 from palash-signoz/391-top-end-points
feat: top end point table is fixed
2022-05-19 13:30:07 +05:30
palash-signoz
ec3fed05bb Merge pull request #1132 from palash-signoz/commitlint
feat: commit lint is added in the frontend
2022-05-19 13:29:24 +05:30
palash-signoz
31583b73d8 Merge pull request #1159 from palash-signoz/390-metrics-pagination
feat: pagination is added in the application table
2022-05-19 13:29:05 +05:30
palash-signoz
02ba0eda9a Merge pull request #1154 from palash-signoz/341-url-encoding
feat: url encoding is added in the new dashboard query
2022-05-19 13:28:32 +05:30
palash-signoz
7185f2fa24 Merge pull request #1155 from palash-signoz/dashboard-widget-hover-resize
feat: resize handler is visible on hover
2022-05-19 13:27:16 +05:30
Palash gupta
ceb59e8bb5 fix: yarn is turned into npm 2022-05-19 13:24:46 +05:30
Ankit Nayan
f063a82133 Merge pull request #1124 from palash-signoz/env
feat: NODE_ENV is configured in the frontend
2022-05-19 08:46:30 +02:00
Palash gupta
072c137f26 feat: tooltip is added in the error message and error type 2022-05-19 08:48:19 +05:30
Palash gupta
358fc3a217 feat: tooltip is added and max width is configured in the left panel to show text ellipsis 2022-05-19 08:28:50 +05:30
Palash gupta
60d869ddbe feat: pagination is added in the application table 2022-05-18 22:26:41 +05:30
Palash gupta
286d46edbe feat: top endpoint table is fixed 2022-05-18 22:22:12 +05:30
palash-signoz
b66ce81eb6 Merge pull request #1127 from palash-signoz/remove-unneccesary-file
fix: removed unnecessary file
2022-05-18 09:50:39 +05:30
Palash gupta
60bb82ea9d feat: resize handler is visible on hover 2022-05-18 08:55:08 +05:30
Palash gupta
e3987206de feat: url encoding is added in the new dashboard query 2022-05-18 07:37:15 +05:30
Palash gupta
b8f8d59d40 feat: baseurl is added and grabbed from the env 2022-05-18 07:12:53 +05:30
Palash gupta
b2fc4776b7 feat: playwright is configured 2022-05-18 00:08:36 +05:30
Palash gupta
dd0047da07 feat: playwright is configured 2022-05-17 19:28:06 +05:30
palash-signoz
d3c67bad5b Merge pull request #1138 from pranshuchittora/pranshuchittora/fix/service-map-color
fix: service map label readable
2022-05-17 17:40:36 +05:30
Pranshu Chittora
ff3b414645 feat: signoz gh-bot integration 2022-05-17 17:23:06 +05:30
Ankit Nayan
104256dcb5 Merge pull request #926 from prashant-shahi/prashant/pprof
feat(query-service):  integrate pprof
2022-05-17 10:25:19 +02:00
Ankit Nayan
38d89fc34a Merge pull request #1136 from SigNoz/prashant/nginx-cache-improvement
chore: 🔧 improve nginx cache configuration
2022-05-17 10:22:20 +02:00
Palash gupta
a2d67f1222 fix: isProduction is removed 2022-05-17 11:55:30 +05:30
palash-signoz
8e360e001f Merge pull request #1126 from palash-signoz/alerts-rules-error-handling
fix: list alerts rules is handled
2022-05-17 11:52:06 +05:30
palash-signoz
de3928c51f Merge pull request #1128 from palash-signoz/error-handling-error-detail
fix: error details error is handled
2022-05-17 11:49:50 +05:30
Palash gupta
228fb66251 feat: dashboard error and loading state is removed from dashboard object 2022-05-13 12:59:35 +05:30
Palash gupta
12c14f71ba bug: bug double org is fixed 2022-05-13 11:15:10 +05:30
Prashant Shahi
80de9efa0e refactor(query-service): 🔊 update pprof server error log
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-13 10:53:43 +05:30
palash-signoz
3890e06d29 Merge pull request #1123 from palash-signoz/trace-handling
fix: error handling is updated in trace
2022-05-13 10:44:15 +05:30
Palash gupta
a34dbc4942 chore: error checking condition !=200 is moved to >=400 2022-05-13 10:43:26 +05:30
Palash gupta
4b591fabf7 chore: error detail is updated 2022-05-13 10:38:32 +05:30
Palash gupta
cc978153f9 chore: added the new line 2022-05-13 10:32:36 +05:30
Prashant Shahi
9ba0b84a91 refactor(query-service): ♻️ move pprof to server.go
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-13 03:38:00 +05:30
Prashant Shahi
ac06b02d52 Merge branch 'develop' of github.com:signoz/signoz into prashant/pprof 2022-05-13 03:06:09 +05:30
Prashant Shahi
9c173c8eb3 docs(contributing): 📝 Update CONTRIBUTING.md docs (#877)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-12 22:46:43 +05:30
Srikanth Chekuri
d0b21fce01 update exec to clickhouse v2 api; update the queries 2022-05-12 16:22:59 +05:30
Pranshu Chittora
07ffd13159 fix: service map label readable 2022-05-12 15:47:47 +05:30
palash-signoz
1926998e3c fix: error is now handled in the login screen (#1120) 2022-05-12 13:43:20 +05:30
Srikanth Chekuri
eb397babcd resolve merge conflicts 2022-05-12 11:13:44 +05:30
Prashant Shahi
a0643aaf4e chore: 🔧 improve nginx cache configuration
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-11 10:53:51 +05:30
Palash gupta
169185ff89 chore: type is updated 2022-05-11 01:47:12 +05:30
Palash gupta
7feee26f85 fix: tsc is fix in cypress 2022-05-11 01:34:53 +05:30
Palash gupta
ce72b1e7a0 feat: commit lint is added in the frontend 2022-05-11 01:12:29 +05:30
Palash gupta
e06f020162 fix: error handling is updated for the trigger alerts 2022-05-10 18:30:22 +05:30
Palash gupta
574088ad54 fix: error state is updated 2022-05-10 18:23:47 +05:30
Palash gupta
6f48030ab9 fix: error details error is handled 2022-05-10 17:54:26 +05:30
Palash gupta
ea3a5e20d9 fix: removed unnecessary import 2022-05-10 17:32:22 +05:30
Palash gupta
b4833eeb0e fix: list alerts rules is handled 2022-05-10 17:27:04 +05:30
Palash gupta
ce67005d66 feat: NODE_ENV is configured in the frontend 2022-05-10 17:18:20 +05:30
Palash gupta
80c0b5621d fix: error handling is updated in trace 2022-05-10 17:09:39 +05:30
Palash gupta
b21a2707d3 fix: logout the user if api is not successful 2022-05-10 14:11:16 +05:30
Palash gupta
4fa5ff9319 fix: error is now handled and displayed as antd notification message 2022-05-10 14:02:05 +05:30
palash-signoz
53528f1045 fix: name is updated to the tag_name in the version (#1116) 2022-05-10 12:59:15 +05:30
Ankit Nayan
9522bbf33b Merge pull request #1106 from SigNoz/release/v0.8.0
Release/v0.8.0
2022-05-06 12:16:51 +05:30
Prashant Shahi
3995de16f0 chore(release): 📌 pin versions: SigNoz 0.8.0, Alertmanager 0.23.0-0.1, OtelCollector 0.43.0-0.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-06 11:49:49 +05:30
Palash gupta
f149258de2 chore: type is updated for thunk 2022-05-06 11:27:23 +05:30
Prashant Shahi
da386b0e8e chore(nginx-config): 🔧 add cache control headers for UI (#1104)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-06 09:12:01 +05:30
palash-signoz
75cdac376f feat: cache headers is added (#1103) 2022-05-06 00:33:41 +05:30
palash-signoz
0ef13a89ed feat: updated password is updated (#1102) 2022-05-06 00:33:04 +05:30
palash-signoz
084d8ecccd chore: logout is updated (#1100) 2022-05-05 21:21:39 +05:30
Vishal Sharma
b9f3663b6c fix: exceptionStacktrace typo (#1098) 2022-05-05 20:04:59 +05:30
palash-signoz
4067aa5025 chore: handled the null case (#1096) 2022-05-05 16:16:13 +05:30
palash-signoz
ebf9316714 feat: sorting is updated (#1095) 2022-05-05 14:06:22 +05:30
palash-signoz
f5009abca6 chore: error sorting is updated (#1094) 2022-05-05 14:00:37 +05:30
palash-signoz
b16a793cbc Error is renamed to exceptions (#1093)
* chore: error is renamed to exceptions
2022-05-05 13:59:25 +05:30
Vishal Sharma
374a2415d9 fix: fix typo and return empty struct instead of null (#1092) 2022-05-05 13:58:39 +05:30
palash-signoz
3789e25a1e feat: stack trace is show in monaco editor (#1091) 2022-05-05 13:57:52 +05:30
palash-signoz
10ab057e29 chore: error details is updated (#1090)
* chore: error details is updated
2022-05-05 13:57:26 +05:30
Ankit Nayan
41b9129145 viewAccess to usage explorer 2022-05-05 13:19:42 +05:30
Pranshu Chittora
f5d10b72f0 fix: y-axis generic time unit conversion (#1088)
* fix: y-axis generic time unit conversion
2022-05-05 12:22:56 +05:30
Vishal Sharma
6fb6a576aa fix: lagInFrame and leadInFrame nullable (#1089)
https://github.com/ClickHouse/ClickHouse/pull/26521
2022-05-05 12:22:19 +05:30
palash-signoz
7cf567792a chore: error message is displayed to user in the error page (#1085) 2022-05-05 11:12:42 +05:30
Vishal Sharma
fe18e85e36 fix: error and exception type error (#1086) 2022-05-05 11:11:35 +05:30
palash-signoz
147476d802 bug: confirm password bug is fixed (#1084) 2022-05-05 10:16:32 +05:30
palash-signoz
c94f23a710 chore: reset password is updated for the user id not the logged in user id (#1082) 2022-05-04 23:19:49 +05:30
Pranshu Chittora
1a2ef4fde6 fix: y-axis time unit (#1081) 2022-05-04 22:46:33 +05:30
Ahsan Barkati
6c505f9e86 Fix error message (#1080) 2022-05-04 21:45:20 +05:30
palash-signoz
fb97540c7c chore: tsc is fixed (#1078) 2022-05-04 20:52:33 +05:30
palash-signoz
9cf5c7ef74 chore: new dashboard permission is added for editor and admin (#1077)
* chore: new dashboard permission is added for editor and admin
2022-05-04 20:40:49 +05:30
palash-signoz
6223e89d4c chore: logout is fixed (#1076) 2022-05-04 20:38:03 +05:30
Pranshu Chittora
62f8cddc27 fix: fixed graph axis and tooltip (#1075) 2022-05-04 19:30:57 +05:30
palash-signoz
ffae767fab chore: edit widget is allowed for admin and editor (#1074) 2022-05-04 19:16:30 +05:30
palash-signoz
c23f97c3d0 chore: placeholder is updated for signup name (#1069) 2022-05-04 18:06:25 +05:30
palash-signoz
11eb1e4f72 chore: error message is updated for signup (#1072) 2022-05-04 18:06:02 +05:30
palash-signoz
0554ed7ecb bug: members is fixed (#1073) 2022-05-04 18:05:37 +05:30
palash-signoz
ca4ce0d380 Merge pull request #1071 from pranshuchittora/pranshuchittora/fix/chart-y-axis-zero-values
fix: chart y-axis values not showing for values < 1
2022-05-04 17:55:59 +05:30
Ankit Nayan
65f50bb70d chore: changed permission for edit and delete alert channels (#1070) 2022-05-04 17:53:53 +05:30
Pranshu Chittora
dbe9f3a034 fix: chart y-axis values not showing for values < 1 2022-05-04 17:52:14 +05:30
palash-signoz
7cdd136f61 chore: redirection is added (#1063) 2022-05-04 17:37:11 +05:30
palash-signoz
21d5e0b71c text is made optional (#1064) 2022-05-04 17:36:53 +05:30
palash-signoz
fe53aa412b copy to clipboard is updated (#1065) 2022-05-04 17:36:11 +05:30
palash-signoz
6c5a48082b delete widget is added for admin and editor (#1066) 2022-05-04 17:35:48 +05:30
palash-signoz
b7adc27f02 chore: button is not made disable for firstName (#1067) 2022-05-04 17:34:50 +05:30
Ankit Nayan
67b4290846 chore: change in error message on register (#1068) 2022-05-04 17:33:54 +05:30
Ankit Nayan
8a7cbc8ad3 Update http_handler.go 2022-05-04 17:00:49 +05:30
Vishal Sharma
c74d87a21a fix: handle empty data scenarios (#1062) 2022-05-04 15:03:48 +05:30
Ahsan Barkati
6486425f46 fix(auth): Return 403 for forbidden requests due to rbac (#1060)
* Return error json for user not found
* Return 403 for rbac error
* Return not_found instead of internal_error for getInvite
2022-05-04 14:50:15 +05:30
palash-signoz
5b316afa12 chore: sorting key is updated (#1059) 2022-05-04 14:32:38 +05:30
Ankit Nayan
2dcc6fda77 chore: analytics changed after rbac (#1058) 2022-05-04 14:27:32 +05:30
palash-signoz
a2f570d78c Merge pull request #1057 from pranshuchittora/pranshuchittora/fix/resource-attribute-error-message 2022-05-04 14:11:57 +05:30
Pranshu Chittora
ef209e11d5 fix: resource attribute error message 2022-05-04 13:52:44 +05:30
palash-signoz
1851e76bca Merge pull request #1056 from pranshuchittora/pranshuchittora/fix/dashboard-widget-uuid
fix: dashboard uuid issue
2022-05-04 12:57:20 +05:30
Pranshu Chittora
fa23050916 fix: dashboard uuid issue 2022-05-04 12:56:19 +05:30
palash-signoz
79475bde71 Merge pull request #1054 from palash-signoz/interceptor-is-updated
chore: interceptor is updated
2022-05-04 12:35:50 +05:30
Palash gupta
039201acae chore: interceptor is updated 2022-05-04 12:31:37 +05:30
palash-signoz
22454abc4a chore: api is updated (#1053) 2022-05-04 12:02:14 +05:30
palash-signoz
4c8b7af0eb chore: routesToSkip is updated (#1052) 2022-05-04 12:01:36 +05:30
palash-signoz
5caf94f024 feat: permission is added in the dashboard button (#1051) 2022-05-04 01:31:44 +05:30
Ankit Nayan
ce0b37ca2e Update jwt.go 2022-05-04 01:06:45 +05:30
palash-signoz
5f529e1c10 feat: refresh token is fixed (#1049) 2022-05-04 01:05:38 +05:30
Prashant Shahi
05c923df9b chore: 📌 pin alertmanager and otelcollector version and changes (#1048)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-03 23:59:29 +05:30
palash-signoz
90637212bc feat: total count is generated from the filter (#1047)
* feat: total count is generated from the filter
2022-05-03 23:33:12 +05:30
palash-signoz
b58f45c268 chore: scroll is made auto (#1044) 2022-05-03 23:04:16 +05:30
Vishal Sharma
6a6fd44719 fix: convert serviceMap API to POST (#1046) 2022-05-03 23:03:47 +05:30
Vishal Sharma
81cc120539 fix: aggregate of avg and percentile (#1045) 2022-05-03 23:02:49 +05:30
Pranshu Chittora
831381a1ff fix: trace detail styling issue (#1043) 2022-05-03 21:27:44 +05:30
palash-signoz
fd0656e0fc chore: redirect the user to application if user is navigated to non logged in page (#1042) 2022-05-03 21:27:17 +05:30
palash-signoz
e217ea0c9c chore: default value is added when on unmount (#1041) 2022-05-03 21:24:55 +05:30
palash-signoz
bdf9333dcf chore: version is added in the src of Logo (#1039) 2022-05-03 21:24:02 +05:30
Pranshu Chittora
eae97d6ffc fix: migrate service map APIs from GET to POST (#1038) 2022-05-03 21:21:40 +05:30
palash-signoz
9f5241e82c chore: error message is updated (#1037) 2022-05-03 21:21:10 +05:30
palash-signoz
284eda4072 chore: placeholder is updated for the signup page (#1035)
* chore: placeholder is updated for the signup page
2022-05-03 21:20:36 +05:30
palash-signoz
63693a4185 chore: name field is hidden when empty name is received from invite details api (#1036) 2022-05-03 21:19:35 +05:30
palash-signoz
d9cf9071d3 chore: text for forgot password is updated (#1034) 2022-05-03 21:15:58 +05:30
palash-signoz
5e41c7f62b chore: header style is fixed in the light mode (#1033) 2022-05-03 21:15:23 +05:30
palash-signoz
e903277143 feat: sorting duration is added (#1032)
* feat: sorting duration is added in trace filter page
2022-05-03 21:14:40 +05:30
palash-signoz
f2def38df8 Merge pull request #1040 from pranshuchittora/pranshuchittora/fix/empty-graph-on-zero-values
fix: empty graph showing no data on zero values
2022-05-03 21:10:55 +05:30
Ankit Nayan
71e742fb2b Update otel-collector-config.yaml 2022-05-03 21:01:49 +05:30
Pranshu Chittora
571e087f31 fix: empty graph showing no data on zero values 2022-05-03 20:14:35 +05:30
Srikanth Chekuri
3e5f9f3b25 bugfix: missing destination name for DiskItem (#1031) 2022-05-03 19:13:32 +05:30
palash-signoz
c969b5f329 chore: Private Wrapper is updated (#1029)
* chore: Private Wrapper is updated
2022-05-03 17:11:07 +05:30
palash-signoz
5bcf42d398 chore: isPasswordNotValidMessage is updated (#1030) 2022-05-03 17:10:39 +05:30
palash-signoz
c81b0b2a8b Auth Wrapper is updated (#1028) 2022-05-03 16:01:41 +05:30
palash-signoz
d52308c9b5 feat: some routes are removed from the date picker to display (#1023) 2022-05-03 15:59:12 +05:30
Pranshu Chittora
7948bca710 feat: resource attributes based filter for metrics (#1022)
* feat: resource attributes based filtering enabled
2022-05-03 15:41:40 +05:30
palash-signoz
29c0b43481 bug: Rows per page setting is now working (#1026) 2022-05-03 15:30:08 +05:30
palash-signoz
9351fd09c2 bug: exclude param is added in the getTagFilters (#1025) 2022-05-03 15:29:54 +05:30
palash-signoz
59f32884d2 Feat(UI): Auth (#1018)
* auth and rbac frontend changes
2022-05-03 15:27:09 +05:30
Ahsan Barkati
6ccdc5296e feat(auth): Add auth and access-controls support (#874)
* auth and rbac support enabled
2022-05-03 15:26:32 +05:30
Amol Umbark
3ef9d96678 Pagerduty - Create, Edit and Test Features (#1016)
* enabled sending alerts to pagerduty
2022-05-03 11:28:00 +05:30
Vishal Sharma
642ece288e perf: Query-service Performance Improvements (traces) (#838)
* feat: Update query-service Go version to 1.17 #911
* chore: Upgrade to clickhouse versions v2 #751
* feat: Duration sorting in events table of Trace-filter page #826
* feat: Add grpc status code to traces view #975
* feat: added filtering by resource attributes #881
2022-05-03 11:20:57 +05:30
Pranshu Chittora
3ab4f71aa1 enhancement(FE): span time unit normalisation (#1021)
* feat: relevant span time unit on trace detail page

* fix: remove time unit on hover on flame graph
2022-04-29 14:33:57 +05:30
palash-signoz
b5be770a03 Merge pull request #1019 from palash-signoz/error-details-fix
bug: editor is updated in error details page

> Not sure about the product requirements though

tsc is giving error at develop
2022-04-29 10:27:06 +05:30
Palash gupta
08e3428744 feat: editor is updated in error details page 2022-04-27 22:21:57 +05:30
palash-signoz
b335d440cf Feat: import export dashboard (#980)
* feat: added import & export functionality in dashboards
2022-04-25 22:41:46 +05:30
cui fliter
1293378c5c fix some typos (#976)
Signed-off-by: cuishuang <imcusg@gmail.com>
2022-04-22 20:04:37 +05:30
palash-signoz
5424c7714f feat: Error exception (#979)
* feat: error exception page is made
2022-04-22 20:03:08 +05:30
palash-signoz
95311db543 feat: tagkey length check is added (#999) 2022-04-22 19:45:14 +05:30
palash-signoz
bf52722689 bug: list of rules is fixed when created and come back to all rules (#998) 2022-04-22 19:39:01 +05:30
Vishal Sharma
6064840dd1 feat: support gRPC status, method in trace table (#987) 2022-04-22 19:38:08 +05:30
Pranshu Chittora
182adc551c feat: dashboard search and filter (#1005)
* feat: enable search and filter in dashboards
2022-04-22 18:57:05 +05:30
Amol Umbark
2b5b79e34a (feature): UI for Test alert channels (#994)
* (feature): Implemented test channel function for webhook and slack
2022-04-22 16:56:18 +05:30
Amol Umbark
508c6ced80 (feature): API - Implement receiver/channel test functionality (#993)
* (feature): Added test receiver/channel functionality
2022-04-22 12:11:19 +05:30
Pranshu Chittora
3c2173de9e feat: new dashboard widget's option selection (#982)
* feat: new dashboard widget's option selection

* fix: overflowing legend

* feat: delete menu item is of type danger

* feat: added keyboard events onFocus and onBlur
2022-04-19 10:57:56 +05:30
palash-signoz
9a6bcaadf8 Merge pull request #1000 from palash-signoz/984-updated-response
feat: httpCode and httpStatus is updated to statusCode and method
2022-04-19 10:52:31 +05:30
Palash gupta
08bbb0259d feat: httpCode and httpStatus is updated to code and method 2022-04-19 00:21:30 +05:30
palash-signoz
93638d5615 Use fetch fix (#995)
* feat: useFetch in tag value is removed and moved to use query

* feat: useFetch in all channels is removed and moved to use query

* feat: useFetch in edit rule is removed and moved to use query

* feat: useFetch in general settings is removed and moved to use query

* feat: useFetch in all alerts is changed into use query
2022-04-18 15:24:51 +05:30
Ankit Nayan
844ca57686 Merge pull request #997 from SigNoz/dashboard-bug-fix
(bugfix): remove validation on post data id
2022-04-18 14:37:46 +05:30
Srikanth Chekuri
b2eec25f33 (bugfix): remove validation on post data id 2022-04-18 14:12:49 +05:30
Ankit Nayan
61d01fa2d5 feat: added action to verify that every pr has a linked issue 2022-04-15 11:32:19 +05:30
Ankit Nayan
a6bf6e4e07 Merge pull request #923 from SigNoz/uuid-server
chore: generate uuid on server side
2022-04-13 11:45:30 +05:30
palash-signoz
d454482f43 wdyr is updated (#981) 2022-04-11 17:17:31 +05:30
Pranay Prateek
f6aece6349 Update CONTRIBUTING.md 2022-04-08 10:13:36 -07:00
Pranay Prateek
dc9508269d Update commentLinesForSetup.sh
Updating frontend service line number
2022-04-08 10:12:58 -07:00
Pranay Prateek
a6c41f312d Update CONTRIBUTING.md 2022-04-08 10:09:24 -07:00
palash-signoz
f487f7420b Merge pull request #972 from palash-signoz/refetch-onwindow-focus
chore: refetchOnWindowFocus is made false to global level
2022-04-08 15:13:30 +05:30
palash-signoz
da8f3a6e81 Merge pull request #973 from palash-signoz/eslint-used-var-error
chore(eslint): @typescript-eslint/no-unused-vars is made to error
2022-04-08 15:13:20 +05:30
palash-signoz
d102c94670 bug: unused import is removed and two unwanted eslint rule is removed (#968) 2022-04-08 14:05:16 +05:30
palash-signoz
60288f7ba0 chore(refactor): Signup app layout (#969)
* feat: useFetch is upgraded to useFetchQueries

* chore: en-gb and common.json is updated over public locale
2022-04-08 13:49:33 +05:30
Palash gupta
0cbe17a315 chore(eslint): @typescript-eslint/no-unused-vars is made to error 2022-04-08 02:15:50 +05:30
Palash gupta
dce9f36a8e chore: refetchOnWindowFocus is made false to global level 2022-04-08 02:12:54 +05:30
Ankit Nayan
aa5100261d Merge pull request #956 from SigNoz/release/v0.7.5
Release/v0.7.5
2022-04-07 17:12:16 +05:30
Prashant Shahi
f4cc2a3a05 Merge branch 'develop' into release/v0.7.5 2022-04-07 15:32:44 +05:30
palash-signoz
041a5249b3 feat: onClick is added in the row (#966) 2022-04-07 13:11:27 +05:30
palash-signoz
a767697a86 Merge pull request #965 from palash-signoz/trace-filter-fix-incoming-from-metrics-page
Bug: Trace filter fix incoming from metrics page
2022-04-07 12:59:37 +05:30
palash-signoz
71cb70c62c Merge pull request #958 from palash-signoz/style-trace-search
bug: style-trace-search is fixed
2022-04-07 11:03:40 +05:30
Pranshu Chittora
647cabc4f4 feat: migrated trace detail to use query (#963)
* feat: migrated trace detail to use query

* fix: remove unused imports

* chore: useQuery config is updated

Co-authored-by: Palash gupta <palash@signoz.io>
2022-04-07 10:48:09 +05:30
Palash gupta
e864e33ad3 bug: value is updated on selection 2022-04-06 22:01:42 +05:30
Palash gupta
5bdbe792f5 chore: selected filter is not removed when user close panel 2022-04-06 21:28:17 +05:30
Palash gupta
399efb0fb2 bug: trace filter bug are collapsed 2022-04-06 21:19:12 +05:30
palash-signoz
4b72de6884 Merge pull request #959 from pranshuchittora/pranshuchittora/fix/ttl-enhancements
fix: general setting TTL enhancements
2022-04-06 16:34:07 +05:30
palash-signoz
9f1473e7de Merge pull request #957 from palash-signoz/trace-table-current-page
bug: current page is updated in redux to see the updated values
2022-04-06 16:33:11 +05:30
Srikanth Chekuri
d6c4df8b4b Add remove TTL api, and do not allow zero or negative TTL 2022-04-06 16:29:10 +05:30
Palash gupta
7150971dc0 chore: style is updated 2022-04-06 16:28:39 +05:30
Pranshu Chittora
d0846b8dd2 fix: general setting TTL enhancements 2022-04-06 12:40:14 +05:30
Palash gupta
ead6885b29 bug: style-trace-search is fixed 2022-04-06 11:33:56 +05:30
Palash gupta
d72dacdc1f bug: current page is updated in redux to see the updated values 2022-04-06 10:52:46 +05:30
Prashant Shahi
1d6ddd4890 chore(install-script): 🩹 fix email condition
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-04-06 01:37:07 +05:30
Prashant Shahi
58daca1579 chore(docker-compose): 🔧 add restart policy and frontend dependency
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-04-06 00:52:48 +05:30
Prashant Shahi
1e522ad8f1 chore(release): 📌 pin 0.7.5 SigNoz version and 0.6.1 Alertmanager version
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-04-06 00:19:13 +05:30
Prashant Shahi
8809105a8d Prashant/deploy changes (#955)
- set information log level in clickhouse logger config
- maximum logs size 150m (3 files each of 50m)

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-04-06 00:05:05 +05:30
palash-signoz
064c3e0449 chore: default text is updated (#954)
* text and title is updated
2022-04-05 23:13:12 +05:30
palash-signoz
2a348e916c feat: version page is added (#924)
* feat: 👔 getLatestVersion api is added

* chore: VERSION page is added

* feat:  version page is added

* chore: all string is grabbed from locale

* chore: warning is removed

* chore: translation json is added

* chore: feedback about version is added

* chore: made two different functions

* unused import is removed

* feat: version changes are updated

* chore: if current version is present then it is displayed
2022-04-05 18:21:25 +05:30
palash-signoz
5744193f50 Merge pull request #953 from pranshuchittora/pranshuchittora/fix/trace-detail-error-span-color
fix: error color for spans having error on trace detail page
2022-04-05 18:00:08 +05:30
Pranshu Chittora
ccf352f2db fix: error color for spans having error on trace detail page 2022-04-05 17:02:39 +05:30
palash-signoz
6e446dc0ab Merge pull request #949 from pranshuchittora/pranshuchittora/feat/ttl-s3
feat(FE): TTL/s3 integration
2022-04-05 16:28:41 +05:30
Pranshu Chittora
566c2becdf feat: dynamic step size for the data for graphs (#929)
* feat: dynamic step size for the data for graphs

* fix: remove console.log

* chore: add jest globals

* feat: add step size for dashboard

* chore: undo .eslintignore
2022-04-05 16:09:57 +05:30
Pranshu Chittora
3b3fd2b3a9 chore: update type 2022-04-05 16:09:04 +05:30
Pranshu Chittora
eae53d9eff feat: condition changes 2022-04-05 16:05:46 +05:30
Pranshu Chittora
42842b6b17 feat: i18n support for setting route names 2022-04-05 15:51:38 +05:30
Pranshu Chittora
95f8dfb4bc feat: i18n support for settings page 2022-04-05 15:44:01 +05:30
palash-signoz
a8c5934fc5 fix: Fix jest (#945)
* bug: jest is now fixed

* chore: files are included for the eslint

* chore: build is fixed

* test: jest test are fixed
2022-04-05 14:47:37 +05:30
palash-signoz
3f2a4d6eac bug: Trace filter page fixes (#846)
* order is added in the url
* local min max duration is kept in memory to show min and max even after filtering by duration
* checkbox ordering does not change when the user selects or un-selects a checkbox
2022-04-05 13:23:08 +05:30
Pranshu Chittora
170609a81f chore: type changes 2022-04-05 12:26:43 +05:30
Pranshu Chittora
76fccbbba4 fix: styling changes 2022-04-05 12:19:54 +05:30
palash-signoz
147ed9f24b chore: editor config is added (#818) 2022-04-05 11:19:06 +05:30
palash-signoz
a69bc321a9 Merge pull request #935 from pranshuchittora/pranshuchittora/feat/graph-memory-issue
feat: FE memory fixes and UX enhancements
2022-04-05 10:35:08 +05:30
Pranshu Chittora
c9e02a8b25 feat: final touches to the ttl 2022-04-05 00:06:13 +05:30
Pranshu Chittora
24d6a1e7b2 feat: s3 ttl validation 2022-04-04 19:38:23 +05:30
Nishidh Jain
a0efa63185 Fix(FE) : Ask for confirmation before deleting any dashboard from dashboard list (#534)
* A confirmation dialog will pop up before deleting any dashboard

Co-authored-by: Palash gupta <palash@signoz.io>
2022-04-04 17:35:44 +05:30
Ankit Nayan
fd83cea9a0 chore: removing version api from being tracked (#950) 2022-04-04 17:07:21 +05:30
Pranshu Chittora
5be1eb58b2 feat: ttl api integration 2022-04-04 15:26:29 +05:30
Pranshu Chittora
8367c106bc Merge branch 'develop' of github.com:SigNoz/signoz into pranshuchittora/feat/ttl-s3 2022-04-04 15:06:33 +05:30
Pranshu Chittora
8064ae1f37 feat: ttl api integration 2022-04-04 15:06:06 +05:30
palash-signoz
ab4d9af442 Merge pull request #928 from palash-signoz/tag-value-suggestion
feat: Tag value suggestion
2022-04-04 12:52:34 +05:30
Palash gupta
eb0d3374d5 Merge branch 'develop' into tag-value-suggestion 2022-04-04 12:35:50 +05:30
palash-signoz
6c4c814b3f bug: pathname check is added (#948) 2022-04-04 10:25:15 +05:30
Palash gupta
32e8e48928 chore: behaviour for dropdown is updated 2022-04-04 08:24:28 +05:30
Naman Jain
53e7037f48 fix: run go vet to fix some issues with json tag (#936)
Co-authored-by: Naman Jain <jain_n@apple.com>
2022-04-02 16:15:03 +05:30
palash-signoz
a566b5dc97 bug: no service and loading check are added (#934) 2022-04-01 17:59:44 +05:30
Pranshu Chittora
4dc668fd13 fix: remove unused props 2022-04-01 17:43:56 +05:30
palash-signoz
d085506d3e bug: logged in check is added in the useEffect (#921) 2022-04-01 15:47:39 +05:30
palash-signoz
1b28a4e6f5 chore: links are updated for all dashboard and promql (#908) 2022-04-01 15:43:58 +05:30
Pranshu Chittora
20e924b116 feat: S3 TTL support 2022-04-01 15:12:30 +05:30
Ahsan Barkati
1d28ceb3d7 feat(query-service): Add cold storage support in getTTL API (#922)
* Add cold storage support in getTTL API
2022-04-01 11:22:25 +05:30
Ankit Nayan
0ff4c040bf Merge pull request #933 from SigNoz/release/v0.7.4
Release/v0.7.4
2022-03-29 23:32:49 +05:30
Pranshu Chittora
5a5aca2113 chore: remove unused code 2022-03-29 16:06:27 +05:30
Pranshu Chittora
cb22117a0f Merge branch 'develop' of github.com:SigNoz/signoz into pranshuchittora/feat/graph-memory-issue 2022-03-29 16:05:49 +05:30
Pranshu Chittora
739946fa47 fix: over memory allocation on Graph on big time range 2022-03-29 16:05:08 +05:30
Palash gupta
5556d1d6fc feat: tag value suggestion is updated 2022-03-29 09:59:50 +05:30
Palash gupta
d4d1104a53 WIP: value suggestion is added 2022-03-29 00:02:56 +05:30
Palash gupta
225a345baa chore: getTagValue api is added 2022-03-29 00:02:16 +05:30
Prashant Shahi
31443dabe7 feat(query-service): integrate pprof
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-28 21:44:40 +05:30
Srikanth Chekuri
eb4abe900c chore: generate uuid on server side 2022-03-28 09:14:40 +05:30
Ankit Nayan
0426bf06eb Merge pull request #901 from SigNoz/release/v0.7.3
Release/v0.7.3
2022-03-24 01:22:42 +05:30
Ankit Nayan
36d8bc7bc6 Merge pull request #891 from SigNoz/release/v0.7.2
Release/v0.7.2
2022-03-23 12:38:18 +05:30
Ankit Nayan
efe57ff15d Merge pull request #825 from SigNoz/release/v0.7.1
Release/v0.7.1
2022-03-04 14:11:41 +05:30
Ankit Nayan
0fb5b90e4e Merge pull request #822 from SigNoz/release/v0.7.0
Release/v0.7.0
2022-03-04 12:12:12 +05:30
Ankit Nayan
91bdb77a0e Revert "Release/v0.7.0 (#814)" (#820)
This reverts commit eb63b6da2a.
2022-03-04 12:08:54 +05:30
Prashant Shahi
eb63b6da2a Release/v0.7.0 (#814)
* feat(FE): dynamic step size of metrics page

* chore(tests): migrate to dayjs for generating timestamp

* bug: sorting of date is fixed

* feat: sorting filter is added

* chore: typo is fixed

* feat(backend): support custom events in span

* fix: encode event string to fix parsing at frontend

* chore: styles is updated for the not found button

* chore: update otel-collector to 0.43.0

* fix: remove encoding

* fix: set userId as distinctId if failed to fetch IP

* Fe: Feat/trace detail (#764)

* feat: new trace detail page flame graph

* feat: new trace detail page layout

* test: trace detail is wip

* chore: trace details in wip

* feat: trace detail page timeline component

* chore: spantoTree is updated

* chore: gantchart is updated

* chore: onClick is added

* chore: isSpanPresentInSearchString util is added

* chore: trace graph is updated

* chore: added the hack to work

* feat: is span present util is added

* chore: in span ms is added

* chore: tooltip is updated

* WIP: chore: trace details changes are updated

* feat: getTraceItem is added

* feat: trace detail page is updated

* feat: trace detail styling changes

* feat: trace detail page is updated

* feat: implement span hover, select, focus and reset

* feat: reset focus

* feat: spanId as query table and unfurling

* feat: trace details is updated

* chore: remove lodash

* chore: remove lodash

* feat: trace details is updated

* feat: new trace detail page styling changes

* feat: new trace detail page styling changes

* feat: improved styling

* feat: remove horizontal scrolling

* feat: new trace detail page modify caret icon

* chore styles are updated

* Revert "chore: Trace styles"

* chore styles are updated

* feat: timeline normalisation

* chore: remove mock data

* chore: sort tree data util is added and selected span component is updated

* chore: trace changes are updated

* chore: trace changes are updated

* chore: trace changes are updated

* feat: refactored time units for new trace detail page

* chore: remove mockdata

* feat: new trace detail page themeing and interval loop fix

* chore: error tag is updated

* chore: error tag is updated

* chore: error tag is updated

* chore: error tag is updated

* chore: console is removed

* fix: error tag expand button

* chore: expanded panel is updated

* feat: scroll span from gantt chart intoview

* chore: trace detail is removed

Co-authored-by: Pranshu Chittora <pranshu@signoz.io>

* bug: Trace search bug is resolved (#741)

* bug: Trace search bug is resolved

* bug: Trace search bug is resolved

* chore: parseTagsToQuery is updated

* chore: parseTagsToQuery is updated

* chore: parseTagsToQuery is updated

* chore: parseTagsToQuery is updated

* chore: add hotrod template and install/delete scripts (#801)

* chore: add hotrod template and scripts

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* refactor:  conditionally compute image

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* fix: 🩹 add signoz namespace

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore: 🔨  fix namespace template in scripts

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* docs(hotrod): 📝 Add README for hotrod k8s

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Co-authored-by: Ankit Nayan <ankit@signoz.io>

* chore(release): 📌 pin SigNoz and OtelCollector versions

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Co-authored-by: Pranshu Chittora <pranshu@signoz.io>
Co-authored-by: Palash gupta <palash@signoz.io>
Co-authored-by: makeavish <makeavish786@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-03-04 01:41:49 +05:30
Ankit Nayan
abe4940e74 Merge pull request #777 from SigNoz/release/v0.6.2
Release/v0.6.2
2022-02-25 22:15:05 +05:30
Ankit Nayan
8e621b6a70 Merge pull request #727 from SigNoz/prashant/hotrod-yaml
chore: 🔧 update default jaeger endpoint in hotrod manifest
2022-02-15 20:49:08 +05:30
Ankit Nayan
3302a84ab5 Merge pull request #714 from SigNoz/release/v0.6.1
Release: v0.6.1
2022-02-11 16:57:48 +05:30
490 changed files with 20193 additions and 14490 deletions

.editorconfig (new file, +33 lines)

@@ -0,0 +1,33 @@
# EditorConfig is awesome: https://EditorConfig.org
# top-most EditorConfig file
root = true
# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,py}]
charset = utf-8
# 4 space indentation
[*.py]
indent_style = space
indent_size = 4
# Tab indentation (no size specified)
[Makefile]
indent_style = tab
# Indentation override for all JS under lib directory
[lib/**.js]
indent_style = space
indent_size = 2
# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2


@@ -1,40 +1,15 @@
name: build-pipeline
on:
pull_request:
branches:
- develop
- main
- v*
paths:
- "pkg/**"
- "frontend/**"
- release/v*
jobs:
get_filters:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
frontend: ${{ steps.filter.outputs.frontend }}
query-service: ${{ steps.filter.outputs.query-service }}
flattener: ${{ steps.filter.outputs.flattener }}
steps:
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@v2
id: filter
with:
filters: |
frontend:
- 'frontend/**'
query-service:
- 'pkg/query-service/**'
flattener:
- 'pkg/processors/flattener/**'
build-frontend:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.frontend == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
@@ -52,9 +27,6 @@ jobs:
build-query-service:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.query-service == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
@@ -62,16 +34,3 @@ jobs:
shell: bash
run: |
make build-query-service-amd64
build-flattener:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.flattener == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build flattener docker image
shell: bash
run: |
make build-flattener-amd64


@@ -0,0 +1,27 @@
on:
pull_request_target:
types:
- closed
env:
GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
PR_NUMBER: ${{ github.event.number }}
jobs:
create_issue_on_merge:
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest
steps:
- name: Checkout Codebase
uses: actions/checkout@v2
with:
repository: signoz/gh-bot
- name: Use Node v16
uses: actions/setup-node@v2
with:
node-version: 16
- name: Setup Cache & Install Dependencies
uses: bahmutov/npm-install@v1
with:
install-command: yarn --frozen-lockfile
- name: Comment on PR
run: node create-issue.js

.github/workflows/playwright.yaml (new file, +22 lines)

@@ -0,0 +1,22 @@
name: Playwright Tests
on:
deployment_status:
jobs:
test:
timeout-minutes: 60
runs-on: ubuntu-latest
if: github.event.deployment_status.state == 'success'
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
node-version: "14.x"
- name: Install dependencies
run: npm ci
- name: Install Playwright
run: npx playwright install --with-deps
- name: Run Playwright tests
run: npm run test:e2e
env:
# This might depend on your test-runner/language binding
PLAYWRIGHT_TEST_BASE_URL: ${{ github.event.deployment_status.target_url }}
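The workflow above only injects the deployment URL through PLAYWRIGHT_TEST_BASE_URL; the Playwright configuration added in the "feat: playwright is configured" commits is what consumes it. A minimal sketch of such a config is shown below, assuming a playwright.config.ts in the frontend package and a localhost:3301 fallback; both are assumptions for illustration, not content of this diff.

```typescript
// playwright.config.ts - hypothetical sketch, not the file from this changeset.
// The CI workflow exports PLAYWRIGHT_TEST_BASE_URL from the deployment status;
// the localhost fallback below is an assumption for local runs.
import { PlaywrightTestConfig } from '@playwright/test';

const config: PlaywrightTestConfig = {
  testDir: './tests',
  timeout: 30_000,
  use: {
    // Use the deployed preview URL in CI, or a local dev server otherwise.
    baseURL: process.env.PLAYWRIGHT_TEST_BASE_URL || 'http://localhost:3301',
  },
};

export default config;
```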


@@ -0,0 +1,20 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue
on:
pull_request:
types: [edited, synchronize, opened, reopened]
check_run:
jobs:
verify_linked_issue:
runs-on: ubuntu-latest
name: Ensure Pull Request has a linked issue.
steps:
- name: Verify Linked Issue
uses: hattan/verify-linked-issue-action@v1.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore (7 lines changed)

@@ -15,6 +15,7 @@ frontend/build
frontend/.vscode
frontend/.yarnclean
frontend/.temp_cache
frontend/test-results
# misc
.DS_Store
@@ -27,10 +28,6 @@ frontend/npm-debug.log*
frontend/yarn-debug.log*
frontend/yarn-error.log*
frontend/src/constants/env.ts
frontend/cypress/**/*.mp4
# env file for cypress
frontend/cypress.env.json
.idea
@@ -43,7 +40,7 @@ frontend/cypress.env.json
frontend/*.env
pkg/query-service/signoz.db
pkg/query-service/tframe/test-deploy/data/
pkg/query-service/tests/test-deploy/data/
# local data

commentLinesForSetup.sh

@@ -4,4 +4,4 @@
# Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml changes.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz
sed -i 38,70's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml

CONTRIBUTING.md

@@ -18,10 +18,10 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git
### Contribute to Frontend with Docker installation of SigNoz
- `git clone https://github.com/SigNoz/signoz.git && cd signoz`
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L59`
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L62`
- run `cd deploy` to move to deploy directory
- Install signoz locally without the frontend
- Add below configuration to query-service section at `docker/clickhouse-setup/docker-compose.yaml#L36`
- Add below configuration to query-service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
```docker
ports:
@@ -55,9 +55,9 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht
- git clone https://github.com/SigNoz/signoz.git
- run `cd signoz` to move to signoz directory
- run `sudo make dev-setup` to configure local setup to run query-service
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L45`
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L28`
- add below configuration to clickhouse section at `docker/clickhouse-setup/docker-compose.yaml#L6`
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml`
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml`
- add below configuration to clickhouse section at `docker/clickhouse-setup/docker-compose.yaml`
```docker
expose:
- 9000
@@ -106,32 +106,70 @@ Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/char
- [k3d](https://k3d.io/#installation)
- [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
- run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart
- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
- run `make dev-install` to install SigNoz chart with `my-release` release name in `platform` namespace.
- run `kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
**To install HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```
**To load data with HotROD sample app:**
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```
**To stop the load generation:**
```sh
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
**To delete HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
| HOTROD_NAMESPACE=sample-application bash
```
---
## General Instructions
**Before making any significant changes, please open an issue**. Each issue
should describe the following:
* Requirement - what kind of use case are you trying to solve?
* Proposal - what do you suggest to solve the problem or improve the existing
situation?
* Any open questions to address
Discussing your proposed changes ahead of time will make the contribution
process smooth for everyone. Once the approach is agreed upon, make your changes
and open a pull request(s). Unless your change is small, Please consider submitting different PRs:
* First PR should include the overall structure of the new component:
* Readme, configuration, interfaces or base classes etc...
* This PR is usually trivial to review, so the size limit does not apply to
it.
* The second PR should include the concrete implementation of the component. If this PR is larger than the recommended size, consider splitting it into multiple PRs.
* If there are multiple sub-components, then ideally each one should be implemented as a separate pull request.
* The last PR should include changes to any user-facing documentation, along with end-to-end tests where applicable. The component should be enabled only after sufficient testing, when there is enough confidence in its stability and quality.
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).
- If you find any bugs, please create an issue

View File

@@ -10,7 +10,6 @@ BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
# Internal variables or constants.
FRONTEND_DIRECTORY ?= frontend
FLATTENER_DIRECTORY ?= pkg/processors/flattener
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
@@ -20,7 +19,6 @@ DOCKER_TAG ?= latest
FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
FLATTERNER_DOCKER_IMAGE ?= flattener-processor
# Build-time Go variables
PACKAGE?=go.signoz.io/query-service
@@ -31,7 +29,7 @@ gitBranch=${PACKAGE}/version.gitBranch
LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"
all: build-push-frontend build-push-query-service build-push-flattener
all: build-push-frontend build-push-query-service
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
@@ -73,27 +71,6 @@ build-push-query-service:
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of flattener
.PHONY: build-flattener-amd64 build-push-flattener
# Step to build docker image of flattener in amd64 (used in build pipeline)
build-flattener-amd64:
@echo "------------------"
@echo "--> Building flattener docker image for amd64"
@echo "------------------"
@cd $(FLATTENER_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of flattener in amd64 (used in push pipeline)
build-push-flattener:
@echo "------------------"
@echo "--> Building and pushing flattener docker image"
@echo "------------------"
@cd $(FLATTENER_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plane \
--no-cache --push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .
dev-setup:
mkdir -p /var/lib/signoz
sqlite3 /var/lib/signoz/signoz.db "VACUUM";
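For context, the `dev-setup` target above only prepares the local sqlite store that query-service expects; a typical invocation (per the contributing guide) would be:

```bash
# sudo is needed because the target writes under /var/lib; it creates an empty
# sqlite database that query-service uses for local metadata.
sudo make dev-setup
ls -l /var/lib/signoz/signoz.db
```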
@@ -115,11 +92,9 @@ down-arm:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.arm.yaml down -v
clear-standalone-data:
@cd $(STANDALONE_DIRECTORY)
@docker run --rm -v "data:/pwd" busybox \
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
clear-swarm-data:
@cd $(SWARM_DIRECTORY)
@docker run --rm -v "data:/pwd" busybox \
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
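The updated `clear-standalone-data` and `clear-swarm-data` targets now mount the host data directory explicitly (via `$(PWD)`) before wiping it; usage is unchanged:

```bash
# Run from the repository root; removes locally persisted data for each setup.
make clear-standalone-data   # wipes alertmanager/, clickhouse/ and signoz/ under deploy/docker/clickhouse-setup/data
make clear-swarm-data        # same, under deploy/docker-swarm/clickhouse-setup/data
```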

SECURITY.md Normal file
View File

@@ -0,0 +1,18 @@
# Security Policy
SigNoz is looking forward to working with security researchers across the world to keep SigNoz and our users safe. If you have found an issue in our systems/applications, please reach out to us.
## Supported Versions
We always recommend using the latest version of SigNoz to ensure you get all security updates.
## Reporting a Vulnerability
If you believe you have found a security vulnerability within SigNoz, please let us know right away. We'll try to fix the problem as soon as possible.
**Do not report vulnerabilities using public GitHub issues**. Instead, email <security@signoz.io> with a detailed account of the issue. Please submit one issue per email; this helps us triage vulnerabilities.
Once we've received your email we'll keep you updated as we fix the vulnerability.
## Thanks
Thank you for keeping SigNoz and our users safe. 🙇

View File

@@ -1,11 +1,8 @@
<?xml version="1.0"?>
<yandex>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<level>information</level>
<console>1</console>
</logger>
<http_port>8123</http_port>
@@ -45,6 +42,34 @@
</client>
</openSSL>
<!-- Example config for tiered storage -->
<!-- <storage_configuration>
<disks>
<default>
<keep_free_space_bytes>10485760</keep_free_space_bytes>
</default>
<s3>
<type>s3</type>
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
<access_key_id>ACCESS-KEY-ID</access_key_id>
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
</s3>
</disks>
<policies>
<tiered>
<volumes>
<default>
<disk>default</disk>
</default>
<s3>
<disk>s3</disk>
</s3>
</volumes>
</tiered>
</policies>
</storage_configuration> -->
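The commented block above is only an example. If it is uncommented with real bucket credentials and ClickHouse is restarted, individual tables can opt into the `tiered` policy. The sketch below is hypothetical: the container name, database/table name, and timestamp column are placeholders, not part of this change.

```bash
# Hypothetical sketch: attach the `tiered` policy to a table and move parts older
# than 7 days to the s3 volume. Replace the container, table, and column names.
docker exec -i some-clickhouse-container clickhouse-client --query \
  "ALTER TABLE some_db.some_table MODIFY SETTING storage_policy = 'tiered'"
docker exec -i some-clickhouse-container clickhouse-client --query \
  "ALTER TABLE some_db.some_table MODIFY TTL toDateTime(timestamp) + INTERVAL 7 DAY TO VOLUME 's3'"
```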
<!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
<!--
<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>

View File

@@ -3,12 +3,19 @@ version: "3.9"
services:
clickhouse:
image: yandex/clickhouse-server:21.12.3.32
# ports:
# - "9000:9000"
# - "8123:8123"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./data/clickhouse/:/var/lib/clickhouse/
deploy:
restart_policy:
condition: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
@@ -17,11 +24,11 @@ services:
retries: 3
alertmanager:
image: signoz/alertmanager:0.6.0
image: signoz/alertmanager:0.23.0-0.1
volumes:
- ./data/alertmanager:/data
command:
- --queryService.url=http://query-service:8080
- --queryService.url=http://query-service:8085
- --storage.path=/data
depends_on:
- query-service
@@ -30,21 +37,27 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.7.4
image: signoz/query-service:0.8.2
command: ["-config=/root/config/prometheus.yml"]
ports:
- "8080:8080"
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
timeout: 5s
retries: 3
deploy:
restart_policy:
condition: on-failure
@@ -52,8 +65,12 @@ services:
- clickhouse
frontend:
image: signoz/frontend:0.7.4
image: signoz/frontend:0.8.2
deploy:
restart_policy:
condition: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
@@ -61,7 +78,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.43.0
image: signoz/otelcontribcol:0.45.1-0.3
command: ["--config=/etc/otel-collector-config.yaml"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -69,7 +86,7 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8889:8889" # Prometheus metrics exposed by the agent
# - "13133" # health_check
# - "13133:13133" # health_check
# - "14268:14268" # Jaeger receiver
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zpages extension
@@ -87,7 +104,7 @@ services:
- clickhouse
otel-collector-metrics:
image: signoz/otelcontribcol:0.43.0
image: signoz/otelcontribcol:0.45.1-0.3
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml

View File

@@ -28,6 +28,11 @@ processors:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 10000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -47,8 +52,8 @@ extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
@@ -61,7 +66,7 @@ service:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/prometheus, batch]
exporters: [clickhouse]
exporters: [clickhousetraces]
metrics:
receivers: [otlp, hostmetrics]
processors: [batch]

View File

@@ -1,7 +1,7 @@
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
@@ -12,19 +12,26 @@ server {
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
if ( $uri = '/index.html' ) {
add_header Cache-Control no-store always;
}
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api/alertmanager {
proxy_pass http://alertmanager:9093/api/v2;
}
location /api {
proxy_pass http://query-service:8080/api;
proxy_pass http://query-service:8080/api;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
root /usr/share/nginx/html;
}
}
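Once the stack is running, the routing above can be sanity-checked from the host. The `/api/v1/version` path comes from the query-service healthcheck earlier in this diff, and port 3301 is the one published for the frontend container.

```bash
# Served by nginx directly (the SPA's index.html)
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:3301/
# Proxied to query-service:8080/api/v1/version via the /api location block above
curl -s http://localhost:3301/api/v1/version
```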

View File

@@ -1,11 +1,8 @@
<?xml version="1.0"?>
<yandex>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<level>information</level>
<console>1</console>
</logger>
<http_port>8123</http_port>
@@ -46,30 +43,31 @@
</openSSL>
<!-- Example config for tiered storage -->
<!-- <storage_configuration> -->
<!-- <disks> -->
<!-- <default> -->
<!-- </default> -->
<!-- <s3> -->
<!-- <type>s3</type> -->
<!-- <endpoint>http://172.17.0.1:9100/test/random/</endpoint> -->
<!-- <access_key_id>ash</access_key_id> -->
<!-- <secret_access_key>password</secret_access_key> -->
<!-- </s3> -->
<!-- </disks> -->
<!-- <policies> -->
<!-- <tiered> -->
<!-- <volumes> -->
<!-- <default> -->
<!-- <disk>default</disk> -->
<!-- </default> -->
<!-- <s3> -->
<!-- <disk>s3</disk> -->
<!-- </s3> -->
<!-- </volumes> -->
<!-- </tiered> -->
<!-- </policies> -->
<!-- </storage_configuration> -->
<!-- <storage_configuration>
<disks>
<default>
<keep_free_space_bytes>10485760</keep_free_space_bytes>
</default>
<s3>
<type>s3</type>
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
<access_key_id>ACCESS-KEY-ID</access_key_id>
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
</s3>
</disks>
<policies>
<tiered>
<volumes>
<default>
<disk>default</disk>
</default>
<s3>
<disk>s3</disk>
</s3>
</volumes>
</tiered>
</policies>
</storage_configuration> -->
<!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->

View File

@@ -3,10 +3,17 @@ version: "2.4"
services:
clickhouse:
image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm
# ports:
# - "9000:9000"
# - "8123:8123"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./data/clickhouse/:/var/lib/clickhouse/
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
@@ -15,39 +22,53 @@ services:
retries: 3
alertmanager:
image: signoz/alertmanager:0.6.0
image: signoz/alertmanager:0.23.0-0.1
volumes:
- ./data/alertmanager:/data
depends_on:
- query-service
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8080
- --queryService.url=http://query-service:8085
- --storage.path=/data
# Note for maintainers/contributors: if you change the line numbers of the frontend & query-service sections, please update them in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:0.7.4
image: signoz/query-service:0.8.2
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-arm
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
timeout: 5s
retries: 3
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.7.4
image: signoz/frontend:0.8.2
container_name: frontend
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
@@ -55,7 +76,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.43.0
image: signoz/otelcontribcol:0.45.1-0.3
command: ["--config=/etc/otel-collector-config.yaml"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -63,7 +84,7 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8889:8889" # Prometheus metrics exposed by the agent
# - "13133" # health_check
# - "13133:13133" # health_check
# - "14268:14268" # Jaeger receiver
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zpages extension
@@ -76,7 +97,7 @@ services:
condition: service_healthy
otel-collector-metrics:
image: signoz/otelcontribcol:0.43.0
image: signoz/otelcontribcol:0.45.1-0.3
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml

View File

@@ -3,10 +3,17 @@ version: "2.4"
services:
clickhouse:
image: yandex/clickhouse-server:21.12.3.32
# ports:
# - "9000:9000"
# - "8123:8123"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./data/clickhouse/:/var/lib/clickhouse/
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
@@ -15,40 +22,52 @@ services:
retries: 3
alertmanager:
image: signoz/alertmanager:0.6.0
image: signoz/alertmanager:0.23.0-0.1
volumes:
- ./data/alertmanager:/data
depends_on:
- query-service
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8080
- --queryService.url=http://query-service:8085
- --storage.path=/data
# Note for maintainers/contributors: if you change the line numbers of the frontend & query-service sections, please update them in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:0.7.4
image: signoz/query-service:0.8.2
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
timeout: 5s
retries: 3
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.7.4
image: signoz/frontend:0.8.2
container_name: frontend
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
@@ -56,7 +75,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.43.0
image: signoz/otelcontribcol:0.45.1-0.3
command: ["--config=/etc/otel-collector-config.yaml"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
@@ -64,7 +83,7 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8889:8889" # Prometheus metrics exposed by the agent
# - "13133" # health_check
# - "13133:13133" # health_check
# - "14268:14268" # Jaeger receiver
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zpages extension
@@ -77,7 +96,7 @@ services:
condition: service_healthy
otel-collector-metrics:
image: signoz/otelcontribcol:0.43.0
image: signoz/otelcontribcol:0.45.1-0.3
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
@@ -87,15 +106,15 @@ services:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"

View File

@@ -28,6 +28,11 @@ processors:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 10000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -47,8 +52,8 @@ extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
@@ -61,11 +66,11 @@ service:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/prometheus, batch]
exporters: [clickhouse]
exporters: [clickhousetraces]
metrics:
receivers: [otlp, hostmetrics]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
exporters: [prometheus]

View File

@@ -44,4 +44,4 @@ service:
metrics:
receivers: [otlp, prometheus]
processors: [batch]
exporters: [clickhousemetricswrite]
exporters: [clickhousemetricswrite]

View File

@@ -1,7 +1,7 @@
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
@@ -9,25 +9,29 @@ server {
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
if ( $uri = '/index.html' ) {
add_header Cache-Control no-store always;
}
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api/alertmanager{
proxy_pass http://alertmanager:9093/api/v2;
location /api/alertmanager {
proxy_pass http://alertmanager:9093/api/v2;
}
location /api {
proxy_pass http://query-service:8080/api;
proxy_pass http://query-service:8080/api;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
root /usr/share/nginx/html;
}
}

View File

@@ -1,273 +0,0 @@
version: "2.4"
volumes:
metadata_data: {}
middle_var: {}
historical_var: {}
broker_var: {}
coordinator_var: {}
router_var: {}
# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
services:
zookeeper:
image: bitnami/zookeeper:3.6.2-debian-10-r100
ports:
- "2181:2181"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
# image: wurstmeister/kafka
image: bitnami/kafka:2.7.0-debian-10-r1
ports:
- "9092:9092"
hostname: kafka
environment:
KAFKA_ADVERTISED_HOST_NAME: kafka
KAFKA_ADVERTISED_PORT: 9092
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
ALLOW_PLAINTEXT_LISTENER: 'yes'
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
healthcheck:
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
interval: 30s
timeout: 10s
retries: 10
depends_on:
- zookeeper
postgres:
container_name: postgres
image: postgres:latest
volumes:
- metadata_data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=FoolishPassword
- POSTGRES_USER=druid
- POSTGRES_DB=druid
coordinator:
image: apache/druid:0.20.0
container_name: coordinator
volumes:
- ./storage:/opt/data
- coordinator_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
ports:
- "8081:8081"
command:
- coordinator
env_file:
- environment_tiny/coordinator
- environment_tiny/common
broker:
image: apache/druid:0.20.0
container_name: broker
volumes:
- broker_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8082:8082"
command:
- broker
env_file:
- environment_tiny/broker
- environment_tiny/common
historical:
image: apache/druid:0.20.0
container_name: historical
volumes:
- ./storage:/opt/data
- historical_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8083:8083"
command:
- historical
env_file:
- environment_tiny/historical
- environment_tiny/common
middlemanager:
image: apache/druid:0.20.0
container_name: middlemanager
volumes:
- ./storage:/opt/data
- middle_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8091:8091"
command:
- middleManager
env_file:
- environment_tiny/middlemanager
- environment_tiny/common
router:
image: apache/druid:0.20.0
container_name: router
volumes:
- router_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8888:8888"
command:
- router
env_file:
- environment_tiny/router
- environment_tiny/common
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
interval: 30s
timeout: 5s
retries: 5
flatten-processor:
image: signoz/flattener-processor:0.4.0
container_name: flattener-processor
depends_on:
- kafka
- otel-collector
ports:
- "8000:8000"
environment:
- KAFKA_BROKER=kafka:9092
- KAFKA_INPUT_TOPIC=otlp_spans
- KAFKA_OUTPUT_TOPIC=flattened_spans
query-service:
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
container_name: query-service
depends_on:
router:
condition: service_healthy
ports:
- "8080:8080"
volumes:
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- DruidClientUrl=http://router:8888
- DruidDatasource=flattened_spans
- STORAGE=druid
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
create-supervisor:
image: theithollow/hollowapp-blog:curl
container_name: create-supervisor
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
set-retention:
image: theithollow/hollowapp-blog:curl
container_name: set-retention
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
otel-collector:
image: otel/opentelemetry-collector:0.18.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 legacy port
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
depends_on:
kafka:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust

View File

@@ -1,269 +0,0 @@
version: "2.4"
volumes:
metadata_data: {}
middle_var: {}
historical_var: {}
broker_var: {}
coordinator_var: {}
router_var: {}
# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
services:
zookeeper:
image: bitnami/zookeeper:3.6.2-debian-10-r100
ports:
- "2181:2181"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
# image: wurstmeister/kafka
image: bitnami/kafka:2.7.0-debian-10-r1
ports:
- "9092:9092"
hostname: kafka
environment:
KAFKA_ADVERTISED_HOST_NAME: kafka
KAFKA_ADVERTISED_PORT: 9092
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
ALLOW_PLAINTEXT_LISTENER: 'yes'
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
healthcheck:
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
interval: 30s
timeout: 10s
retries: 10
depends_on:
- zookeeper
postgres:
container_name: postgres
image: postgres:latest
volumes:
- metadata_data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=FoolishPassword
- POSTGRES_USER=druid
- POSTGRES_DB=druid
coordinator:
image: apache/druid:0.20.0
container_name: coordinator
volumes:
- ./storage:/opt/druid/deepStorage
- coordinator_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
ports:
- "8081:8081"
command:
- coordinator
env_file:
- environment_small/coordinator
broker:
image: apache/druid:0.20.0
container_name: broker
volumes:
- broker_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8082:8082"
command:
- broker
env_file:
- environment_small/broker
historical:
image: apache/druid:0.20.0
container_name: historical
volumes:
- ./storage:/opt/druid/deepStorage
- historical_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8083:8083"
command:
- historical
env_file:
- environment_small/historical
middlemanager:
image: apache/druid:0.20.0
container_name: middlemanager
volumes:
- ./storage:/opt/druid/deepStorage
- middle_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8091:8091"
command:
- middleManager
env_file:
- environment_small/middlemanager
router:
image: apache/druid:0.20.0
container_name: router
volumes:
- router_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8888:8888"
command:
- router
env_file:
- environment_small/router
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
interval: 30s
timeout: 5s
retries: 5
flatten-processor:
image: signoz/flattener-processor:0.4.0
container_name: flattener-processor
depends_on:
- kafka
- otel-collector
ports:
- "8000:8000"
environment:
- KAFKA_BROKER=kafka:9092
- KAFKA_INPUT_TOPIC=otlp_spans
- KAFKA_OUTPUT_TOPIC=flattened_spans
query-service:
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
container_name: query-service
depends_on:
router:
condition: service_healthy
ports:
- "8080:8080"
volumes:
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- DruidClientUrl=http://router:8888
- DruidDatasource=flattened_spans
- STORAGE=druid
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3301:3301"
volumes:
- ./nginx-config.conf:/etc/nginx/conf.d/default.conf
create-supervisor:
image: theithollow/hollowapp-blog:curl
container_name: create-supervisor
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
set-retention:
image: theithollow/hollowapp-blog:curl
container_name: set-retention
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
otel-collector:
image: otel/opentelemetry-collector:0.18.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 leagcy grpc receiver
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
depends_on:
kafka:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ./locust-scripts:/locust

View File

@@ -1 +0,0 @@
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]

View File

@@ -1,69 +0,0 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "flattened_spans",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "StartTimeUnixNano",
"format": "nano"
},
"dimensionsSpec": {
"dimensions": [
"TraceId",
"SpanId",
"ParentSpanId",
"Name",
"ServiceName",
"References",
"Tags",
"ExternalHttpMethod",
"ExternalHttpUrl",
"Component",
"DBSystem",
"DBName",
"DBOperation",
"PeerService",
{
"type": "string",
"name": "TagsKeys",
"multiValueHandling": "ARRAY"
},
{
"type": "string",
"name": "TagsValues",
"multiValueHandling": "ARRAY"
},
{ "name": "DurationNano", "type": "Long" },
{ "name": "Kind", "type": "int" },
{ "name": "StatusCode", "type": "int" }
]
}
}
},
"metricsSpec" : [
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": "NONE",
"rollup": false
}
},
"tuningConfig": {
"type": "kafka",
"reportParseExceptions": true
},
"ioConfig": {
"topic": "flattened_spans",
"replicas": 1,
"taskDuration": "PT20M",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "kafka:9092"
}
}
}

View File

@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=768m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=100MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=1280m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=2
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=1g
DRUID_XMS=1g
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=2g
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=2
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=128m
DRUID_XMS=128m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,26 +0,0 @@
# For S3 storage
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
# druid_storage_type=s3
# druid_storage_bucket=<s3-bucket-name>
# druid_storage_baseKey=druid/segments
# AWS_ACCESS_KEY_ID=<s3-access-id>
# AWS_SECRET_ACCESS_KEY=<s3-access-key>
# AWS_REGION=<s3-aws-region>
# druid_indexer_logs_type=s3
# druid_indexer_logs_s3Bucket=<s3-bucket-name>
# druid_indexer_logs_s3Prefix=druid/indexing-logs
# -----------------------------------------------------------
# For local storage
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_storage_type=local
druid_storage_storageDirectory=/opt/data/segments
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/data/indexing-logs

View File

@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,50 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -1,51 +0,0 @@
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
queued_retry:
num_workers: 4
queue_size: 100
retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
kafka/traces:
brokers:
- kafka:9092
topic: 'otlp_spans'
protocol_version: 2.0.0
kafka/metrics:
brokers:
- kafka:9092
topic: 'otlp_metrics'
protocol_version: 2.0.0
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [memory_limiter, batch, queued_retry]
exporters: [kafka/traces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [kafka/metrics]

View File

@@ -247,7 +247,7 @@ bye() { # Prints a friendly good bye message and exits the script.
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"
if [[ email == "" ]]; then
if [[ $email == "" ]]; then
echo -e "\n📨 Please share your email to receive support with the installation"
read -rp 'Email: ' email
@@ -333,7 +333,6 @@ fi
# echo -e "👉 ${RED}Two ways to go forward\n"
# echo -e "${RED}1) ClickHouse as database (default)\n"
# echo -e "${RED}2) Kafka + Druid as datastore \n"
# read -p "⚙️ Enter your preference (1/2):" choice_setup
# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
@@ -346,8 +345,6 @@ fi
# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
# setup_type='clickhouse'
# else
# setup_type='druid'
# fi
setup_type='clickhouse'

View File

@@ -1,4 +1,5 @@
node_modules
.vscode
build
.env
.env
.git

View File

@@ -1,2 +1,3 @@
node_modules
build
build
*.typegen.ts

View File

@@ -101,12 +101,11 @@ module.exports = {
},
},
],
'@typescript-eslint/no-unused-vars': 'error',
// eslint rules that need to be removed
'no-shadow': 'off',
'@typescript-eslint/no-shadow': 'off',
'global-require': 'off',
'@typescript-eslint/no-var-requires': 'off',
'import/no-cycle': 'off',
'prettier/prettier': [

frontend/.husky/commit-msg Executable file (+4)
View File

@@ -0,0 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
cd frontend && npm run commitlint

View File

@@ -1 +1 @@
12.13.0
16.15.0

View File

@@ -1,5 +1,5 @@
# stage1 as builder
FROM node:12.22.0 as builder
# Builder stage
FROM node:16.15.0-slim as builder
# Add Maintainer Info
LABEL maintainer="signoz"
@@ -9,24 +9,23 @@ ARG TARGETARCH
WORKDIR /frontend
# copy the package.json to install dependencies
# Copy the package.json to install dependencies
COPY package.json ./
# Install the dependencies and make the folder
RUN yarn install
RUN CI=1 yarn install
COPY . .
# Build the project and copy the files
RUN yarn build
FROM nginx:1.18-alpine
FROM nginx:1.18-alpine
COPY conf/default.conf /etc/nginx/conf.d/default.conf
## Remove default nginx index page
# Remove default nginx index page
RUN rm -rf /usr/share/nginx/html/*
# Copy the build output from stage 1 (builder)
@@ -34,4 +33,4 @@ COPY --from=builder /frontend/build /usr/share/nginx/html
EXPOSE 3301
ENTRYPOINT ["nginx", "-g", "daemon off;"]
ENTRYPOINT ["nginx", "-g", "daemon off;"]

frontend/babel.config.js Normal file (+6)
View File

@@ -0,0 +1,6 @@
module.exports = {
presets: [
['@babel/preset-env', { targets: { node: 'current' } }],
'@babel/preset-typescript',
],
};

View File

@@ -0,0 +1 @@
module.exports = { extends: ['@commitlint/config-conventional'] };
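As an aside, the conventional-commits preset above is what the new frontend/.husky/commit-msg hook enforces through npm run commitlint. A few purely hypothetical messages to illustrate what it accepts and rejects (not taken from this repository):
feat(dashboard): add RBAC check before save    -> accepted (type with optional scope)
fix: handle empty retention period             -> accepted (bare type)
updated some files                             -> rejected (no conventional type prefix)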

View File

@@ -1,3 +0,0 @@
{
"video": false
}

View File

@@ -1,47 +0,0 @@
const Login = ({ email, name }: LoginProps): void => {
const emailInput = cy.findByPlaceholderText('mike@netflix.com');
emailInput.then((emailInput) => {
const element = emailInput[0];
// element is present
expect(element).not.undefined;
expect(element.nodeName).to.be.equal('INPUT');
});
emailInput.type(email).then((inputElements) => {
const inputElement = inputElements[0];
const inputValue = inputElement.getAttribute('value');
expect(inputValue).to.be.equals(email);
});
const firstNameInput = cy.findByPlaceholderText('Mike');
firstNameInput.then((firstNameInput) => {
const element = firstNameInput[0];
// element is present
expect(element).not.undefined;
expect(element.nodeName).to.be.equal('INPUT');
});
firstNameInput.type(name).then((inputElements) => {
const inputElement = inputElements[0];
const inputValue = inputElement.getAttribute('value');
expect(inputValue).to.be.equals(name);
});
const gettingStartedButton = cy.findByText('Get Started');
gettingStartedButton.click();
cy
.intercept('POST', '/api/v1/user?email*', {
statusCode: 200,
})
.as('defaultUser');
cy.wait('@defaultUser');
};
export interface LoginProps {
email: string;
name: string;
}
export default Login;

View File

@@ -1,49 +0,0 @@
import {
getDefaultOption,
getOptions,
} from 'container/Header/DateTimeSelection/config';
// import { AppState } from 'store/reducers';
const CheckRouteDefaultGlobalTimeOptions = ({
route,
}: CheckRouteDefaultGlobalTimeOptionsProps): void => {
cy.visit(Cypress.env('baseUrl') + route);
const allOptions = getOptions(route);
const defaultValue = getDefaultOption(route);
const defaultSelectedOption = allOptions.find((e) => e.value === defaultValue);
expect(defaultSelectedOption).not.undefined;
cy
.findAllByTestId('dropDown')
.find('span')
.then((el) => {
const elements = el.get();
const item = elements[1];
expect(defaultSelectedOption?.label).to.be.equals(
item.innerText,
'Default option is not matching',
);
});
// cy
// .window()
// .its('store')
// .invoke('getState')
// .then((e: AppState) => {
// const { globalTime } = e;
// const { maxTime, minTime } = globalTime;
// // @TODO match the global min time and max time according to the selected option
// });
};
export interface CheckRouteDefaultGlobalTimeOptionsProps {
route: string;
}
export default CheckRouteDefaultGlobalTimeOptions;

View File

@@ -1,11 +0,0 @@
const resizeObserverLoopErrRe = /ResizeObserver loop limit exceeded/;
const unCaughtExpection = (): void => {
cy.on('uncaught:exception', (err) => {
// returning false here prevents Cypress from
// failing the test
return !resizeObserverLoopErrRe.test(err.message);
});
};
export default unCaughtExpection;

View File

@@ -1,21 +0,0 @@
{
"data": [
{
"created_at": 1638083159246,
"data": "{}",
"id": 1,
"name": "First Channels",
"type": "slack",
"updated_at": 1638083159246
},
{
"created_at": 1638083159246,
"data": "{}",
"id": 2,
"name": "Second Channels",
"type": "Slack",
"updated_at": 1638083159246
}
],
"message": "Success"
}

View File

@@ -1,35 +0,0 @@
[
{
"serviceName": "frontend",
"p99": 1134610000,
"avgDuration": 744523000,
"numCalls": 267,
"callRate": 0.89,
"numErrors": 0,
"errorRate": 0,
"num4XX": 0,
"fourXXRate": 0
},
{
"serviceName": "customer",
"p99": 734422400,
"avgDuration": 348678530,
"numCalls": 267,
"callRate": 0.89,
"numErrors": 0,
"errorRate": 0,
"num4XX": 0,
"fourXXRate": 0
},
{
"serviceName": "driver",
"p99": 239234080,
"avgDuration": 204662290,
"numCalls": 267,
"callRate": 0.89,
"numErrors": 0,
"errorRate": 0,
"num4XX": 0,
"fourXXRate": 0
}
]

View File

@@ -1,28 +0,0 @@
{
"status": "success",
"data": {
"rules": [
{
"labels": { "severity": "warning" },
"annotations": {},
"state": "firing",
"name": "First Rule",
"id": 1
},
{
"labels": { "severity": "warning" },
"annotations": {},
"state": "firing",
"name": "Second Rule",
"id": 2
},
{
"labels": { "severity": "P0" },
"annotations": {},
"state": "firing",
"name": "Third Rule",
"id": 3
}
]
}
}

View File

@@ -1 +0,0 @@
{ "status": "success", "data": { "resultType": "matrix", "result": [] } }

View File

@@ -1,29 +0,0 @@
{
"status": "success",
"data": {
"resultType": "matrix",
"result": [
{
"metric": {},
"values": [
[1634741764.961, "0.9"],
[1634741824.961, "0.9"],
[1634741884.961, "0.8666666666666667"],
[1634741944.961, "1"],
[1634742004.961, "0.9166666666666666"],
[1634742064.961, "0.95"],
[1634742124.961, "0.9333333333333333"],
[1634742184.961, "0.95"],
[1634742244.961, "1.0333333333333334"],
[1634742304.961, "0.9333333333333333"],
[1634742364.961, "0.9166666666666666"],
[1634742424.961, "0.9"],
[1634742484.961, "1.0166666666666666"],
[1634742544.961, "0.8333333333333334"],
[1634742604.961, "0.9166666666666666"],
[1634742664.961, "0.95"]
]
}
]
}
}

View File

@@ -1,62 +0,0 @@
[
{
"timestamp": 1634742600000000000,
"p50": 720048500,
"p95": 924409540,
"p99": 974744300,
"numCalls": 48,
"callRate": 0.8,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742540000000000,
"p50": 712614000,
"p95": 955580700,
"p99": 1045595400,
"numCalls": 59,
"callRate": 0.98333335,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742480000000000,
"p50": 720842000,
"p95": 887187600,
"p99": 943676860,
"numCalls": 53,
"callRate": 0.8833333,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742420000000000,
"p50": 712287000,
"p95": 908505540,
"p99": 976507650,
"numCalls": 58,
"callRate": 0.96666664,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742360000000000,
"p50": 697125500,
"p95": 975581800,
"p99": 1190121900,
"numCalls": 54,
"callRate": 0.9,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742300000000000,
"p50": 711592500,
"p95": 880559900,
"p99": 1100105500,
"numCalls": 40,
"callRate": 0.6666667,
"numErrors": 0,
"errorRate": 0
}
]

View File

@@ -1,9 +0,0 @@
[
{
"p50": 710824000,
"p95": 1003231400,
"p99": 1231265500,
"numCalls": 299,
"name": "HTTP GET /dispatch"
}
]

View File

@@ -1,35 +0,0 @@
{
"items": {
"1644926280000000000": { "timestamp": 1644926280000000000, "value": 787 },
"1644926340000000000": { "timestamp": 1644926340000000000, "value": 2798 },
"1644926400000000000": { "timestamp": 1644926400000000000, "value": 2828 },
"1644926460000000000": { "timestamp": 1644926460000000000, "value": 2926 },
"1644926520000000000": { "timestamp": 1644926520000000000, "value": 2932 },
"1644926580000000000": { "timestamp": 1644926580000000000, "value": 2842 },
"1644926640000000000": { "timestamp": 1644926640000000000, "value": 2966 },
"1644926700000000000": { "timestamp": 1644926700000000000, "value": 2782 },
"1644926760000000000": { "timestamp": 1644926760000000000, "value": 2843 },
"1644926820000000000": { "timestamp": 1644926820000000000, "value": 2864 },
"1644926880000000000": { "timestamp": 1644926880000000000, "value": 2777 },
"1644926940000000000": { "timestamp": 1644926940000000000, "value": 2820 },
"1644927000000000000": { "timestamp": 1644927000000000000, "value": 2579 },
"1644927060000000000": { "timestamp": 1644927060000000000, "value": 2681 },
"1644927120000000000": { "timestamp": 1644927120000000000, "value": 2828 },
"1644927180000000000": { "timestamp": 1644927180000000000, "value": 2975 },
"1644927240000000000": { "timestamp": 1644927240000000000, "value": 2934 },
"1644927300000000000": { "timestamp": 1644927300000000000, "value": 2793 },
"1644927360000000000": { "timestamp": 1644927360000000000, "value": 2913 },
"1644927420000000000": { "timestamp": 1644927420000000000, "value": 2621 },
"1644927480000000000": { "timestamp": 1644927480000000000, "value": 2631 },
"1644927540000000000": { "timestamp": 1644927540000000000, "value": 2924 },
"1644927600000000000": { "timestamp": 1644927600000000000, "value": 2576 },
"1644927660000000000": { "timestamp": 1644927660000000000, "value": 2878 },
"1644927720000000000": { "timestamp": 1644927720000000000, "value": 2737 },
"1644927780000000000": { "timestamp": 1644927780000000000, "value": 2621 },
"1644927840000000000": { "timestamp": 1644927840000000000, "value": 2823 },
"1644927900000000000": { "timestamp": 1644927900000000000, "value": 3081 },
"1644927960000000000": { "timestamp": 1644927960000000000, "value": 2883 },
"1644928020000000000": { "timestamp": 1644928020000000000, "value": 2823 },
"1644928080000000000": { "timestamp": 1644928080000000000, "value": 455 }
}
}

View File

@@ -1,19 +0,0 @@
{
"serviceName": {
"customer": 1642,
"driver": 1642,
"frontend": 39408,
"mysql": 1642,
"redis": 22167,
"route": 16420
},
"status": { "error": 4105, "ok": 78816 },
"duration": { "maxDuration": 1253979000, "minDuration": 415000 },
"operation": {},
"httpCode": {},
"httpUrl": {},
"httpMethod": {},
"httpRoute": {},
"httpHost": {},
"component": {}
}

View File

@@ -1,105 +0,0 @@
{
"spans": [
{
"timestamp": "2022-02-15T12:16:09.542074Z",
"spanID": "303b39065c6f5df5",
"traceID": "00000000000000007fc49fab3cb75958",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 313418000,
"httpCode": "200",
"httpMethod": "GET"
},
{
"timestamp": "2022-02-15T12:16:08.84038Z",
"spanID": "557e8303bc802992",
"traceID": "000000000000000079310bd1d435a92b",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 318203000,
"httpCode": "200",
"httpMethod": "GET"
},
{
"timestamp": "2022-02-15T12:16:08.867689Z",
"spanID": "347113dd916dd20e",
"traceID": "00000000000000004c22c0409cee0f66",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 512810000,
"httpCode": "200",
"httpMethod": "GET"
},
{
"timestamp": "2022-02-15T12:16:07.060882Z",
"spanID": "0a8d07f72aa1339b",
"traceID": "0000000000000000488e11a35959de96",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 588705000,
"httpCode": "200",
"httpMethod": "GET"
},
{
"timestamp": "2022-02-15T12:16:07.134107Z",
"spanID": "0acd4ec344675998",
"traceID": "00000000000000000292efc7945d9bfa",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 801632000,
"httpCode": "200",
"httpMethod": "GET"
},
{
"timestamp": "2022-02-15T12:16:06.474095Z",
"spanID": "3ae72e433301822a",
"traceID": "00000000000000001ac3004ff1b7eefe",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 306650000,
"httpCode": "200",
"httpMethod": "GET"
},
{
"timestamp": "2022-02-15T12:16:06.996246Z",
"spanID": "1d765427af673039",
"traceID": "00000000000000002e78f59fabbcdecf",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 311469000,
"httpCode": "200",
"httpMethod": "GET"
},
{
"timestamp": "2022-02-15T12:16:05.324296Z",
"spanID": "0987c90d83298a1d",
"traceID": "0000000000000000077bcb960609a350",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 290680000,
"httpCode": "200",
"httpMethod": "GET"
},
{
"timestamp": "2022-02-15T12:16:02.458221Z",
"spanID": "5b0d0d403dd9acf4",
"traceID": "00000000000000007ae5b0aa69242556",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 262763000,
"httpCode": "200",
"httpMethod": "GET"
},
{
"timestamp": "2022-02-15T12:16:00.584939Z",
"spanID": "3beafb277a76b9b4",
"traceID": "00000000000000000ab44953c2fd949e",
"serviceName": "customer",
"operation": "HTTP GET /customer",
"durationNano": 302851000,
"httpCode": "200",
"httpMethod": "GET"
}
],
"totalSpans": 82921
}

View File

@@ -1,24 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
describe('App Layout', () => {
beforeEach(() => {
cy.visit(Cypress.env('baseUrl'));
});
it('Check the user is in Logged Out State', async () => {
cy.location('pathname').then((e) => {
expect(e).to.be.equal(ROUTES.SIGN_UP);
});
});
it('Logged In State', () => {
const testEmail = 'test@test.com';
const firstName = 'Test';
cy.login({
email: testEmail,
name: firstName,
});
});
});

View File

@@ -1,52 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
import defaultAllChannels from '../../fixtures/defaultAllChannels.json';
describe('Channels', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');
cy.visit(Cypress.env('baseUrl') + ROUTES.ALL_CHANNELS);
});
it('Channels', () => {
cy
.intercept('**channels**', {
statusCode: 200,
fixture: 'defaultAllChannels',
})
.as('All Channels');
cy.wait('@All Channels');
cy
.get('.ant-tabs-tab')
.children()
.then((e) => {
const child = e.get();
const secondChild = child[1];
expect(secondChild.outerText).to.be.equals('Alert Channels');
expect(secondChild.ariaSelected).to.be.equals('true');
});
cy
.get('tbody')
.should('be.visible')
.then((e) => {
const allChildren = e.children().get();
expect(allChildren.length).to.be.equals(defaultAllChannels.data.length);
allChildren.forEach((e, index) => {
expect(e.firstChild?.textContent).not.null;
expect(e.firstChild?.textContent).to.be.equals(
defaultAllChannels.data[index].name,
);
});
});
});
});

View File

@@ -1,44 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
describe('default time', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');
});
it('Metrics Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.APPLICATION,
});
});
it('Dashboard Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.ALL_DASHBOARD,
});
});
it('Trace Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.TRACE,
});
});
it('Instrumentation Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.INSTRUMENTATION,
});
});
it('Service Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.SERVICE_MAP,
});
});
it('Settings Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.SETTINGS,
});
});
});

View File

@@ -1,126 +0,0 @@
/// <reference types="cypress" />
import getGlobalDropDownFormatedDate from 'lib/getGlobalDropDownFormatedDate';
import { AppState } from 'store/reducers';
import topEndPoints from '../../fixtures/topEndPoints.json';
describe('Global Time Metrics Application', () => {
beforeEach(() => {
cy.visit(Cypress.env('baseUrl'));
const testEmail = 'test@test.com';
const firstName = 'Test';
cy.login({
email: testEmail,
name: firstName,
});
});
it('Metrics Application', async () => {
cy
.intercept('GET', '/api/v1/services*', {
fixture: 'defaultApp.json',
})
.as('defaultApps');
cy.wait('@defaultApps');
//clicking on frontend
cy.get('tr:nth-child(1) > td:first-child').click();
cy
.intercept('GET', '/api/v1/service/top_endpoints*', {
fixture: 'topEndPoints.json',
})
.as('topEndPoints');
cy
.intercept('GET', '/api/v1/service/overview?*', {
fixture: 'serviceOverview.json',
})
.as('serviceOverview');
cy
.intercept(
'GET',
`/api/v1/query_range?query=sum(rate(signoz_latency_count*`,
{
fixture: 'requestPerSecond.json',
},
)
.as('requestPerSecond');
cy
.window()
.its('store')
.invoke('getState')
.then((e: AppState) => {
const { globalTime } = e;
const { maxTime, minTime } = globalTime;
// intercepting metrics application call
cy.wait('@topEndPoints');
cy.wait('@serviceOverview');
//TODO add errorPercentage also
// cy.wait('@errorPercentage');
cy.wait('@requestPerSecond');
cy
.get('tbody tr:first-child td:first-child')
.then((el) => {
const elements = el.get();
expect(elements.length).to.be.equals(1);
const element = elements[0];
expect(element.innerText).to.be.equals(topEndPoints[0].name);
})
.click();
cy
.findAllByTestId('dropDown')
.find('span.ant-select-selection-item')
.then((e) => {
const elements = e;
const element = elements[0];
const customSelectedTime = element.innerText;
const startTime = new Date(minTime / 1000000);
const endTime = new Date(maxTime / 1000000);
const startString = getGlobalDropDownFormatedDate(startTime);
const endString = getGlobalDropDownFormatedDate(endTime);
const result = `${startString} - ${endString}`;
expect(customSelectedTime).to.be.equals(result);
});
cy
.findByTestId('dropDown')
.click()
.then(() => {
cy.findByTitle('Last 30 min').click();
});
cy
.findByTestId('dropDown')
.find('span.ant-select-selection-item')
.then((e) => {
const elements = e;
const element = elements[0];
const selectedTime = element.innerText;
expect(selectedTime).to.be.equals('Last 30 min');
});
});
});
});

View File

@@ -1,67 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
import convertToNanoSecondsToSecond from 'lib/convertToNanoSecondsToSecond';
import defaultApps from '../../fixtures/defaultApp.json';
describe('Metrics', () => {
beforeEach(() => {
cy.visit(Cypress.env('baseUrl'));
const testEmail = 'test@test.com';
const firstName = 'Test';
cy.login({
email: testEmail,
name: firstName,
});
});
it('Default Apps', () => {
cy
.intercept('GET', '/api/v1/services*', {
fixture: 'defaultApp.json',
})
.as('defaultApps');
cy.wait('@defaultApps');
cy.location().then((e) => {
expect(e.pathname).to.be.equals(ROUTES.APPLICATION);
cy.get('tbody').then((elements) => {
const trElements = elements.children();
expect(trElements.length).to.be.equal(defaultApps.length);
const getChildren = (row: Element): Element => {
if (row.children.length === 0) {
return row;
}
return getChildren(row.children[0]);
};
// this is row element
trElements.map((index, element) => {
const [
applicationElement,
p99Element,
errorRateElement,
rpsElement,
] = element.children;
const applicationName = getChildren(applicationElement).innerHTML;
const p99Name = getChildren(p99Element).innerHTML;
const errorRateName = getChildren(errorRateElement).innerHTML;
const rpsName = getChildren(rpsElement).innerHTML;
const { serviceName, p99, errorRate, callRate } = defaultApps[index];
expect(applicationName).to.be.equal(serviceName);
expect(p99Name).to.be.equal(convertToNanoSecondsToSecond(p99).toString());
expect(errorRateName).to.be.equals(
parseFloat(errorRate.toString()).toFixed(2),
);
expect(rpsName).to.be.equals(callRate.toString());
});
});
});
});
});
export {};

View File

@@ -1,130 +0,0 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
import defaultRules from '../../fixtures/defaultRules.json';
const defaultRuleRoutes = `**/rules/**`;
describe('Alerts', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');
cy
.intercept('get', '*rules*', {
fixture: 'defaultRules',
})
.as('defaultRules');
cy.visit(Cypress.env('baseUrl') + `${ROUTES.LIST_ALL_ALERT}`);
cy.wait('@defaultRules');
});
it('Edit Rules Page Failure', async () => {
cy
.intercept(defaultRuleRoutes, {
statusCode: 500,
})
.as('Get Rules Error');
cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
const firstDelete = e[0];
firstDelete.click();
cy.waitFor('@Get Rules Error');
cy
.window()
.location()
.then((e) => {
expect(e.pathname).to.be.equals(`/alerts/edit/1`);
});
cy.findByText('Something went wrong').then((e) => {
expect(e.length).to.be.equals(1);
});
});
});
it('Edit Rules Page Success', async () => {
const text = 'this is the sample value';
cy
.intercept(defaultRuleRoutes, {
statusCode: 200,
body: {
data: {
data: text,
},
},
})
.as('Get Rules Success');
cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
const firstDelete = e[0];
firstDelete.click();
cy.waitFor('@Get Rules Success');
cy.wait(1000);
cy.findByText('Save').then((e) => {
const [el] = e.get();
el.click();
});
});
});
it('All Rules are rendered correctly', async () => {
cy
.window()
.location()
.then(({ pathname }) => {
expect(pathname).to.be.equals(ROUTES.LIST_ALL_ALERT);
cy.get('tbody').then((e) => {
const tarray = e.children().get();
expect(tarray.length).to.be.equals(3);
tarray.forEach(({ children }, index) => {
const name = children[1]?.textContent;
const label = children[2]?.textContent;
expect(name).to.be.equals(defaultRules.data.rules[index].name);
const defaultLabels = defaultRules.data.rules[index].labels;
expect(label).to.be.equals(defaultLabels['severity']);
});
});
});
});
it('Rules are Deleted', async () => {
cy
.intercept(defaultRuleRoutes, {
body: {
data: 'Deleted',
message: 'Success',
},
statusCode: 200,
})
.as('deleteRules');
cy.get('button.ant-btn.ant-btn-link:first-child').then((e) => {
const firstDelete = e[0];
firstDelete.click();
});
cy.wait('@deleteRules');
cy.get('tbody').then((e) => {
const trray = e.children().get();
expect(trray.length).to.be.equals(2);
});
});
});

View File

@@ -1,160 +0,0 @@
/* eslint-disable sonarjs/no-duplicate-string */
import ROUTES from 'constants/routes';
import { AppState } from 'store/reducers';
import { TraceFilterEnum } from 'types/reducer/trace';
import GraphInitialResponse from '../../fixtures/trace/initialAggregates.json';
import FilterInitialResponse from '../../fixtures/trace/initialSpanFilter.json';
import TableInitialResponse from '../../fixtures/trace/initialSpans.json';
const allFilters = '@Filters.all';
const allGraphs = '@Graph.all';
const allTable = '@Table.all';
describe('Trace', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');
cy
.intercept('POST', '**/aggregates', {
fixture: 'trace/initialAggregates',
})
.as('Graph');
cy
.intercept('POST', '**/getFilteredSpans', {
fixture: 'trace/initialSpans',
})
.as('Table');
cy
.intercept('POST', '**/api/v1/getSpanFilters', {
fixture: 'trace/initialSpanFilter',
})
.as('Filters');
cy.visit(Cypress.env('baseUrl') + `${ROUTES.TRACE}`);
});
it('First Initial Load should go with 3 AJAX request', () => {
cy.wait(['@Filters', '@Graph', '@Table']).then((e) => {
const [filter, graph, table] = e;
const { body: filterBody } = filter.request;
const { body: graphBody } = graph.request;
const { body: tableBody } = table.request;
expect(filterBody.exclude.length).to.equal(0);
expect(filterBody.getFilters.length).to.equal(3);
filterBody.getFilters.forEach((filter: TraceFilterEnum) => {
expect(filter).to.be.oneOf(['duration', 'status', 'serviceName']);
});
expect(graphBody.function).to.be.equal('count');
expect(graphBody.exclude.length).to.be.equal(0);
expect(typeof graphBody.exclude).to.be.equal('object');
expect(tableBody.tags.length).to.be.equal(0);
expect(typeof tableBody.tags).equal('object');
expect(tableBody.exclude.length).equals(0);
});
});
it('Render Time Request Response In All 3 Request', () => {
cy.wait(['@Filters', '@Graph', '@Table']).then((e) => {
const [filter, graph, table] = e;
expect(filter.response?.body).to.be.not.undefined;
expect(filter.response?.body).to.be.not.NaN;
expect(JSON.stringify(filter.response?.body)).to.be.equals(
JSON.stringify(FilterInitialResponse),
);
expect(JSON.stringify(graph.response?.body)).to.be.equals(
JSON.stringify(GraphInitialResponse),
);
expect(JSON.stringify(table.response?.body)).to.be.equals(
JSON.stringify(TableInitialResponse),
);
});
cy.get(allFilters).should('have.length', 1);
cy.get(allGraphs).should('have.length', 1);
cy.get(allTable).should('have.length', 1);
});
it('Clear All', () => {
cy.wait(['@Filters', '@Graph', '@Table']);
expect(cy.findAllByText('Clear All')).not.to.be.undefined;
cy
.window()
.its('store')
.invoke('getState')
.then((e: AppState) => {
const { traces } = e;
expect(traces.isFilterExclude.get('status')).to.be.undefined;
expect(traces.selectedFilter.size).to.be.equals(0);
});
cy.findAllByText('Clear All').then((e) => {
const [firstStatusClear] = e;
firstStatusClear.click();
cy.wait(['@Filters', '@Graph', '@Table']);
// insuring the api get call
cy.get(allFilters).should('have.length', 2);
cy.get(allGraphs).should('have.length', 2);
cy.get(allTable).should('have.length', 2);
cy
.window()
.its('store')
.invoke('getState')
.then((e: AppState) => {
const { traces } = e;
expect(traces.isFilterExclude.get('status')).to.be.equals(false);
expect(traces.userSelectedFilter.get('status')).to.be.undefined;
expect(traces.selectedFilter.size).to.be.equals(0);
});
});
});
it('Un Selecting one option from status', () => {
cy.wait(['@Filters', '@Graph', '@Table']);
cy.get('input[type="checkbox"]').then((e) => {
const [errorCheckbox] = e;
errorCheckbox.click();
cy.wait(['@Filters', '@Graph', '@Table']).then((e) => {
const [filter, graph, table] = e;
const filterBody = filter.request.body;
const graphBody = graph.request.body;
const tableBody = table.request.body;
expect(filterBody.exclude).not.to.be.undefined;
expect(filterBody.exclude.length).not.to.be.equal(0);
expect(filterBody.exclude[0] === 'status').to.be.true;
expect(graphBody.exclude).not.to.be.undefined;
expect(graphBody.exclude.length).not.to.be.equal(0);
expect(graphBody.exclude[0] === 'status').to.be.true;
expect(tableBody.exclude).not.to.be.undefined;
expect(tableBody.exclude.length).not.to.be.equal(0);
expect(tableBody.exclude[0] === 'status').to.be.true;
});
cy.get(allFilters).should('have.length', 2);
cy.get(allGraphs).should('have.length', 2);
cy.get(allTable).should('have.length', 2);
});
});
});

View File

@@ -1,26 +0,0 @@
/// <reference types="cypress" />
// ***********************************************************
// This example plugins/index.js can be used to load plugins
//
// You can change the location of this file or turn off loading
// the plugins file with the 'pluginsFile' configuration option.
//
// You can read more here:
// https://on.cypress.io/plugins-guide
// ***********************************************************
// This function is called when a project is opened or re-opened (e.g. due to
// the project's config changing)
// cypress/plugins/index.ts
/// <reference types="cypress" />
/**
* @type {Cypress.PluginConfig}
*/
module.exports = (): void => {
return undefined;
};
export {};

View File

@@ -1,24 +0,0 @@
import '@testing-library/cypress/add-commands';
import CheckRouteDefaultGlobalTimeOptions, {
CheckRouteDefaultGlobalTimeOptionsProps,
} from '../CustomFunctions/checkRouteDefaultGlobalTimeOptions';
import Login, { LoginProps } from '../CustomFunctions/Login';
Cypress.Commands.add('login', Login);
Cypress.Commands.add(
'checkDefaultGlobalOption',
CheckRouteDefaultGlobalTimeOptions,
);
declare global {
// eslint-disable-next-line @typescript-eslint/no-namespace
namespace Cypress {
interface Chainable {
login(props: LoginProps): void;
checkDefaultGlobalOption(
props: CheckRouteDefaultGlobalTimeOptionsProps,
): void;
}
}
}

View File

@@ -1,20 +0,0 @@
// ***********************************************************
// This example support/index.js is processed and
// loaded automatically before your test files.
//
// This is a great place to put global configuration and
// behavior that modifies Cypress.
//
// You can change the location of this file or turn off
// automatically serving support files with the
// 'supportFile' configuration option.
//
// You can read more here:
// https://on.cypress.io/configuration
// ***********************************************************
// Import commands.js using ES2015 syntax:
import './commands';
// Alternatively you can use CommonJS syntax:
// require('./commands')

View File

@@ -1,13 +0,0 @@
{
"extends": "../tsconfig.json",
"target": "es5",
"lib": ["es5", "dom"],
"compilerOptions": {
"noEmit": true,
// be explicit about types included
// to avoid clashing with Jest types
"types": ["cypress", "@testing-library/cypress", "node"],
"isolatedModules": false
},
"include": ["../node_modules/cypress", "./**/*.ts"]
}

View File

@@ -9,15 +9,27 @@ const config: Config.InitialOptions = {
moduleNameMapper: {
'\\.(css|less)$': '<rootDir>/__mocks__/cssMock.ts',
},
notify: true,
notifyMode: 'always',
testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
transform: {
'\\.(js|jsx|ts|tsx)?$': 'babel-jest',
globals: {
extensionsToTreatAsEsm: ['.ts'],
'ts-jest': {
useESM: true,
},
},
testMatch: ['<rootDir>/src/**/?(*.)(test).(ts|js)?(x)'],
preset: 'ts-jest/presets/js-with-ts-esm',
transform: {
'^.+\\.(ts|tsx)?$': 'ts-jest',
'^.+\\.(js|jsx)$': 'babel-jest',
},
transformIgnorePatterns: ['node_modules/(?!(lodash-es)/)'],
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
testPathIgnorePatterns: ['/node_modules/', '/public/'],
moduleDirectories: ['node_modules', 'src'],
testEnvironmentOptions: {
'jest-playwright': {
browsers: ['chromium', 'firefox', 'webkit'],
},
},
};
export default config;
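For illustration, a minimal hypothetical spec that the updated config above (ts-jest ESM preset, testMatch under src/) would pick up; the file path, helper, and assertion are examples only, not part of this change:
// src/lib/sum.test.ts (hypothetical)
import { describe, expect, it } from '@jest/globals';

const sum = (a: number, b: number): number => a + b;

describe('sum', () => {
  it('adds two numbers', () => {
    expect(sum(2, 3)).toBe(5);
  });
});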

View File

@@ -9,16 +9,17 @@
"prettify": "prettier --write .",
"lint": "eslint ./src",
"lint:fix": "eslint ./src --fix",
"cypress:open": "cypress open",
"cypress:run": "cypress run",
"jest": "jest",
"jest:coverage": "jest --coverage",
"jest:watch": "jest --watch",
"postinstall": "yarn husky:configure",
"husky:configure": "cd .. && husky install frontend/.husky"
"postinstall": "is-ci || yarn husky:configure",
"playwright": "playwright test --config=./playwright.config.ts",
"playwright:local:debug": "PWDEBUG=console yarn playwright --headed --browser=chromium",
"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
"commitlint": "commitlint --edit $1"
},
"engines": {
"node": ">=12.13.0"
"node": ">=16.15.0"
},
"author": "",
"license": "ISC",
@@ -31,6 +32,7 @@
"@testing-library/react": "^11.1.0",
"@testing-library/user-event": "^12.1.10",
"@welldone-software/why-did-you-render": "^6.2.1",
"@xstate/react": "^3.0.0",
"antd": "4.19.2",
"axios": "^0.21.0",
"babel-eslint": "^10.1.0",
@@ -45,7 +47,6 @@
"cross-env": "^7.0.3",
"css-loader": "4.3.0",
"css-minimizer-webpack-plugin": "^3.2.0",
"cypress": "^8.3.0",
"d3": "^6.2.0",
"d3-flame-graph": "^3.1.1",
"d3-tip": "^0.9.1",
@@ -57,7 +58,8 @@
"i18next": "^21.6.12",
"i18next-browser-languagedetector": "^6.1.3",
"i18next-http-backend": "^1.3.2",
"jest": "26.6.0",
"jest": "^27.5.1",
"js-base64": "^3.7.2",
"less": "^4.1.2",
"less-loader": "^10.2.0",
"lodash-es": "^4.17.21",
@@ -66,8 +68,9 @@
"react-dom": "17.0.0",
"react-force-graph": "^1.41.0",
"react-graph-vis": "^1.0.5",
"react-grid-layout": "^1.2.5",
"react-grid-layout": "^1.3.4",
"react-i18next": "^11.16.1",
"react-query": "^3.34.19",
"react-redux": "^7.2.2",
"react-router-dom": "^5.2.0",
"react-use": "^17.3.2",
@@ -84,7 +87,8 @@
"uuid": "^8.3.2",
"web-vitals": "^0.2.4",
"webpack": "^5.23.0",
"webpack-dev-server": "^4.3.1"
"webpack-dev-server": "^4.3.1",
"xstate": "^4.31.0"
},
"browserslist": {
"production": [
@@ -105,14 +109,17 @@
"@babel/preset-env": "^7.12.17",
"@babel/preset-react": "^7.12.13",
"@babel/preset-typescript": "^7.12.17",
"@commitlint/cli": "^16.2.4",
"@commitlint/config-conventional": "^16.2.4",
"@jest/globals": "^27.5.1",
"@testing-library/cypress": "^8.0.0",
"@playwright/test": "^1.22.0",
"@testing-library/react-hooks": "^7.0.2",
"@types/color": "^3.0.3",
"@types/compression-webpack-plugin": "^9.0.0",
"@types/copy-webpack-plugin": "^8.0.1",
"@types/d3": "^6.2.0",
"@types/d3-tip": "^3.5.5",
"@types/jest": "^26.0.15",
"@types/jest": "^27.5.1",
"@types/lodash-es": "^4.17.4",
"@types/mini-css-extract-plugin": "^2.5.1",
"@types/node": "^16.10.3",
@@ -131,7 +138,7 @@
"@typescript-eslint/parser": "^4.28.2",
"autoprefixer": "^9.0.0",
"babel-plugin-styled-components": "^1.12.0",
"compression-webpack-plugin": "^9.0.0",
"compression-webpack-plugin": "9.0.0",
"copy-webpack-plugin": "^8.1.0",
"critters-webpack-plugin": "^3.0.1",
"eslint": "^7.30.0",
@@ -150,19 +157,26 @@
"eslint-plugin-simple-import-sort": "^7.0.0",
"eslint-plugin-sonarjs": "^0.12.0",
"husky": "^7.0.4",
"is-ci": "^3.0.1",
"jest-playwright-preset": "^1.7.0",
"less-plugin-npm-import": "^2.1.0",
"lint-staged": "^12.3.7",
"portfinder-sync": "^0.0.2",
"prettier": "2.2.1",
"react-hot-loader": "^4.13.0",
"ts-jest": "^27.1.4",
"ts-node": "^10.2.1",
"typescript-plugin-css-modules": "^3.4.0",
"webpack-bundle-analyzer": "^4.5.0",
"webpack-cli": "^4.5.0"
"webpack-cli": "^4.9.2"
},
"lint-staged": {
"*.(js|jsx|ts|tsx)": [
"eslint --fix"
]
},
"resolutions": {
"@types/react": "17.0.0",
"@types/react-dom": "17.0.0"
}
}

View File

@@ -0,0 +1,21 @@
import { PlaywrightTestConfig } from '@playwright/test';
import dotenv from 'dotenv';
dotenv.config();
const config: PlaywrightTestConfig = {
forbidOnly: !!process.env.CI,
retries: process.env.CI ? 2 : 0,
preserveOutput: 'always',
name: 'Signoz',
testDir: './tests',
use: {
trace: 'retain-on-failure',
baseURL: process.env.FRONTEND_API_ENDPOINT,
},
updateSnapshots: 'all',
fullyParallel: false,
quiet: true,
};
export default config;
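To show how this config is consumed, a minimal hypothetical spec under the configured testDir ('./tests'); the file name and title assertion are placeholders, and relative navigation relies on baseURL coming from FRONTEND_API_ENDPOINT via dotenv as loaded above. It would run through the yarn playwright script added in package.json:
// tests/home.spec.ts (hypothetical)
import { expect, test } from '@playwright/test';

test('app shell renders', async ({ page }) => {
  // baseURL from the config above makes relative URLs work
  await page.goto('/');
  await expect(page).toHaveTitle(/SigNoz/);
});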

View File

@@ -0,0 +1,48 @@
{
"page_title_create": "New Notification Channels",
"page_title_edit": "Edit Notification Channels",
"button_save_channel": "Save",
"button_test_channel": "Test",
"button_return": "Back",
"field_channel_name": "Name",
"field_channel_type": "Type",
"field_webhook_url": "Webhook URL",
"field_slack_recipient": "Recipient",
"field_slack_title": "Title",
"field_slack_description": "Description",
"field_webhook_username": "User Name (optional)",
"field_webhook_password": "Password (optional)",
"field_pager_routing_key": "Routing Key",
"field_pager_description": "Description",
"field_pager_severity": "Severity",
"field_pager_details": "Additional Information",
"field_pager_component": "Component",
"field_pager_group": "Group",
"field_pager_class": "Class",
"field_pager_client": "Client",
"field_pager_client_url": "Client URL",
"placeholder_slack_description": "Description",
"placeholder_pager_description": "Description",
"help_pager_client": "Shows up as event source in Pagerduty",
"help_pager_client_url": "Shows up as event source link in Pagerduty",
"help_pager_class": "The class/type of the event",
"help_pager_details": "Specify a key-value format (must be a valid json)",
"help_pager_group": "A cluster or grouping of sources",
"help_pager_component": "The part or component of the affected system that is broke",
"help_pager_severity": "Severity of the incident, must be one of: must be one of the following: 'critical', 'warning', 'error' or 'info'",
"help_webhook_username": "Leave empty for bearer auth or when authentication is not necessary.",
"help_webhook_password": "Specify a password or bearer token",
"help_pager_description": "Shows up as description in pagerduty",
"channel_creation_done": "Successfully created the channel",
"channel_creation_failed": "An unexpected error occurred while creating this channel",
"channel_edit_done": "Channels Edited Successfully",
"channel_edit_failed": "An unexpected error occurred while updating this channel",
"selected_channel_invalid": "Channel type selected is invalid",
"username_no_password": "A Password must be provided with user name",
"test_unsupported": "Sorry, this channel type does not support test yet",
"channel_test_done": "An alert has been sent to this channel",
"channel_test_failed": "Failed to send a test message to this channel, please confirm that the parameters are set correctly",
"channel_test_unexpected": "An unexpected error occurred while sending a message to this channel, please try again",
"webhook_url_required": "Webhook URL is mandatory",
"slack_channel_help": "Specify channel or user, use #channel-name, @username (has to be all lowercase, no whitespace)"
}

View File

@@ -0,0 +1,10 @@
{
"something_went_wrong": "Something went wrong",
"already_logged_in": "Already Logged In",
"success": "Success",
"cancel": "Cancel",
"share": "Share",
"save": "Save",
"edit": "Edit",
"logged_in": "Logged In"
}
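These namespaced JSON files are read through react-i18next (a dependency added in package.json above); a minimal hypothetical component using the common namespace, mirroring how PrivateRoute.tsx further down calls useTranslation(['common']):
// hypothetical usage of the 'common' namespace
import React from 'react';
import { useTranslation } from 'react-i18next';

function SaveButton(): JSX.Element {
  const { t } = useTranslation(['common']);
  // 'save' resolves to "Save" from the common namespace above
  return <button type="button">{t('save')}</button>;
}

export default SaveButton;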

View File

@@ -0,0 +1,16 @@
{
"create_dashboard": "Create Dashboard",
"import_json": "Import JSON",
"copy_to_clipboard": "Copy To ClipBoard",
"download_json": "Download JSON",
"view_json": "View JSON",
"export_dashboard": "Export this dashboard.",
"upload_json_file": "Upload JSON file",
"paste_json_below": "Paste JSON below",
"error_upload_json": "Invalid JSON",
"load_json": "Load JSON",
"import_dashboard_by_pasting": "Import dashboard by pasting JSON or importing JSON file",
"error_loading_json": "Error loading JSON file",
"empty_json_not_allowed": "Empty JSON is not allowed",
"new_dashboard_title": "Sample Title"
}

View File

@@ -0,0 +1,7 @@
{
"see_trace_graph": "See what happened before and after this error in a trace graph",
"see_error_in_trace_graph": "See the error in trace graph",
"stack_trace": "Stacktrace",
"older": "Older",
"newer": "Newer"
}

View File

@@ -0,0 +1,21 @@
{
"total_retention_period": "Total Retention Period",
"move_to_s3": "Move to S3\n(should be lower than total retention period)",
"status_message": {
"success": "Your last call to change retention period to {{total_retention}} {{s3_part}} was successful.",
"failed": "Your last call to change retention period to {{total_retention}} {{s3_part}} failed. Please try again.",
"pending": "Your last call to change retention period to {{total_retention}} {{s3_part}} is pending. This may take some time.",
"s3_part": "and S3 to {{s3_retention}}"
},
"retention_save_button": {
"pending": "Updating {{name}} retention period",
"success": "Save"
},
"retention_request_race_condition": "Your request to change retention period has failed, as another request is still in process.",
"retention_error_message": "There was an issue in changing the retention period for {{name}}. Please try again or reach out to support@signoz.io",
"retention_failed_message": "There was an issue in changing the retention period. Please try again or reach out to support@signoz.io",
"retention_comparison_error": "Total retention period for {{name}} cant be lower or equal to the period after which data is moved to s3.",
"retention_null_value_error": "Retention Period for {{name}} is not set yet. Please set by choosing below",
"retention_confirmation": "Are you sure you want to change the retention period?",
"retention_confirmation_description": "This will change the amount of storage needed for saving {{name}}."
}
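The {{...}} tokens above are i18next interpolation variables. A hedged sketch of how a settings view might fill them, assuming this file is registered as a 'generalSettings' namespace (the namespace name and example values are assumptions for illustration only):
// hypothetical: building the retention status message
import { useTranslation } from 'react-i18next';

function useRetentionStatusMessage(totalRetention: string, s3Retention?: string): string {
  const { t } = useTranslation('generalSettings');
  // optional S3 fragment, e.g. "and S3 to 30 days"
  const s3Part = s3Retention
    ? t('status_message.s3_part', { s3_retention: s3Retention })
    : '';
  return t('status_message.success', { total_retention: totalRetention, s3_part: s3Part });
}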

View File

@@ -0,0 +1,13 @@
{
"display_name": "Display Name",
"signoz": "SigNoz",
"email_address": "Email address",
"name_optional": "Name (optional)",
"role": "Role",
"email_placeholder": "john@signoz.io",
"name_placeholder": "John",
"add_another_team_member": "Add another team member",
"invite_team_members": "Invite team members",
"invite_members": "Invite Members",
"pending_invites": "Pending Invites"
}

View File

@@ -0,0 +1,9 @@
{
"general": "General",
"alert_channels": "Alert Channels",
"organization_settings": "Organization Settings",
"my_settings": "My Settings",
"overview_metrics": "Overview Metrics",
"dbcall_metrics": "Database Calls",
"external_metrics": "External Calls"
}

View File

@@ -0,0 +1,5 @@
{
"current_password": "Current Password",
"new_password": "New Password",
"change_password": "Change Password"
}

View File

@@ -0,0 +1,17 @@
{
"monitor_signup": "Monitor your applications. Find what is causing issues.",
"version": "Version",
"latest_version": "Latest version",
"current_version": "Current version",
"release_notes": "Release Notes",
"read_how_to_upgrade": "Read instructions on how to upgrade",
"latest_version_signoz": "You are running the latest version of SigNoz.",
"stale_version": "You are on an older version and may be loosing on the latest features we have shipped. We recommend to upgrade to the latest version",
"oops_something_went_wrong_version": "Oops.. facing issues with fetching updated version information",
"n_a": "N/A",
"routes": {
"general": "General",
"alert_channels": "Alert Channels",
"all_errors": "All Exceptions"
}
}

View File

@@ -0,0 +1,48 @@
{
"page_title_create": "New Notification Channels",
"page_title_edit": "Edit Notification Channels",
"button_save_channel": "Save",
"button_test_channel": "Test",
"button_return": "Back",
"field_channel_name": "Name",
"field_channel_type": "Type",
"field_webhook_url": "Webhook URL",
"field_slack_recipient": "Recipient",
"field_slack_title": "Title",
"field_slack_description": "Description",
"field_webhook_username": "User Name (optional)",
"field_webhook_password": "Password (optional)",
"field_pager_routing_key": "Routing Key",
"field_pager_description": "Description",
"field_pager_severity": "Severity",
"field_pager_details": "Additional Information",
"field_pager_component": "Component",
"field_pager_group": "Group",
"field_pager_class": "Class",
"field_pager_client": "Client",
"field_pager_client_url": "Client URL",
"placeholder_slack_description": "Description",
"placeholder_pager_description": "Description",
"help_pager_client": "Shows up as event source in Pagerduty",
"help_pager_client_url": "Shows up as event source link in Pagerduty",
"help_pager_class": "The class/type of the event",
"help_pager_details": "Specify a key-value format (must be a valid json)",
"help_pager_group": "A cluster or grouping of sources",
"help_pager_component": "The part or component of the affected system that is broke",
"help_pager_severity": "Severity of the incident, must be one of: must be one of the following: 'critical', 'warning', 'error' or 'info'",
"help_webhook_username": "Leave empty for bearer auth or when authentication is not necessary.",
"help_webhook_password": "Specify a password or bearer token",
"help_pager_description": "Shows up as description in pagerduty",
"channel_creation_done": "Successfully created the channel",
"channel_creation_failed": "An unexpected error occurred while creating this channel",
"channel_edit_done": "Channels Edited Successfully",
"channel_edit_failed": "An unexpected error occurred while updating this channel",
"selected_channel_invalid": "Channel type selected is invalid",
"username_no_password": "A Password must be provided with user name",
"test_unsupported": "Sorry, this channel type does not support test yet",
"channel_test_done": "An alert has been sent to this channel",
"channel_test_failed": "Failed to send a test message to this channel, please confirm that the parameters are set correctly",
"channel_test_unexpected": "An unexpected error occurred while sending a message to this channel, please try again",
"webhook_url_required": "Webhook URL is mandatory",
"slack_channel_help": "Specify channel or user, use #channel-name, @username (has to be all lowercase, no whitespace)"
}

View File

@@ -0,0 +1,10 @@
{
"something_went_wrong": "Something went wrong",
"already_logged_in": "Already Logged In",
"success": "Success",
"cancel": "Cancel",
"share": "Share",
"save": "Save",
"edit": "Edit",
"logged_in": "Logged In"
}

View File

@@ -0,0 +1,16 @@
{
"create_dashboard": "Create Dashboard",
"import_json": "Import JSON",
"copy_to_clipboard": "Copy To ClipBoard",
"download_json": "Download JSON",
"view_json": "View JSON",
"export_dashboard": "Export this dashboard.",
"upload_json_file": "Upload JSON file",
"paste_json_below": "Paste JSON below",
"error_upload_json": "Invalid JSON",
"load_json": "Load JSON",
"import_dashboard_by_pasting": "Import dashboard by pasting JSON or importing JSON file",
"error_loading_json": "Error loading JSON file",
"empty_json_not_allowed": "Empty JSON is not allowed",
"new_dashboard_title": "Sample Title"
}

View File

@@ -0,0 +1,7 @@
{
"see_trace_graph": "See what happened before and after this error in a trace graph",
"see_error_in_trace_graph": "See the error in trace graph",
"stack_trace": "Stacktrace",
"older": "Older",
"newer": "Newer"
}

View File

@@ -0,0 +1,21 @@
{
"total_retention_period": "Total Retention Period",
"move_to_s3": "Move to S3\n(should be lower than total retention period)",
"status_message": {
"success": "Your last call to change retention period to {{total_retention}} {{s3_part}} was successful.",
"failed": "Your last call to change retention period to {{total_retention}} {{s3_part}} failed. Please try again.",
"pending": "Your last call to change retention period to {{total_retention}} {{s3_part}} is pending. This may take some time.",
"s3_part": "and S3 to {{s3_retention}}"
},
"retention_save_button": {
"pending": "Updating {{name}} retention period",
"success": "Save"
},
"retention_request_race_condition": "Your request to change retention period has failed, as another request is still in process.",
"retention_error_message": "There was an issue in changing the retention period for {{name}}. Please try again or reach out to support@signoz.io",
"retention_failed_message": "There was an issue in changing the retention period. Please try again or reach out to support@signoz.io",
"retention_comparison_error": "Total retention period for {{name}} cant be lower or equal to the period after which data is moved to s3.",
"retention_null_value_error": "Retention Period for {{name}} is not set yet. Please set by choosing below",
"retention_confirmation": "Are you sure you want to change the retention period?",
"retention_confirmation_description": "This will change the amount of storage needed for saving {{name}}."
}

View File

@@ -0,0 +1,13 @@
{
"display_name": "Display Name",
"signoz": "SigNoz",
"email_address": "Email address",
"name_optional": "Name (optional)",
"role": "Role",
"email_placeholder": "john@signoz.io",
"name_placeholder": "John",
"add_another_team_member": "Add another team member",
"invite_team_members": "Invite team members",
"invite_members": "Invite Members",
"pending_invites": "Pending Invites"
}

View File

@@ -0,0 +1,9 @@
{
"general": "General",
"alert_channels": "Alert Channels",
"organization_settings": "Organization Settings",
"my_settings": "My Settings",
"overview_metrics": "Overview Metrics",
"dbcall_metrics": "Database Calls",
"external_metrics": "External Calls"
}

View File

@@ -0,0 +1,6 @@
{
"current_password": "Current Password",
"new_password": "New Password",
"change_password": "Change Password",
"input_password": "input password"
}

View File

@@ -1,3 +1,17 @@
{
"monitor_signup": "Monitor your applications. Find what is causing issues."
"monitor_signup": "Monitor your applications. Find what is causing issues.",
"version": "Version",
"latest_version": "Latest version",
"current_version": "Current version",
"release_notes": "Release Notes",
"read_how_to_upgrade": "Read instructions on how to upgrade",
"latest_version_signoz": "You are running the latest version of SigNoz.",
"stale_version": "You are on an older version and may be loosing on the latest features we have shipped. We recommend to upgrade to the latest version",
"oops_something_went_wrong_version": "Oops.. facing issues with fetching updated version information",
"n_a": "N/A",
"routes": {
"general": "General",
"alert_channels": "Alert Channels",
"all_errors": "All Exceptions"
}
}

View File

@@ -1,5 +1,4 @@
<svg width="2130" height="600" viewBox="0 0 2130 600" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M765.131 338.922L805.631 334.984C808.068 348.578 812.99 358.562 820.396 364.938C827.896 371.312 837.974 374.5 850.631 374.5C864.037 374.5 874.115 371.688 880.865 366.062C887.709 360.344 891.131 353.688 891.131 346.094C891.131 341.219 889.677 337.094 886.771 333.719C883.959 330.25 878.99 327.25 871.865 324.719C866.99 323.031 855.881 320.031 838.537 315.719C816.224 310.188 800.568 303.391 791.568 295.328C778.912 283.984 772.584 270.156 772.584 253.844C772.584 243.344 775.537 233.547 781.443 224.453C787.443 215.266 796.021 208.281 807.177 203.5C818.427 198.719 831.974 196.328 847.818 196.328C873.693 196.328 893.146 202 906.177 213.344C919.302 224.688 926.193 239.828 926.849 258.766L885.224 260.594C883.443 250 879.599 242.406 873.693 237.812C867.881 233.125 859.115 230.781 847.396 230.781C835.302 230.781 825.834 233.266 818.99 238.234C814.584 241.422 812.381 245.688 812.381 251.031C812.381 255.906 814.443 260.078 818.568 263.547C823.818 267.953 836.568 272.547 856.818 277.328C877.068 282.109 892.021 287.078 901.677 292.234C911.427 297.297 919.021 304.281 924.459 313.188C929.99 322 932.756 332.922 932.756 345.953C932.756 357.766 929.474 368.828 922.912 379.141C916.349 389.453 907.068 397.141 895.068 402.203C883.068 407.172 868.115 409.656 850.209 409.656C824.146 409.656 804.131 403.656 790.162 391.656C776.193 379.562 767.849 361.984 765.131 338.922ZM967.49 236.406V199.844H1007.01V236.406H967.49ZM967.49 406V256.656H1007.01V406H967.49ZM1043.99 415.844L1089.13 421.328C1089.88 426.578 1091.61 430.188 1094.33 432.156C1098.08 434.969 1103.99 436.375 1112.05 436.375C1122.36 436.375 1130.1 434.828 1135.26 431.734C1138.72 429.672 1141.35 426.344 1143.13 421.75C1144.35 418.469 1144.96 412.422 1144.96 403.609V381.812C1133.15 397.938 1118.24 406 1100.24 406C1080.18 406 1064.29 397.516 1052.57 380.547C1043.38 367.141 1038.79 350.453 1038.79 330.484C1038.79 305.453 1044.79 286.328 1056.79 273.109C1068.88 259.891 1083.88 253.281 1101.79 253.281C1120.26 253.281 1135.49 261.391 1147.49 277.609V256.656H1184.47V390.672C1184.47 408.297 1183.02 421.469 1180.11 430.188C1177.21 438.906 1173.13 445.75 1167.88 450.719C1162.63 455.688 1155.6 459.578 1146.79 462.391C1138.07 465.203 1127.01 466.609 1113.6 466.609C1088.29 466.609 1070.33 462.25 1059.74 453.531C1049.15 444.906 1043.85 433.938 1043.85 420.625C1043.85 419.312 1043.9 417.719 1043.99 415.844ZM1079.29 328.234C1079.29 344.078 1082.33 355.703 1088.43 363.109C1094.61 370.422 1102.21 374.078 1111.21 374.078C1120.86 374.078 1129.02 370.328 1135.68 362.828C1142.33 355.234 1145.66 344.031 1145.66 329.219C1145.66 313.75 1142.47 302.266 1136.1 294.766C1129.72 287.266 1121.66 283.516 1111.91 283.516C1102.44 283.516 1094.61 287.219 1088.43 294.625C1082.33 301.938 1079.29 313.141 1079.29 328.234ZM1224.41 406V199.844H1264.91L1349.29 337.516V199.844H1387.96V406H1346.19L1263.08 271.562V406H1224.41ZM1422.69 329.219C1422.69 316.094 1425.93 303.391 1432.4 291.109C1438.86 278.828 1448.01 269.453 1459.82 262.984C1471.72 256.516 1484.99 253.281 1499.61 253.281C1522.21 253.281 1540.72 260.641 1555.16 275.359C1569.6 289.984 1576.82 308.5 1576.82 330.906C1576.82 353.5 1569.51 372.25 1554.88 387.156C1540.35 401.969 1522.02 409.375 1499.9 409.375C1486.21 409.375 1473.13 406.281 1460.66 400.094C1448.29 393.906 1438.86 384.859 1432.4 372.953C1425.93 360.953 1422.69 346.375 1422.69 329.219ZM1463.19 331.328C1463.19 346.141 1466.71 357.484 1473.74 365.359C1480.77 373.234 1489.44 377.172 1499.76 377.172C1510.07 377.172 1518.69 373.234 1525.63 365.359C1532.66 357.484 1536.18 346.047 
1536.18 331.047C1536.18 316.422 1532.66 305.172 1525.63 297.297C1518.69 289.422 1510.07 285.484 1499.76 285.484C1489.44 285.484 1480.77 289.422 1473.74 297.297C1466.71 305.172 1463.19 316.516 1463.19 331.328ZM1592.01 406V375.203L1647.97 310.938C1657.16 300.438 1663.96 292.984 1668.36 288.578C1663.77 288.859 1657.72 289.047 1650.22 289.141L1597.49 289.422V256.656H1720.96V284.641L1663.86 350.453L1643.76 372.25C1654.72 371.594 1661.52 371.266 1664.15 371.266H1725.32V406H1592.01Z" fill="white"/>
<path opacity="0.9" d="M296.795 599.499C131.909 599.499 0 468.361 0 304.437C0 142.153 131.909 9.37476 296.795 9.37476H483.116C544.124 9.37476 591.941 58.5518 591.941 117.564V304.437C591.941 468.361 460.032 599.499 296.795 599.499Z" fill="#F25733"/>
<path d="M294.467 176.702C171.309 176.702 101.936 280.076 99.0428 284.476C91.91 295.315 91.91 309.334 99.0481 320.181C101.936 324.574 171.309 427.947 294.467 427.947C417.624 427.947 486.997 324.574 489.89 320.173C497.023 309.334 497.024 295.315 489.885 284.468C486.997 280.076 417.624 176.702 294.467 176.702ZM116.09 308.659C113.557 304.811 113.557 299.839 116.09 295.99C118.416 292.45 167.808 218.911 256.115 201.271C216.099 216.928 187.625 256.307 187.625 302.325C187.625 348.342 216.099 387.721 256.115 403.378C167.808 385.737 118.416 312.198 116.09 308.659ZM245.232 302.324C245.232 308.059 240.646 312.706 234.989 312.706C229.331 312.706 224.746 308.059 224.746 302.324C224.746 263.357 256.022 231.655 294.466 231.655C300.123 231.655 304.709 236.303 304.709 242.037C304.709 247.772 300.123 252.419 294.466 252.419C267.317 252.419 245.232 274.806 245.232 302.324ZM294.467 327.565C280.736 327.565 269.565 316.243 269.565 302.325C269.565 288.407 280.736 277.084 294.467 277.084C308.199 277.084 319.369 288.406 319.369 302.325C319.369 316.243 308.199 327.565 294.467 327.565ZM472.843 308.659C470.516 312.198 421.125 385.737 332.818 403.378C372.836 387.72 401.309 348.342 401.309 302.325C401.309 256.307 372.836 216.929 332.818 201.272C421.125 218.913 470.516 292.451 472.843 295.99C475.376 299.839 475.376 304.811 472.843 308.659Z" fill="#F9F2F9"/>
</svg>
<svg width="40" height="40" viewBox="0 0 40 40" fill="none" xmlns="http://www.w3.org/2000/svg">
<path opacity="0.9" d="M20.0557 39.9666C8.91365 39.9666 0 31.2241 0 20.2958C0 9.47687 8.91365 0.625 20.0557 0.625H32.6462C36.7688 0.625 40 3.90347 40 7.83763V20.2958C40 31.2241 31.0863 39.9666 20.0557 39.9666Z" fill="#DD431F"/>
<path d="M19.8991 11.7803C11.5768 11.7803 6.889 18.6718 6.69348 18.9652C6.21149 19.6878 6.21149 20.6224 6.69384 21.3455C6.889 21.6384 11.5768 28.5299 19.8991 28.5299C28.2214 28.5299 32.9092 21.6384 33.1047 21.345C33.5868 20.6224 33.5868 19.6878 33.1044 18.9647C32.9092 18.6718 28.2214 11.7803 19.8991 11.7803ZM7.84542 20.5774C7.67429 20.3208 7.67429 19.9894 7.84542 19.7328C8.00265 19.4968 11.3403 14.5942 17.3076 13.4182C14.6035 14.462 12.6793 17.0872 12.6793 20.1551C12.6793 23.2229 14.6035 25.8482 17.3076 26.892C11.3403 25.7159 8.00265 20.8133 7.84542 20.5774ZM16.5721 20.1551C16.5721 20.5374 16.2622 20.8472 15.8799 20.8472C15.4977 20.8472 15.1878 20.5374 15.1878 20.1551C15.1878 17.5573 17.3012 15.4438 19.8991 15.4438C20.2813 15.4438 20.5912 15.7536 20.5912 16.1359C20.5912 16.5183 20.2814 16.8281 19.8991 16.8281C18.0645 16.8281 16.5721 18.3205 16.5721 20.1551ZM19.8991 21.8378C18.9713 21.8378 18.2164 21.083 18.2164 20.1551C18.2164 19.2272 18.9713 18.4723 19.8991 18.4723C20.827 18.4723 21.5819 19.2272 21.5819 20.1551C21.5819 21.083 20.827 21.8378 19.8991 21.8378ZM31.9528 20.5774C31.7956 20.8133 28.458 25.7159 22.4907 26.892C25.1949 25.8481 27.1189 23.2229 27.1189 20.1551C27.1189 17.0873 25.1949 14.462 22.4907 13.4182C28.458 14.5943 31.7956 19.4968 31.9528 19.7328C32.124 19.9894 32.124 20.3208 31.9528 20.5774Z" fill="#F9F2F9"/>
</svg>

Image diff (SVG icon): 5.6 KiB before, 1.6 KiB after.

View File

@@ -0,0 +1,165 @@
/* eslint-disable react-hooks/exhaustive-deps */
import { notification } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import loginApi from 'api/user/login';
import { Logout } from 'api/utils';
import Spinner from 'components/Spinner';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';
import history from 'lib/history';
import React, { useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useDispatch, useSelector } from 'react-redux';
import { matchPath, Redirect, useLocation } from 'react-router-dom';
import { Dispatch } from 'redux';
import { AppState } from 'store/reducers';
import { getInitialUserTokenRefreshToken } from 'store/utils';
import AppActions from 'types/actions';
import { UPDATE_USER_IS_FETCH } from 'types/actions/app';
import AppReducer from 'types/reducer/app';
import { routePermission } from 'utils/permission';
import routes from './routes';
import afterLogin from './utils';
function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const { pathname } = useLocation();
const mapRoutes = useMemo(
() =>
new Map(
routes.map((e) => {
const currentPath = matchPath(pathname, {
path: e.path,
});
return [currentPath === null ? null : 'current', e];
}),
),
[pathname],
);
const {
isUserFetching,
isUserFetchingError,
isLoggedIn: isLoggedInState,
} = useSelector<AppState, AppReducer>((state) => state.app);
const { t } = useTranslation(['common']);
const dispatch = useDispatch<Dispatch<AppActions>>();
const currentRoute = mapRoutes.get('current');
const navigateToLoginIfNotLoggedIn = (isLoggedIn = isLoggedInState): void => {
dispatch({
type: UPDATE_USER_IS_FETCH,
payload: {
isUserFetching: false,
},
});
if (!isLoggedIn) {
history.push(ROUTES.LOGIN);
}
};
// eslint-disable-next-line sonarjs/cognitive-complexity
useEffect(() => {
(async (): Promise<void> => {
try {
const isLocalStorageLoggedIn =
getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN) === 'true';
if (currentRoute) {
const { isPrivate, key } = currentRoute;
if (isPrivate) {
const localStorageUserAuthToken = getInitialUserTokenRefreshToken();
if (
localStorageUserAuthToken &&
localStorageUserAuthToken.refreshJwt &&
isUserFetching
) {
// localstorage token is present
const { refreshJwt } = localStorageUserAuthToken;
// renew web access token
const response = await loginApi({
refreshToken: refreshJwt,
});
if (response.statusCode === 200) {
const route = routePermission[key];
// fetch all of the user's resources and put them in redux
const userResponse = await afterLogin(
response.payload.userId,
response.payload.accessJwt,
response.payload.refreshJwt,
);
if (
userResponse &&
route.find((e) => e === userResponse.payload.role) === undefined
) {
history.push(ROUTES.UN_AUTHORIZED);
}
} else {
Logout();
notification.error({
message: response.error || t('something_went_wrong'),
});
}
} else {
// localstorage token is missing, or the user has already been fetched
navigateToLoginIfNotLoggedIn(isLocalStorageLoggedIn);
}
} else {
// public route: no need to fetch the user; mark user fetching as done
if (getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN) === 'true') {
history.push(ROUTES.APPLICATION);
}
dispatch({
type: UPDATE_USER_IS_FETCH,
payload: {
isUserFetching: false,
},
});
}
} else if (pathname === ROUTES.HOME_PAGE) {
// root page: redirect according to login state
if (isLoggedInState) {
history.push(ROUTES.APPLICATION);
} else {
navigateToLoginIfNotLoggedIn();
}
} else {
// not found
navigateToLoginIfNotLoggedIn(isLocalStorageLoggedIn);
}
} catch (error) {
// something went wrong
history.push(ROUTES.SOMETHING_WENT_WRONG);
}
})();
}, [dispatch, isLoggedInState, currentRoute]);
if (isUserFetchingError) {
return <Redirect to={ROUTES.SOMETHING_WENT_WRONG} />;
}
if (isUserFetching) {
return <Spinner tip="Loading..." />;
}
// NOTE: disabling this rule as a wrapper element is not needed here
// eslint-disable-next-line react/jsx-no-useless-fragment
return <>{children}</>;
}
interface PrivateRouteProps {
children: React.ReactChild;
}
export default PrivateRoute;
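
The role check above keys into routePermission and redirects to UN_AUTHORIZED when the refreshed user's role is not listed for the current route key. A minimal sketch of that check in isolation, assuming routePermission maps route keys to allowed roles and that the role names are ADMIN / EDITOR / VIEWER (both assumptions, not shown in this diff):

type Role = 'ADMIN' | 'EDITOR' | 'VIEWER'; // assumed role names

// Assumed shape: each route key lists the roles allowed to open it.
const routePermission: Record<string, Role[]> = {
  APPLICATION: ['ADMIN', 'EDITOR', 'VIEWER'],
  ORG_SETTINGS: ['ADMIN'],
};

// Mirrors the guard's check: when the user's role is not listed for the
// current route key, PrivateRoute pushes ROUTES.UN_AUTHORIZED.
function isAllowed(routeKey: string, userRole: Role): boolean {
  const allowedRoles = routePermission[routeKey] ?? [];
  return allowedRoles.find((role) => role === userRole) !== undefined;
}

// isAllowed('ORG_SETTINGS', 'VIEWER') === false -> redirect to UN_AUTHORIZED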

View File

@@ -1,42 +1,36 @@
import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import ROUTES from 'constants/routes';
import AppLayout from 'container/AppLayout';
import history from 'lib/history';
import React, { Suspense } from 'react';
import { useSelector } from 'react-redux';
import { Redirect, Route, Router, Switch } from 'react-router-dom';
import { AppState } from 'store/reducers';
import AppReducer from 'types/reducer/app';
import { Route, Router, Switch } from 'react-router-dom';
import PrivateRoute from './Private';
import routes from './routes';
function App(): JSX.Element {
const { isLoggedIn } = useSelector<AppState, AppReducer>((state) => state.app);
return (
<Router history={history}>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => (
<Route key={`${path}`} exact={exact} path={path} component={component} />
))}
<Route
path="/"
exact
render={(): JSX.Element =>
isLoggedIn ? (
<Redirect to={ROUTES.APPLICATION} />
) : (
<Redirect to={ROUTES.SIGN_UP} />
)
}
/>
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
<PrivateRoute>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => {
return (
<Route
key={`${path}`}
exact={exact}
path={path}
component={component}
/>
);
})}
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
</PrivateRoute>
</Router>
);
}

View File

@@ -12,10 +12,7 @@ export const ServiceMetricsPage = Loadable(
);
export const ServiceMapPage = Loadable(
() =>
import(
/* webpackChunkName: "ServiceMapPage" */ 'modules/Servicemap/ServiceMap'
),
() => import(/* webpackChunkName: "ServiceMapPage" */ 'modules/Servicemap'),
);
export const TraceFilter = Loadable(
@@ -27,10 +24,7 @@ export const TraceDetail = Loadable(
);
export const UsageExplorerPage = Loadable(
() =>
import(
/* webpackChunkName: "UsageExplorerPage" */ 'modules/Usage/UsageExplorerDef'
),
() => import(/* webpackChunkName: "UsageExplorerPage" */ 'modules/Usage'),
);
export const SignupPage = Loadable(
@@ -83,5 +77,44 @@ export const EditAlertChannelsAlerts = Loadable(
);
export const AllAlertChannels = Loadable(
() => import(/* webpackChunkName: "All Channels" */ 'pages/AllAlertChannels'),
() => import(/* webpackChunkName: "All Channels" */ 'pages/Settings'),
);
export const AllErrors = Loadable(
/* webpackChunkName: "All Exceptions" */ () => import('pages/AllErrors'),
);
export const ErrorDetails = Loadable(
() => import(/* webpackChunkName: "Error Details" */ 'pages/ErrorDetails'),
);
export const StatusPage = Loadable(
() => import(/* webpackChunkName: "All Status" */ 'pages/Status'),
);
export const OrganizationSettings = Loadable(
() => import(/* webpackChunkName: "All Settings" */ 'pages/Settings'),
);
export const MySettings = Loadable(
() => import(/* webpackChunkName: "All MySettings" */ 'pages/MySettings'),
);
export const Login = Loadable(
() => import(/* webpackChunkName: "Login" */ 'pages/Login'),
);
export const UnAuthorized = Loadable(
() => import(/* webpackChunkName: "UnAuthorized" */ 'pages/UnAuthorized'),
);
export const PasswordReset = Loadable(
() => import(/* webpackChunkName: "ResetPassword" */ 'pages/ResetPassword'),
);
export const SomethingWentWrong = Loadable(
() =>
import(
/* webpackChunkName: "SomethingWentWrong" */ 'pages/SomethingWentWrong'
),
);
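
These page components are code-split through dynamic import() calls with webpackChunkName magic comments. The Loadable helper itself is not part of this diff; a minimal sketch of what such a helper commonly wraps, assuming it is a thin layer over React.lazy:

import React from 'react';

// Hypothetical stand-in for the project's Loadable helper (its implementation
// is not shown here): React.lazy defers loading the chunk until the component
// first renders inside a <Suspense> boundary.
const Loadable = (
  importFn: () => Promise<{ default: React.ComponentType }>,
): React.LazyExoticComponent<React.ComponentType> => React.lazy(importFn);

// The webpackChunkName magic comment names the emitted chunk (e.g. Login.<hash>.js),
// which keeps network traces and source maps readable.
export const Login = Loadable(
  () => import(/* webpackChunkName: "Login" */ 'pages/Login'),
);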

View File

@@ -4,21 +4,30 @@ import { RouteProps } from 'react-router-dom';
import {
AllAlertChannels,
AllErrors,
CreateAlertChannelAlerts,
CreateNewAlerts,
DashboardPage,
EditAlertChannelsAlerts,
EditRulesPage,
ErrorDetails,
InstrumentationPage,
ListAllALertsPage,
Login,
MySettings,
NewDashboardPage,
OrganizationSettings,
PasswordReset,
ServiceMapPage,
ServiceMetricsPage,
ServicesTablePage,
SettingsPage,
SignupPage,
SomethingWentWrong,
StatusPage,
TraceDetail,
TraceFilter,
UnAuthorized,
UsageExplorerPage,
} from './pageComponents';
@@ -27,99 +36,199 @@ const routes: AppRoutes[] = [
component: SignupPage,
path: ROUTES.SIGN_UP,
exact: true,
isPrivate: false,
key: 'SIGN_UP',
},
{
component: ServicesTablePage,
path: ROUTES.APPLICATION,
exact: true,
isPrivate: true,
key: 'APPLICATION',
},
{
path: ROUTES.SERVICE_METRICS,
exact: true,
component: ServiceMetricsPage,
isPrivate: true,
key: 'SERVICE_METRICS',
},
{
path: ROUTES.SERVICE_MAP,
component: ServiceMapPage,
isPrivate: true,
exact: true,
key: 'SERVICE_MAP',
},
{
path: ROUTES.TRACE_DETAIL,
exact: true,
component: TraceDetail,
isPrivate: true,
key: 'TRACE_DETAIL',
},
{
path: ROUTES.SETTINGS,
exact: true,
component: SettingsPage,
isPrivate: true,
key: 'SETTINGS',
},
{
path: ROUTES.USAGE_EXPLORER,
exact: true,
component: UsageExplorerPage,
isPrivate: true,
key: 'USAGE_EXPLORER',
},
{
path: ROUTES.INSTRUMENTATION,
exact: true,
component: InstrumentationPage,
isPrivate: true,
key: 'INSTRUMENTATION',
},
{
path: ROUTES.ALL_DASHBOARD,
exact: true,
component: DashboardPage,
isPrivate: true,
key: 'ALL_DASHBOARD',
},
{
path: ROUTES.DASHBOARD,
exact: true,
component: NewDashboardPage,
isPrivate: true,
key: 'DASHBOARD',
},
{
path: ROUTES.DASHBOARD_WIDGET,
exact: true,
component: DashboardWidget,
isPrivate: true,
key: 'DASHBOARD_WIDGET',
},
{
path: ROUTES.EDIT_ALERTS,
exact: true,
component: EditRulesPage,
isPrivate: true,
key: 'EDIT_ALERTS',
},
{
path: ROUTES.LIST_ALL_ALERT,
exact: true,
component: ListAllALertsPage,
isPrivate: true,
key: 'LIST_ALL_ALERT',
},
{
path: ROUTES.ALERTS_NEW,
exact: true,
component: CreateNewAlerts,
isPrivate: true,
key: 'ALERTS_NEW',
},
{
path: ROUTES.TRACE,
exact: true,
component: TraceFilter,
isPrivate: true,
key: 'TRACE',
},
{
path: ROUTES.CHANNELS_NEW,
exact: true,
component: CreateAlertChannelAlerts,
isPrivate: true,
key: 'CHANNELS_NEW',
},
{
path: ROUTES.CHANNELS_EDIT,
exact: true,
component: EditAlertChannelsAlerts,
isPrivate: true,
key: 'CHANNELS_EDIT',
},
{
path: ROUTES.ALL_CHANNELS,
exact: true,
component: AllAlertChannels,
isPrivate: true,
key: 'ALL_CHANNELS',
},
{
path: ROUTES.ALL_ERROR,
exact: true,
isPrivate: true,
component: AllErrors,
key: 'ALL_ERROR',
},
{
path: ROUTES.ERROR_DETAIL,
exact: true,
component: ErrorDetails,
isPrivate: true,
key: 'ERROR_DETAIL',
},
{
path: ROUTES.VERSION,
exact: true,
component: StatusPage,
isPrivate: true,
key: 'VERSION',
},
{
path: ROUTES.ORG_SETTINGS,
exact: true,
component: OrganizationSettings,
isPrivate: true,
key: 'ORG_SETTINGS',
},
{
path: ROUTES.MY_SETTINGS,
exact: true,
component: MySettings,
isPrivate: true,
key: 'MY_SETTINGS',
},
{
path: ROUTES.LOGIN,
exact: true,
component: Login,
isPrivate: false,
key: 'LOGIN',
},
{
path: ROUTES.UN_AUTHORIZED,
exact: true,
component: UnAuthorized,
key: 'UN_AUTHORIZED',
isPrivate: true,
},
{
path: ROUTES.PASSWORD_RESET,
exact: true,
component: PasswordReset,
key: 'PASSWORD_RESET',
isPrivate: false,
},
{
path: ROUTES.SOMETHING_WENT_WRONG,
exact: true,
component: SomethingWentWrong,
key: 'SOMETHING_WENT_WRONG',
isPrivate: false,
},
];
interface AppRoutes {
export interface AppRoutes {
component: RouteProps['component'];
path: RouteProps['path'];
exact: RouteProps['exact'];
isPrivate?: boolean;
isPrivate: boolean;
key: keyof typeof ROUTES;
}
export default routes;
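
With isPrivate now required and key constrained to keyof typeof ROUTES, the AppRoutes interface is exported for consumers outside this file. A small illustrative sketch of one such consumer, assuming it lives next to routes.ts (the helper itself is not part of this diff):

import routes, { AppRoutes } from './routes'; // same relative import Private.tsx uses

// Illustrative only: collect the keys of every route the PrivateRoute guard
// treats as private (i.e. routes that require a valid session and role check).
export const privateRouteKeys = routes
  .filter((route: AppRoutes) => route.isPrivate)
  .map((route) => route.key);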

View File

@@ -0,0 +1,91 @@
import getLocalStorageApi from 'api/browser/localstorage/get';
import setLocalStorageApi from 'api/browser/localstorage/set';
import getUserApi from 'api/user/getUser';
import { Logout } from 'api/utils';
import { LOCALSTORAGE } from 'constants/localStorage';
import store from 'store';
import AppActions from 'types/actions';
import {
LOGGED_IN,
UPDATE_USER,
UPDATE_USER_ACCESS_REFRESH_ACCESS_TOKEN,
UPDATE_USER_IS_FETCH,
} from 'types/actions/app';
import { SuccessResponse } from 'types/api';
import { PayloadProps } from 'types/api/user/getUser';
const afterLogin = async (
userId: string,
authToken: string,
refreshToken: string,
): Promise<SuccessResponse<PayloadProps> | undefined> => {
setLocalStorageApi(LOCALSTORAGE.AUTH_TOKEN, authToken);
setLocalStorageApi(LOCALSTORAGE.REFRESH_AUTH_TOKEN, refreshToken);
store.dispatch<AppActions>({
type: UPDATE_USER_ACCESS_REFRESH_ACCESS_TOKEN,
payload: {
accessJwt: authToken,
refreshJwt: refreshToken,
},
});
const [getUserResponse] = await Promise.all([
getUserApi({
userId,
token: authToken,
}),
]);
if (getUserResponse.statusCode === 200 && getUserResponse.payload) {
store.dispatch<AppActions>({
type: LOGGED_IN,
payload: {
isLoggedIn: true,
},
});
const { payload } = getUserResponse;
store.dispatch<AppActions>({
type: UPDATE_USER,
payload: {
ROLE: payload.role,
email: payload.email,
name: payload.name,
orgName: payload.organization,
profilePictureURL: payload.profilePictureURL,
userId: payload.id,
orgId: payload.orgId,
},
});
const isLoggedInLocalStorage = getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN);
if (isLoggedInLocalStorage === null) {
setLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN, 'true');
}
store.dispatch({
type: UPDATE_USER_IS_FETCH,
payload: {
isUserFetching: false,
},
});
return getUserResponse;
}
store.dispatch({
type: UPDATE_USER_IS_FETCH,
payload: {
isUserFetching: false,
},
});
Logout();
return undefined;
};
export default afterLogin;
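
This is the same helper PrivateRoute calls after exchanging the stored refresh token for new JWTs. A minimal sketch of that call sequence, assuming loginApi returns the statusCode/payload shape seen in the guard above:

import loginApi from 'api/user/login';
import afterLogin from './utils'; // relative to the AppRoutes folder, as in Private.tsx

// Sketch of the refresh flow: trade the stored refresh token for fresh JWTs,
// then let afterLogin persist them and hydrate the redux store with the user.
async function renewSession(refreshJwt: string): Promise<void> {
  const response = await loginApi({ refreshToken: refreshJwt });
  if (response.statusCode === 200) {
    // afterLogin resolves to undefined when fetching the user failed (Logout ran).
    await afterLogin(
      response.payload.userId,
      response.payload.accessJwt,
      response.payload.refreshJwt,
    );
  }
}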

View File

@@ -12,7 +12,7 @@ i18n
.use(initReactI18next)
// init i18next
.init({
debug: true,
debug: false,
fallbackLng: 'en',
interpolation: {
escapeValue: false, // not needed for react as it escapes by default

View File

@@ -21,10 +21,15 @@ export function ErrorResponseHandler(error: AxiosError): ErrorResponse {
};
}
const { errors, error } = data;
const errorMessage =
Array.isArray(errors) && errors.length >= 1 ? errors[0].msg : error;
return {
statusCode,
payload: null,
error: data.error,
error: errorMessage,
message: null,
};
}
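
The handler now prefers the first entry of a structured errors array over the flat error string. A small self-contained sketch of that fallback, with the payload types assumed for illustration only:

// Assumed payload shapes, for illustration.
interface ApiErrorItem {
  msg: string;
}
interface ApiErrorData {
  errors?: ApiErrorItem[];
  error?: string;
}

// Prefer the first structured error message; fall back to the flat error field.
function pickErrorMessage(data: ApiErrorData): string | undefined {
  const { errors, error } = data;
  return Array.isArray(errors) && errors.length >= 1 ? errors[0].msg : error;
}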

View File

@@ -1,4 +1,6 @@
const apiV1 = '/api/v1/';
export const apiV2 = '/api/alertmanager';
export const apiV2 = '/api/v2/';
export const apiAlertManager = '/api/alertmanager';
export default apiV1;
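
apiV2 no longer doubles as the alertmanager prefix; alertmanager calls get their own constant. A hedged sketch of how the split prefixes would be composed into request paths (the endpoint names below are placeholders, not real SigNoz endpoints):

// Illustrative composition of request paths from the three prefixes.
const apiV1 = '/api/v1/';
const apiV2 = '/api/v2/';
const apiAlertManager = '/api/alertmanager';

const servicesUrl = `${apiV1}services`;            // -> /api/v1/services (placeholder)
const metricsUrl = `${apiV2}metrics`;              // -> /api/v2/metrics (placeholder)
const channelsUrl = `${apiAlertManager}/channels`; // -> /api/alertmanager/channels (placeholder)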

Some files were not shown because too many files have changed in this diff.