Compare commits

...

914 Commits

Author SHA1 Message Date
Ankit Nayan
0db4073e94 chore: different ticker interval for active user
(cherry picked from commit 215ea8d819)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2023-01-08 21:25:47 +00:00
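
The entry above moves the active-user check onto its own ticker interval rather than sharing one with other telemetry jobs. A minimal Go sketch of that pattern, assuming illustrative intervals and job names (not the actual SigNoz telemetry code):

```go
package main

import (
	"fmt"
	"time"
)

// Two tickers with different cadences: a frequent one for generic
// telemetry and a dedicated, slower one for the active-user check.
// The intervals and printed actions are illustrative assumptions.
func main() {
	heartbeatTicker := time.NewTicker(30 * time.Second) // generic telemetry
	activeUserTicker := time.NewTicker(6 * time.Hour)   // dedicated cadence
	defer heartbeatTicker.Stop()
	defer activeUserTicker.Stop()

	for { // runs until the process exits, server-style
		select {
		case <-heartbeatTicker.C:
			fmt.Println("send heartbeat event")
		case <-activeUserTicker.C:
			fmt.Println("check and report active user")
		}
	}
}
```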
Prashant Shahi
d134e4f4d9 chore: 📌 pin versions: SigNoz 0.13.0, SigNoz OtelCollector 0.66.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-12-29 14:27:24 +05:30
Ankit Nayan
e03b0aa45f chore/analytics (#1939)
* fix: not capturing empty filters

* feat: removing signoz_ metrics using grep

* fix: initialise companyDomain

* feat: added ttl status
2022-12-29 01:14:57 +05:30
Vishal Sharma
46e131698e fix: exception filter clear (#1936) 2022-12-28 17:48:39 +05:30
Ankit Nayan
d1ee15c372 fix: nil pointer 2022-12-28 15:30:24 +05:30
Ankit Nayan
1e035be978 Merge branch 'develop' into chore/analytics 2022-12-28 15:26:59 +05:30
Vishal Sharma
88a97fc4b8 add exception page filters support (#1919)
* feat: backend changes for supporting exception filters

* feat: frontend changes for exception page filter support

* chore: extractSingleFilterValue is updated

* fix: handle frontend edge case

Co-authored-by: Ankit Nayan <ankit@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-12-28 14:54:15 +05:30
Nityananda Gohain
2e58f6db7a fix: error handling for index removal from selected field (#1935)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-28 14:31:57 +05:30
Amol Umbark
1916fc87b0 fix: added clear filters button (#1920)
* fix: added clear filters button

* fix: removed console log


Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-28 14:30:37 +05:30
Ankit Nayan
d8882acdd7 fix: changed or to and 2022-12-28 02:34:07 +05:30
Ankit Nayan
7f42b39684 fix: changed or to and 2022-12-28 02:33:21 +05:30
Ankit Nayan
b11f79b4c7 Chore/analytics (#1922)
* fix: reduced rate limit to 2 of each event in 1 min

* feat: added new event for length of filters in logs search page

* feat: added distributed cluster info

* fix: length of filters in logs

* feat: dashboard metadata with no rateLimit

* feat: active user

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-12-28 02:16:46 +05:30
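
The rate-limit bullet above ("2 of each event in 1 min") amounts to a small per-event-name limiter. A hedged sketch, assuming a fixed window that simply resets each minute; the real telemetry code may count differently:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// eventLimiter allows at most `limit` occurrences of each event name per
// window. Event names and the reset strategy are illustrative assumptions.
type eventLimiter struct {
	mu     sync.Mutex
	counts map[string]int
	limit  int
}

func newEventLimiter(limit int, window time.Duration) *eventLimiter {
	l := &eventLimiter{counts: map[string]int{}, limit: limit}
	go func() {
		for range time.Tick(window) { // reset counters every window
			l.mu.Lock()
			l.counts = map[string]int{}
			l.mu.Unlock()
		}
	}()
	return l
}

func (l *eventLimiter) Allow(event string) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.counts[event] >= l.limit {
		return false
	}
	l.counts[event]++
	return true
}

func main() {
	limiter := newEventLimiter(2, time.Minute)
	for i := 0; i < 3; i++ {
		fmt.Println("event allowed:", limiter.Allow("LOGS_FILTERS")) // true, true, false
	}
}
```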
Ankit Nayan
c717e39a1a Merge branch 'chore/analytics' of https://github.com/SigNoz/signoz into chore/analytics 2022-12-28 02:10:36 +05:30
Ankit Nayan
c3253687d0 feat: active user 2022-12-28 02:09:44 +05:30
Yash Joshi
895c721b37 fix(version): use link instead of click handler (#1931)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-12-27 23:13:13 +05:30
Vishal Sharma
35f5fb6957 fix: respect durationSort feature flag on getSpanFilters API (#1900)
* fix: respect durationSort feature flag on getSpanFilters API

* chore: update DB query
2022-12-27 21:09:36 +05:30
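
The durationSort fix above is an instance of feature-flag gating: a sort clause is only appended when the flag is on. A hedged sketch under assumed names; the query shape and flag lookup are illustrative, not the actual getSpanFilters implementation:

```go
package main

import "fmt"

// buildSpanFiltersQuery appends duration ordering only when the
// durationSort feature flag is enabled. Table and column names are
// assumptions for illustration.
func buildSpanFiltersQuery(durationSortEnabled bool) string {
	query := "SELECT spanID, durationNano FROM signoz_traces.signoz_index_v2 WHERE timestamp BETWEEN ? AND ?"
	if durationSortEnabled {
		query += " ORDER BY durationNano DESC"
	}
	return query + " LIMIT 100"
}

func main() {
	fmt.Println(buildSpanFiltersQuery(false))
	fmt.Println(buildSpanFiltersQuery(true))
}
```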
Palash Gupta
40ec4517c2 fix: per page is added in the dependency (#1926) 2022-12-27 19:01:56 +05:30
Srikanth Chekuri
48a6f536fa chore: increase dimensions_cache_size for signozspanmetrics processor (#1925) 2022-12-27 15:44:39 +05:30
Palash Gupta
13a6d7f7c6 fix: live tail time out is updated (#1899)
* fix: live tail time out is updated
* Update livetail.ts

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-27 13:36:37 +05:30
Srikanth Chekuri
8b6ed0f951 Merge branch 'develop' into chore/analytics 2022-12-27 12:21:51 +05:30
Srikanth Chekuri
eef48c54f8 fix(query_range): invalid memory address or nil pointer dereference (#1875)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-27 11:28:15 +05:30
Ankit Nayan
aad962d07d feat: dashboard metadata with no rateLimit 2022-12-27 01:10:01 +05:30
Ankit Nayan
18bbb3cf36 fix: length of filters in logs 2022-12-26 23:10:55 +05:30
Ankit Nayan
a3455fb553 feat: added distributed cluster info 2022-12-26 23:01:54 +05:30
Ankit Nayan
ece2988d0d feat: added new event for length of filters in logs search page 2022-12-26 22:11:23 +05:30
Ankit Nayan
db704b212d fix: reduced rate limit to 2 of each event in 1 min 2022-12-26 21:52:54 +05:30
Amol Umbark
4b13b0a8a4 fix: resolves issue related to ops not flowing from search box to panel (#1918) 2022-12-26 20:31:50 +05:30
Palash Gupta
6f6499c267 fix: flush logs before starting (#1912)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:25:55 +05:30
Prashant Shahi
3dcb44a758 fix docker-compose for swarm and related changes for distributed clickhouse (#1863)
* chore: 🔧 fix docker-compose.yaml for swarm

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore: 🔧 add .gitkeep files for docker and swarm

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:16:47 +05:30
Palash Gupta
0595cdc7af fix: scroll is added (#1873)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:14:54 +05:30
Palash Gupta
092c02762f feat: add not found when no events are present (#1874)
* chore: not found component is updated
* feat: no events handling is updated

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 17:14:17 +05:30
Palash Gupta
d1d2829d2b fix: logs issues (#1889)
* changed debounce interval to 600ms

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 16:45:28 +05:30
Palash Gupta
ac446294e7 fix: logs selection of filter is fixed (#1910)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 16:20:34 +05:30
Marius Kimmina
1cceab4d5e fix(FE): remove unnecessary complexity from password check (#1904)
Signed-off-by: Marius Kimmina <mar.kimmina@gmail.com>
2022-12-26 16:02:18 +05:30
Ankit Nayan
02898d14f9 fix: removes password validations other than length (#1909) 2022-12-26 15:42:08 +05:30
Nityananda Gohain
09af6c262c fix: proxy_read_timeout updated in nginx conf (#1885)
* fix: proxy_read_timeout updated in nginx conf
* fix: live tail endpoint: flush the headers first

Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 15:29:49 +05:30
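
The second bullet above pairs with the nginx timeout change: a streaming endpoint like live tail should write and flush its response headers immediately, so the proxy sees an open stream rather than a silent connection. A sketch using Go's http.Flusher; the path and payload are illustrative assumptions:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// liveTailHandler flushes headers before any log line is available, then
// streams and flushes each line as it arrives.
func liveTailHandler(w http.ResponseWriter, r *http.Request) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/event-stream")
	w.WriteHeader(http.StatusOK)
	flusher.Flush() // headers reach the client before the first log line

	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "data: log line %d\n\n", i)
		flusher.Flush()
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/api/v1/logs/tail", liveTailHandler) // assumed path
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```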
Amol Umbark
faeaeb61a0 fix: added validations on query builder (#1906)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-26 15:10:01 +05:30
Nityananda Gohain
9c80ba6b78 fix: allow multiple spaces between a filter expression (#1897)
* fix: allow multiple spaces between a filter expression

* fix: regex updated to respect spaces between a search string


Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-12-26 15:08:43 +05:30
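
The fix above boils down to matching filter tokens with `\s+` so any run of spaces is accepted. A simplified sketch; the expression grammar here is an assumption, not the actual logs query parser:

```go
package main

import (
	"fmt"
	"regexp"
)

// filterRe tolerates one or more spaces between the field, the operator,
// and the value. The grammar is a toy version for illustration.
var filterRe = regexp.MustCompile(`(?i)(\w+)\s+(IN|CONTAINS|=|!=)\s+(.+)`)

func main() {
	for _, expr := range []string{"id IN (1,2)", "id   IN   (1,2)"} {
		fmt.Println(filterRe.FindStringSubmatch(expr)) // both match identically
	}
}
```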
Palash Gupta
dbba8b5b55 feat: event time is updated when root span is missing 2022-12-22 17:35:20 +05:30
Pranay Prateek
58ce838023 chore: Updating stale edition message (#1896) 2022-12-22 11:44:28 +05:30
Srikanth Chekuri
5260b152f5 fix: do not show result of sub queries in external calls (#1858) 2022-12-20 19:54:27 +05:30
Ankit Nayan
f2dd254d83 Merge pull request #1849 from SigNoz/release/v0.12.0
Release/v0.12.0
2022-12-11 00:14:59 +05:30
Prashant Shahi
82d53fa45c chore: 📌 pin versions: SigNoz 0.12.0, SigNoz OtelCollector 0.66.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-12-10 20:17:49 +05:30
Zsombor
c38d1c150d Fix case sensitivity in query parsing (#1670)
* Fix case sensitivity in query parsing - now the parser correctly recognizes fields which contain uppercase letters

* fix: logs parser respects the case of fields

Co-authored-by: nityanandagohain <nityanandagohain@gmail.com>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-10 19:27:57 +05:30
Srikanth Chekuri
16170eacc0 Revert "chore: use local table for inner query (#1815)" (#1847)
* Revert "chore: use local table for inner query (#1815)"

This reverts commit 1b52edb056.

* chore: use localhost
2022-12-10 19:25:44 +05:30
Amol Umbark
66ddbfc085 fix: solves issue legend update causing null ch query (#1845)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-10 12:21:20 +05:30
Vishal Sharma
2715ab61a4 chore: introduce docker_multi_node_cluster and by default set to false (#1839)
* chore: introduce docker_multi_node_cluster and by default set to false

* chore(query-service): 🔧 include docker_multi_node_cluster for tests

Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Prashant Shahi <me@prashantshahi.dev>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-09 21:57:25 +05:30
Amol Umbark
4d291e92b9 fix: changed table names in default alert queries (#1843)
Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
2022-12-09 21:54:51 +05:30
Nityananda Gohain
1b73649f8e fix: add default value for materialized column in distributed logs table (#1835)
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-09 20:18:58 +05:30
Amol Umbark
0abae1c09c feat: show release note in alerts dashboards and services pages (#1840)
* feat: show release note in alerts dashboards and services pages

* fix: made code changes as per review and changed message in release note

* fix: solved build pipeline issue

* fix: solved lint issue

Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-09 20:16:09 +05:30
Pranay Prateek
4d02603aed Update README.md 2022-12-09 14:01:05 +05:30
Pranay Prateek
c58e43a678 Update README.md 2022-12-09 12:54:34 +05:30
Pranay Prateek
b77bbe1e4f Update README.md 2022-12-09 12:50:41 +05:30
Pranay Prateek
d4eb241c04 Update README.md 2022-12-09 12:48:57 +05:30
Pranay Prateek
98e1a77a43 Update README.md 2022-12-09 12:48:30 +05:30
Pranay Prateek
498b04491b updated logs image 2022-12-09 12:42:25 +05:30
Pranay Prateek
4e58414cc2 Update README.md 2022-12-09 12:36:05 +05:30
Pranay Prateek
67943cfec0 Update README.md 2022-12-09 12:32:03 +05:30
Palash Gupta
f170eb1b23 fix: scroll is added in case of extra space (#1838) 2022-12-09 10:00:55 +05:30
Vishal Sharma
6931b18382 fix: remove shared variable in TTL and async TTL queries (#1821)
* fix: remove shared variable in TTL

* fix: set distributed_ddl_task_timeout to 0 for async TTL

* chore: updated distributed_ddl_task_timeout to sync TTL queries
2022-12-07 18:23:01 +05:30
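
The second bullet above relies on a ClickHouse query-level setting: with distributed_ddl_task_timeout = 0, an ON CLUSTER DDL returns immediately (async mode) instead of waiting on every host. A hedged sketch of issuing a TTL change that way; DSN, table, and interval are illustrative assumptions:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/ClickHouse/clickhouse-go" // registers the "clickhouse" driver
)

func main() {
	db, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// SETTINGS distributed_ddl_task_timeout = 0 makes the DDL async:
	// ClickHouse does not block until all cluster hosts acknowledge it.
	query := `ALTER TABLE signoz_traces.signoz_index_v2 ON CLUSTER cluster
	          MODIFY TTL toDateTime(timestamp) + INTERVAL 7 DAY
	          SETTINGS distributed_ddl_task_timeout = 0`
	if _, err := db.Exec(query); err != nil {
		log.Fatal(err)
	}
}
```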
Prashant Shahi
8a9d6f664a fix: 🐛 log parsing issue (#1824)
Signed-off-by: Prashant Shahi <prashant@signoz.io>

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2022-12-07 17:57:55 +05:30
Amol Umbark
8affe8df31 fix: solved issue with google help link (#1826) 2022-12-07 16:10:17 +05:30
Nityananda Gohain
1c8626e933 feat: usage collection updated for ee (#1654)
* feat: usage collection updated with new schema and logic

* fix: added exporter id and common collector id

* fix: upload usage only when license is present

* fix: handle if db doesn't exist

* fix: select query updated for usage collection to support distributed table

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 22:52:39 +05:30
Amol Umbark
87932de668 [feat] ee/google auth implementation (#1775)
* [feat] initial version for google oauth

* chore: arranged the sso packages and added prepare request for google auth

* feat: added google auth config page and backend to handle the request

* chore: code cleanup for domain SSO parsing

* Update constants.go

* chore: moved redirect sso error

* chore: lint issue fixed with domain

* chore: added tooltip for enforce sso and few changes to auth domain

* chore: moved question mark in enforce sso

* fix: resolved pr review comments

* chore: fixed type check for saml config

* fix: fixed saml config form

* chore: added util for transformed form values to samlconfig

Co-authored-by: mindhash <mindhash@mindhashs-MacBook-Pro.local>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 22:32:59 +05:30
Srikanth Chekuri
1b52edb056 chore: use local table for inner query (#1816)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 22:31:38 +05:30
Priyanka Chakraborty
5a81557df7 1374 dbcalls querybuilder (#1608)
* refactor: dbcalls-fromPromql-querybuilder


Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-06 16:52:20 +05:30
Prashant Shahi
8bb3eefeb5 chore(clickhouse): 🔧 include cluster.xml for distributed setup (#1810)
* chore(clickhouse): 🔧 include cluster.xml for distributed setup

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-12-05 17:26:13 +05:30
Amol Umbark
a46f074e22 fix: resolves empty variables issue for imported dashboards (#1808) 2022-12-05 16:48:11 +05:30
Prashant Shahi
88fa3b7699 feat(distributed): create single docker-compose.yaml and CH configuration (#1803)
* feat: setup for distributed clickhouse

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-12-05 16:24:01 +05:30
Ankit Nayan
7f77bcca2b Feat/distributed ch (#1701)
* feat: support for distributed table

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-12-02 12:30:28 +05:30
Palash Gupta
ab5311caac feat: events is updated by adding the timestamp (#1802)
* feat: events is updated
* chore: title is updated
* feat: trace detail event timestamp is updated
2022-12-02 10:34:06 +05:30
Palash Gupta
8aae9f53a9 feat: search in tags is updated (#1788)
* feat: search in tags is updated

* chore: placeholder is updated
2022-12-01 14:19:12 +05:30
Ankit Nayan
18d80d47e5 Merge pull request #1776 from SigNoz/release/v0.11.4
Release/v0.11.4
2022-11-29 17:36:31 +05:30
Prashant Shahi
8e5522820c chore: 📌 pin versions: SigNoz 0.11.4
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-11-29 17:12:17 +05:30
Palash Gupta
5ae9557293 fix: logs time is fixed (#1772)
* fix: logs parsing is fixed

* fix: start and end time is updated
2022-11-29 14:41:36 +05:30
Palash Gupta
7e590f4bfb feat: meta description and image is updated (#1764)
* feat: meta description is updated

* chore: image is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-28 18:50:17 +05:30
Palash Gupta
ce072bdc3f fix: trace event is now not decoding the events (#1766)
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-28 18:27:09 +05:30
Nityananda Gohain
67c0c9032f fix: logs aggregate endpoint updated to differentiate between params and query string (#1768) 2022-11-28 18:16:21 +05:30
Palash Gupta
6c9036fbf4 fix[logs][FE]: live tail is fixed (#1759)
* fix: live tail is fixed

* fix: graph state is updated

* chore: step size is updated

* chore: xaxis config is updated

* chore: isDisabled state is updated for top navigation

* chore: selected interval is updated in the reducer

* fix: build is fixed

* chore: xAxis config is updated

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-11-28 15:44:33 +05:30
Nityananda Gohain
d06d41af87 fix: parser updated to differentiate between params and query string (#1763) 2022-11-28 14:18:43 +05:30
Amol Umbark
2771d2e774 fix: [alerts] [ch-query] added aliases in metric query result (#1760)
* fix: [alerts] [ch-query] added aliases in metric query result

* fix: added more column type support for target in ch query

* fix: added error handling when data type is unexpected in metric result

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-27 14:29:09 +05:30
Amol Umbark
0cbba071ea fix: [alerts] fixed selected interval for chart preview in ch use case (#1761) 2022-11-25 16:04:09 +05:30
Amol Umbark
7cec2db503 fix: [alerts] solved legend not updating issue in ch query editor (#1757)
* fix: [alerts] solved legend not updating issue in ch query editor

* fix: [alerts]removed console.log

* fix: added jsdoc description tag
2022-11-25 12:16:47 +05:30
Amol Umbark
4b3829fd5b fix: fixed date condition (start and end) while preparing ch query (#1751)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-11-24 18:19:07 +05:30
Vishal Sharma
983ca1ec6a feat: introduce getSubTreeSpans function in clickhouse query builder & introduce smart trace detail algorithm (#1648)
* perf: introduce smart trace detail algorithm
* fix: remove hardcoded levels and handle null levels
* feat: add support for broken trees
* feat: use spanLimit env variable
* fix: handle missing root span
* add root spanId and root name
* use permanent table
* add kind, events and tagmap support
* fix query formation
* increase context timeout to 600s
* perf improvement
* handle error
* return tableName as response to query
* support multiple queries tableName
* perf: improve memory and latency
* feat: add getSubTree custom func and smart trace detail algo to ee
* fix: create new functions for ee
* chore: refactor codebase
* chore: refactor frontend code


Co-authored-by: Ankit Nayan <ankit@signoz.io>
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-11-24 18:18:19 +05:30
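
The getSubTreeSpans entry above mentions a span limit and broken-tree handling. The core subtree idea can be sketched as: index spans by parent ID, then walk down from a chosen span, stopping at the limit so very large traces stay bounded. Types, field names, and the limit below are illustrative assumptions, not the actual query-service implementation:

```go
package main

import "fmt"

// Span is a minimal stand-in for a trace span.
type Span struct {
	SpanID   string
	ParentID string
	Name     string
}

// subTree returns the descendants of rootID, breadth-first, capped at
// spanLimit (mirroring the spanLimit env variable mentioned above).
func subTree(spans []Span, rootID string, spanLimit int) []Span {
	children := map[string][]Span{}
	for _, s := range spans {
		children[s.ParentID] = append(children[s.ParentID], s)
	}
	var out []Span
	queue := []string{rootID}
	for len(queue) > 0 {
		id := queue[0]
		queue = queue[1:]
		for _, c := range children[id] {
			if len(out) >= spanLimit {
				return out
			}
			out = append(out, c)
			queue = append(queue, c.SpanID)
		}
	}
	return out
}

func main() {
	spans := []Span{
		{SpanID: "a", ParentID: "", Name: "root"},
		{SpanID: "b", ParentID: "a", Name: "auth"},
		{SpanID: "c", ParentID: "b", Name: "db"},
		{SpanID: "d", ParentID: "a", Name: "cache"},
	}
	fmt.Println(subTree(spans, "a", 100))
}
```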
Amol Umbark
33d34af2a6 feat: added exception based alerts (#1752) 2022-11-24 18:00:02 +05:30
Vishal Sharma
b0ec619881 fix: trace table pagination (#1749)
* fix: trace table pagination

* chore: refactor

* chore: refactor

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-11-24 16:25:26 +05:30
Amol Umbark
220f848b04 feat: [UI] clickhouse queries in alert builder (#1706)
* feat: added ui changes to support clickhouse queries in alert builder

* chore: minor fix to alert rules ui

* feat: alert form changes: ch query support, alert type selection

* chore: resolved review comments

* chore: added list for alert type selection instead

* chore: removed hard coded color and added antd/colors

* fix: resolved some issues found during testing alerts

* fix: moved alert defaults and added default queries for logs and traces

* feat: added default queries for logs and traces to reflect ts vars

* chore: fixed px to rem

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-24 13:21:46 +05:30
Palash Gupta
4727dbc9f0 fix: if invalid switch is disabled (#1656)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-11-24 00:08:56 +05:30
Amol Umbark
00863e54de feat: added ch query support (#1735)
* feat: added ch query support
* fix: added new vars to resolve alert query format issue
* fix: replaced timestamp vars in metric query range

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-11-23 18:49:03 +05:30
Ankit Nayan
e9c47a6a73 Merge branch 'develop' of https://github.com/SigNoz/signoz into develop 2022-11-23 16:58:05 +05:30
Ankit Nayan
88af456915 chore: detect first registration 2022-11-23 16:57:49 +05:30
Ankit Nayan
7ebc94c273 display message updated (#1744)
* display message updated

* chore: display message changed
2022-11-23 16:44:47 +05:30
Palash Gupta
d5bd991417 fix: onApply data is updated (#1655) 2022-11-23 16:25:02 +05:30
Palash Gupta
4c0d573760 fix: Logs issues are fixed (#1727)
* feat: logs is updated
* chore: width:100% is removed
* chore: position of filter is updated
* chore: min time and max time are now tracked from global state


Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-23 13:42:36 +05:30
Vishal Sharma
1273bb5865 fix: getNanoTimestamp function and cache fix (#1737) 2022-11-22 13:13:10 +05:30
Palash Gupta
87502baabf feat: filter is added in exceptions page (#1731)
* feat: filter is added

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2022-11-22 12:08:51 +05:30
Palash Gupta
90a6313423 feat: value graph is updated (#1733) 2022-11-21 21:03:33 +05:30
Palash Gupta
4a244ad7b2 feat: onClick is updated (#1732)
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-21 16:44:41 +05:30
Palash Gupta
db105af89f refactor: some of the styles are removed and used native antd components (#1730) 2022-11-21 13:39:54 +05:30
Palash Gupta
b8c58a9812 chore: removed unnecessary eslint check (#1668)
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-11-18 19:04:40 +05:30
Ankit Nayan
78d2377520 Merge pull request #1722 from SigNoz/release/v0.11.3
Release/v0.11.3
2022-11-16 19:50:55 +05:30
Ankit Nayan
549535d09e Update README.md
Added Ruby

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-11-16 19:12:36 +05:30
Palash Gupta
ac4d35c6c0 chore: alignment is fixed in header (#1723)
* chore: alignment is fixed
2022-11-16 19:08:09 +05:30
Prashant Shahi
ad34c6e25f Merge branch 'develop' into release/v0.11.3 2022-11-16 17:37:56 +05:30
Prashant Shahi
c306701bab chore: 📌 pin versions: SigNoz 0.11.3, SigNoz OtelCollector 0.63.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-11-16 17:33:27 +05:30
Pranay Prateek
fcc725c6e6 Update README.md 2022-11-16 17:17:08 +05:30
Prashant Shahi
d615d7a9e3 Updating collection interval in otelcol configuration files (#1720)
* chore: 🔧 set collection interval of hostmetrics to 30s while others to 60s
2022-11-15 20:33:56 +05:30
Prashant Shahi
622943645f Bump version of clickhouse to 22.8.8 LTS and deploy file changes (#1711)
* chore: 🔥 remove docker-compose-prod.yaml as redundant and update Makefile
* chore: 🔧 scrape otel-collector internal metrics in same container and related changes
* chore: 📌 Bump version of clickhouse to 22.8.8 LTS

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-11-15 20:07:09 +05:30
Srikanth Chekuri
355264a43e chore: bump SigNoz/prometheus to v1.9.76 (#1719) 2022-11-15 18:45:47 +05:30
Srikanth Chekuri
2c7deca2ec fix: include inner panels support and map job, instance correctly (#1718)
* fix: include inner panels support and map job, instance correctly

* chore: remove debug and tidy up bit
2022-11-15 18:23:20 +05:30
Vishal Sharma
e558dcae3a fix: update trace URI when coming from metrics (#1715) 2022-11-15 13:08:48 +05:30
Srikanth Chekuri
4cf3dc2ec3 fix: remove usage of labels object (#1710)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-11-14 22:51:23 +05:30
Palash Gupta
2e124da366 feat: refresh interval is added (#1712)
* feat: refresh interval is added
2022-11-14 22:32:19 +05:30
Vishal Sharma
a50d7f227c Feat: dynamic tooltip (#1705)
* feat: integrate config service with query service
* feat: add tooltip checkpoint
* feat: add support for dark and light mode icons

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-11-14 14:29:13 +05:30
Ankit Nayan
73706d872f Update telemetry.go 2022-11-12 17:19:34 +05:30
Palash Gupta
0480197914 fix Logs contains issue (#1708)
* chore: logs is updated
* chore: contains is updated
2022-11-12 11:37:52 +05:30
Palash Gupta
65af8c1b98 801 dropdown is added in the dashboard page (#1669)
* chore: update the import from constant rather than static string

* chore: removed redundant div

* feat: added auto refresh component

* refactor: top nav is refactored
2022-11-10 20:48:40 +05:30
Nityananda Gohain
a3b03ef0ca fix: parser updated to support escaped quotes in search (#1704)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-11-10 18:24:20 +05:30
Srikanth Chekuri
9735a6e5ce feat: add ability to import Grafana dashboards (#1700)
* feat: add ability to import Grafana dashboards

* chore: remove unnecessary file

* chore: more 9XX support

* chore: some more hacks

* chore: update deps

* chore: arrange equal spaced widgets instead of inheriting from grafana
2022-11-10 16:49:54 +05:30
Vishal Sharma
674883cd18 Feature flagging (#1674)
* feat: introduce feature flagging via env variables
* refactor: enable sorting by default for users
2022-11-09 08:30:00 +05:30
Pang
36315fcf9c fix: make README.zh-cn.md readable (#1647)
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-11-03 06:09:15 +05:30
Palash Gupta
46050a217c feat: all trace now open in new tab (#1662) 2022-10-26 12:53:47 +05:30
Ankit Nayan
c9363586e1 Merge branch 'main' into develop 2022-10-17 14:36:38 +05:30
Ankit Nayan
5eed384ffe Merge pull request #1637 from SigNoz/release/v0.11.2
Release/v0.11.2
2022-10-13 16:39:48 +05:30
Prashant Shahi
1b152c19ec ci(e2e): 👷 enable DEV_BUILD flag for query-service (#1636)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-10-13 16:10:36 +05:30
Prashant Shahi
6a3c1c10fb chore(release): 📌 pin versions: SigNoz 0.11.2, OtelCollector 0.55.3
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-10-13 15:28:03 +05:45
Palash Gupta
f580bedb1c 1627 login: onsubmit is added (#1635)
* feat: onsubmit is updated
* chore: precheckComplete handler is updated
2022-10-13 14:20:25 +05:30
Prashant Shahi
acd15af823 ci(e2e): 👷 ee build for query-service (#1633)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-10-13 08:58:06 +05:30
Nityananda Gohain
134c5dc1d2 fix: disable usage collection (#1631) 2022-10-12 12:04:36 +05:30
Palash Gupta
57f4f098f7 feat: onsubmit is updated (#1628) 2022-10-11 19:38:22 +05:30
Ankit Nayan
fce4496214 chore: rateLimit added 2022-10-11 18:35:05 +05:30
Palash Gupta
4e38f1dcc0 chore: free plan config is updated (#1625)
* chore: free plan config is updated


* fix: solved empty state issue with no auth domains

Co-authored-by: Amol <amolumbarkar@gmail.com>
2022-10-11 15:48:58 +05:30
Ankit Nayan
fe0f305ea7 Merge branch 'develop' of https://github.com/SigNoz/signoz into develop 2022-10-11 00:44:44 +05:30
Ankit Nayan
1374444f36 chore: analytics 2022-10-11 00:43:54 +05:30
Nityananda Gohain
fe0a4ab0cb Fix/delete old snapshot (#1621)
* fix: remove old snapshots
2022-10-07 20:06:01 +05:30
Srikanth Chekuri
f2f2069835 chore: bump SigNoz/prometheus to v1.9.74 (#1620) 2022-10-07 19:00:27 +05:30
Prashant Shahi
90d0c72aa2 ci(push): 👷 make ee query-service build default (#1616)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-10-07 14:23:01 +05:30
Nityananda Gohain
90d1a87027 fix: usage collection frequency updated (#1617) 2022-10-07 11:48:22 +05:30
Amol Umbark
9c4521b34a feat: enterprise edition (#1575)
* feat: added license manager and feature flags
* feat: completed org domain api
* chore: checking in saml auth handler code
* feat: added signup with sso
* feat: added login support for admins
* feat: added pem support for certificate
* ci(build-workflow): 👷 include EE query-service
* fix: 🐛 update package name
* chore(ee): 🔧 LD_FLAGS related changes

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: nityanandagohain <nityanandagohain@gmail.com>
2022-10-06 20:13:30 +05:30
Amol Umbark
106033c296 Feature: SSO Login and Feature gating in UI (#1605)
* feat: added usefeatureflags hook and relevant code
* chore: resolved lint issues
* chore: applied translations
* feat: added signup for sso
2022-10-04 13:43:58 +05:30
Palash Gupta
9372f763c8 feat: SAML settings is updated (#1556)
* chore: getFeatureFlag is implemented
* feat: authDomain are added
2022-10-03 21:27:42 +05:30
Priyanka Chakraborty
3bbe2f4f58 1363 externalcall querybuilder (#1550)
* externaltab-promql-to-querybuilder
* refactored the queries into separate file
* added logic for resourceattribute to tagFilter items conversion
* refactor: use useMemo

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-10-03 08:51:08 +05:30
Priyanka Chakraborty
1b1fb2f13b feat: route and breadcrumbs renamed to services (#1566)
* feat: route and breadcrumbs renamed to services
2022-10-03 07:02:03 +05:30
Prashant Shahi
a94bd9b99b introduce env for dashboards path in query-service (#1593)
* chore: 🔧 fetch dashboards path from DASHBOARDS_PATH env
* chore: 🚀 update docker files to include DASHBOARDS_PATH env
2022-10-03 05:48:54 +05:30
Nityananda Gohain
dcf2ac15b0 feat: add compression for materialized columns (#1585) 2022-10-03 05:45:59 +05:30
Vishal Gupta
9e9924943e feat: #1524 refresh button bug fix (#1582)
* feat: #1524 refresh button bug fix

* lint fixes

Co-authored-by: palashgdev <palashgdev@gmail.com>
2022-09-29 11:18:37 +05:30
Srikanth Chekuri
2c9794a6c6 fix: filter items can be empty (#1586) 2022-09-27 12:44:49 +05:30
Prashant Shahi
6b6f494574 fix: 🐛 update OTEL image (#1595)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-09-26 13:16:14 +05:30
Prashant Shahi
a3f11184e4 chore: 🔧 414 issue fix for large request URI (#1594)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-09-26 13:14:28 +05:30
Priyanka Chakraborty
cc3f36b62b 1587 update readme signoz vs jaeger (#1588)
* chore: update readme signoz vs jaeger
2022-09-22 22:03:50 +05:30
Koladele Olaitan
450602cd72 Removed Pranshu Chittora as a Frontend Maintainer (#1573) 2022-09-16 11:44:08 +05:30
Bryan Johnson
7088c22318 Fix minor grammar error in stale_version message for en and en-GB locales (#1570) 2022-09-14 21:26:58 +05:30
Pranay Prateek
b9af7e7ff3 update READMEs 2022-09-14 12:52:02 +05:30
Pranay Prateek
00389271cf chore: Introduce enterprise edition license (#1567)
* chore: Introduce enterprise edition license
2022-09-14 12:36:33 +05:30
Ankit Nayan
adda2e8a11 Merge pull request #1564 from SigNoz/release/v0.11.1
Release/v0.11.1
2022-09-14 10:21:54 +05:30
Prashant Shahi
ed2bbc5035 chore(release): 📌 pin versions: SigNoz-Otel-Collector 0.55.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-09-14 00:03:07 +05:30
Prashant Shahi
f1fdf78dc5 chore(release): 📌 pin versions: SigNoz 0.11.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-09-13 22:46:00 +05:30
Prashant Shahi
745fd07bd8 fix(lint): 🚨 format prometheus config YAML and remove trailing spaces
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-09-13 22:45:30 +05:30
Prashant Shahi
05de0ccba5 chore: 🔧 Add 414 issue fix for Frontend default.conf and Swarm
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-09-13 22:42:14 +05:30
palashgdev
c9139c5236 feat: height is updated (#1563) 2022-09-13 21:19:01 +05:30
palashgdev
0806397816 feat: webpack chunk name is updated (#1562) 2022-09-13 12:00:09 +05:30
Pranshu Chittora
c43dabdb0b fix: dashboard variable getting deleted on edit instances (#1561) 2022-09-12 23:24:45 +05:30
Vishal Sharma
eaadc3bb95 feat: introduce search trace ID component (#1551)
* feat: searchTraceID checkpoint
* feat: filter spans using TraceID from trace filter page

Co-authored-by: palashgdev <palashgdev@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-09-12 19:35:31 +05:30
Pranshu Chittora
1ec9248975 feat: getting started page (#1560)
Co-authored-by: palashgdev <palashgdev@gmail.com>
2022-09-12 16:26:01 +05:30
Srikanth Chekuri
0ccd7777bf fix: make widget plot work with missing data points (#1559) 2022-09-12 16:11:55 +05:30
Srikanth Chekuri
ac86d840f9 fix: reuse the query engine and storage for alerts pqlEngine (#1558) 2022-09-12 12:30:36 +05:30
Srikanth Chekuri
8556c87d46 feat: add support for dashboard variables (#1557) 2022-09-11 03:34:02 +05:30
Pranshu Chittora
461a15d52d feat: dashboard variables (#1552)
* feat: dashboard variables
* fix: variable wipe on few instances
* feat: error handling states
* feat: eslint and tsc fixes
2022-09-09 17:43:25 +05:30
Ankit Nayan
9e6d9019f7 chore: added group analytics 2022-09-08 21:05:54 +05:30
Ankit Nayan
578dafd1ff chore: fixed random number generation to match to maxRandInt 2022-09-07 15:20:56 +05:30
Ankit Nayan
99c0c97c1e chore: added sampling in analytics 2022-09-06 19:55:01 +05:30
Ankit Nayan
4875652ecb chore: added group analytics 2022-09-06 19:29:07 +05:30
Ankit Nayan
d170515d4d Merge pull request #1532 from SigNoz/release/v0.11.0
Release/v0.11.0
2022-08-24 20:11:36 +05:30
Prashant Shahi
73b00f405b Merge branch 'develop' into release/v0.11.0 2022-08-24 19:01:33 +05:30
Nityananda Gohain
ea8bd7047f Fix case mismatch in static fields. (#1537)
* case mismatch fix
* fix: undefined handled in flattened data
2022-08-24 18:59:44 +05:30
Prashant Shahi
596daefa7e compose changes: add default env and remove otel memory limit (#1536)
* chore(docker-compose): 🔥 remove memory limit

* chore: 🔧 include envs: ALERTMANAGER_API_PREFIX and SIGNOZ_LOCAL_DB_PATH
2022-08-24 18:58:44 +05:30
Pranshu Chittora
7081e4ffce fix: recursive url reloading (#1535) 2022-08-24 17:46:31 +05:30
Prashant Shahi
07dcdb51f7 chore: 🔧 enable logs capturing by default (#1534)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-08-24 15:11:38 +05:30
Prashant Shahi
d2e990ebf4 chore: 📌 pin versions: SigNoz 0.11.0, OtelCollector distribution 0.55.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-08-24 00:00:19 +05:30
Prashant Shahi
c2f95dc727 Merge branch 'develop' into release/v0.11.0 2022-08-23 22:22:49 +05:30
Aditya Kumar Praharaj
37dedc8b87 feat(devbox): splitting docker-compose.yaml into core and prod / local for no-edit local setup (#1528) 2022-08-23 15:23:37 +05:30
Srikanth Chekuri
9cd1be6553 Allow search by service name in services list page (#1520)
* feat: add search by service name
* chore: allow clear
* chore: table search icon and review comments
* chore: fix lint
* chore: address review comments
* chore: fix types
* chore: tweak user experience
* chore: antd color enum
2022-08-23 13:05:19 +05:30
Pranshu Chittora
5e0eb05a9c feat: support for legend in query builder formulas (#1530) 2022-08-23 11:17:49 +05:30
Pranshu Chittora
f48a884f90 fix: eslint and tsc fixes for logs (#1527)
* fix: eslint and tsc fixes for logs

* chore: remove package-lock file
2022-08-19 17:16:04 +05:30
Srikanth Chekuri
32fba00aa8 fix: do not show services without any data for select interval (#1521) 2022-08-17 15:11:08 +05:30
Pranshu Chittora
166e5612eb feat: restrict timestamp from adding it to query (#1517)
Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-08-16 18:53:34 +05:30
Ankit Nayan
b23d63cb2b Merge pull request #1515 from pranshuchittora/pranshuchittora/fix/sse-event-polyfill
fix: live tail sse prod issue
2022-08-16 13:33:45 +05:30
Pranshu Chittora
5e0ed6f5f5 chore: removed unused SSE libs 2022-08-16 13:07:05 +05:30
Pranshu Chittora
74f947a028 fix: live tail sse prod issue 2022-08-16 12:59:07 +05:30
Ankit Nayan
cca74e5926 Merge branch 'main' into develop 2022-08-11 18:53:38 +05:30
Ankit Nayan
1865d75df6 Merge pull request #1512 from SigNoz/release/v0.10.2
Release/v0.10.2
2022-08-11 18:52:34 +05:30
Prashant Shahi
d4b0013900 chore(prerelease): 📌 pin versions: SigNoz 0.11.0-rc.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-08-11 15:08:12 +05:30
Prashant Shahi
55c9eb733d chore(release): 📌 pin versions: SigNoz 0.10.2
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-08-11 14:58:36 +05:30
Amol Umbark
54cc363752 Alerts/edit rule issue 676 (#1505)
* fix: resolved issue with editing of rules

(cherry picked from commit a3015d1077)
2022-08-11 14:43:50 +05:30
Ankit Nayan
07d013a716 chore: added analytics for logs 2022-08-11 14:27:19 +05:30
Amol Umbark
a3015d1077 Alerts/edit rule issue 676 (#1505)
* fix: resolved issue with editing of rules
2022-08-11 13:54:17 +05:30
Nityananda Gohain
66b67a08a0 alias for timestamp interval changed in sql query (#1509) 2022-08-11 13:53:33 +05:30
Pranshu Chittora
7a4750a882 Logs UI (#1436)
* feat: logs routing
* feat: add redux for logs
* feat: logs filter ui
* feat: logsql parser integration
* feat: logs table initial version
* feat: logs aggregated view
* feat: add log detail
* feat: log live tail
* feat: Logs TTL UI
2022-08-11 11:45:28 +05:30
Nityananda Gohain
6d623c5d45 single otlp receiver (#1506) 2022-08-11 11:43:10 +05:30
Srikanth Chekuri
6e899175a0 fix: escape and encode operations regex for overview details (#1502)
* fix: escape and encode operations regex for overview details
* chore: use back tick with escaping
2022-08-11 11:41:32 +05:30
Amol Umbark
aecf3ef93e fix: added cache busting for translations using file hash (#1478)
* fix: added cache busting for translations using file hash


Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-08-10 23:09:34 +05:30
Nityananda Gohain
b6afc9315b clickhouse logs exporter added to deployment file (#1500)
* clickhouse logs exporter added to deployment file
* updated to latest otel collector
2022-08-10 23:00:05 +05:30
Srikanth Chekuri
dda82474ae feat: add more options in service map time dropdown (#1501) 2022-08-10 21:09:02 +05:30
Srikanth Chekuri
998e72374f fix: escape and encode operations regex for overview details (#1499)
* fix: interval should be 1d=24h (#1482) (#1483)

* fix: escape and encode operations regex for overview details

Co-authored-by: Ankit Nayan <ankit@signoz.io>
Co-authored-by: zedongh <248348907@qq.com>
2022-08-10 21:04:12 +05:30
Ankit Nayan
a1f6f09ae1 Merge pull request #1379 from nityanandagohain/feat/logs
Support for Logs
2022-08-10 15:29:03 +05:30
nityanandagohain
7a1cbdb0bb Merge remote-tracking branch 'origin/feat/logs' into feat/logs 2022-08-10 14:28:11 +05:30
nityanandagohain
eb28ece680 parser updated for pagination 2022-08-10 14:27:46 +05:30
Vishal Sharma
fda6e4472a Merge branch 'develop' into feat/logs 2022-08-09 10:45:23 +05:30
zedongh
9de99d1872 fix: interval should be 1d=24h (#1482) (#1483) 2022-08-09 09:52:55 +05:30
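
The fix above ("1d" must mean 24h) is the kind of unit bug that arises because Go's time.ParseDuration has no day unit, so a "d" suffix has to be translated to hours explicitly. A simplified sketch of such parsing, assumed for illustration:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseInterval maps a trailing "d" to whole days (1d = 24h) before
// falling back to the standard duration parser.
func parseInterval(s string) (time.Duration, error) {
	if strings.HasSuffix(s, "d") {
		days, err := strconv.Atoi(strings.TrimSuffix(s, "d"))
		if err != nil {
			return 0, err
		}
		return time.Duration(days) * 24 * time.Hour, nil
	}
	return time.ParseDuration(s)
}

func main() {
	d, _ := parseInterval("1d")
	fmt.Println(d) // 24h0m0s
}
```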
Ankit Nayan
8f9d0f2403 Merge pull request #1480 from SigNoz/release/v0.10.1
Release/v0.10.1
2022-08-07 15:35:29 +05:30
Prashant Shahi
04cf1b2697 Merge branch 'develop' into release/v0.10.1 2022-08-07 15:27:33 +05:30
Amol Umbark
8bdc41bef0 fix: resolves issue for migrated promql (#1481) 2022-08-06 13:40:41 +05:30
Prashant Shahi
616da88790 chore(release): 📌 pin versions: SigNoz 0.10.1, OtelCollector 0.45.1-1.3, Alertmanager 0.23.0-0.2
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-08-05 22:30:55 +05:30
Srikanth Chekuri
143a5b65f9 Merge branch 'develop' into feat/logs 2022-08-04 20:08:58 +05:30
nityanandagohain
0807a0ae26 Merge remote-tracking branch 'upstream/develop' into feat/logs 2022-08-04 17:32:45 +05:30
Amol Umbark
1ebf64589f Alerts: Test Notifications in Rules 2022-08-04 17:24:15 +05:30
Amol Umbark
80c96af5a4 feat: added user selected filtering of channels in alerts (#1459) 2022-08-04 15:31:21 +05:30
nityanandagohain
61ebd3aded logs ttl support added in ttl api 2022-08-04 14:28:10 +05:30
Vishal Sharma
425b732370 fix: add defaultDependencyGraphTable to getTTL status check (#1474) 2022-08-04 13:41:25 +05:30
Vishal Sharma
a742c9aee1 feat: use materialized view for usage explorer API (#1466)
* feat: use materialized view for usage explorer API

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-08-04 12:55:21 +05:30
Srikanth Chekuri
3968f11b3d feat: improve service map (#1467)
* feat: improve service map
2022-08-04 12:38:53 +05:30
Srikanth Chekuri
5bfc2af51b feat: show messaging/cron/browser services in listing page (#1455)
* feat: show messaging/cron/browser services in listing page

* chore: issue maximum of ten queries to clickhouse

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-08-04 11:57:05 +05:30
Amol Umbark
8146da52af feat: Disable Alerts Feature (Backend) (#1443)
* feat: added patch rule api

* feat: added backend api for patching rule status

* fix: improved patchRule and also editRule


Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-08-04 11:55:54 +05:30
Amol Umbark
5dc6d28f2e feat: disable alerts feature (UI) (#1445)
* feat: added enable disable feature for rules

* fix: resolved type issue in getTriggered

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-08-04 11:55:09 +05:30
Amol Umbark
a6ed6c03c1 Alerts/607 test notifications UI (#1469)
* feat: added test alert feature

* fix: solved the lint issues

Co-authored-by: Palash Gupta <palashgdev@gmail.com>
2022-08-03 19:40:20 +05:30
Vishal Sharma
cca4db602c Remove query-service code owners (#1461)
* Remove query-service code owners
2022-08-03 16:55:51 +05:30
Amol Umbark
7ff49ba47c feat: added rule url to the title link in slack message (#1421)
* feat: added rule url to the title link in slack message

* fix: corrected duplication of code for generator url in rules engine

* fix: removed unnecessary import in rules engine
2022-08-03 15:08:14 +05:30
nityanandagohain
9dcf913a74 severity_number type changed to int8 2022-08-03 12:23:00 +05:30
Prashant Shahi
68194d7e07 fix(query-service): 🚀 embed copy of timezone data (#1462)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-08-02 10:05:16 +05:30
Amol Umbark
7881aee350 feat: added user preferred channel filters in alerts (#1458)
* feat: added user preferred channel filters in alerts

* fix: resolved minor translation issue
2022-08-02 09:54:24 +05:30
nityanandagohain
594bfc256c fulltext validation updated 2022-08-01 13:02:00 +05:30
nityanandagohain
5894acdb2d OR support added with contains 2022-08-01 12:30:11 +05:30
nityanandagohain
6eb9389e81 parser updated to include or as well 2022-08-01 12:17:15 +05:30
Srikanth Chekuri
39be8201aa Merge pull request #1450 from SigNoz/bump-json-iterator
chore: bump json-iterator version to v1.1.12
2022-08-01 12:10:07 +05:30
Srikanth Chekuri
6778163a07 Merge branch 'develop' into bump-json-iterator 2022-08-01 09:13:04 +05:30
Amol Umbark
56a2047560 fix: remove 'default channel' note from channels page (#1446) 2022-07-31 09:54:58 +05:30
Srikanth Chekuri
023ef66035 Merge branch 'develop' into bump-json-iterator 2022-07-29 08:37:46 +05:30
Prashant Shahi
22b8572495 feat(swarm): 🚀 scraping multiple otel-collector (#1438)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-07-28 14:05:35 +05:30
jshiwam
e39d2f799d Used Prepared Statements for GetChannel in clickhousereader (#1414)
* feat: used db.Preparex
2022-07-28 10:14:27 +05:30
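
The entry above swaps string-built queries for prepared statements via sqlx's db.Preparex. A hedged sketch of the pattern; the schema, driver, and ChannelItem fields are illustrative assumptions, not the clickhousereader code:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3" // stand-in driver for the sketch
)

// ChannelItem is an assumed shape for a notification channel row.
type ChannelItem struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

func main() {
	db := sqlx.MustOpen("sqlite3", ":memory:")
	db.MustExec(`CREATE TABLE notification_channels (id INTEGER, name TEXT)`)
	db.MustExec(`INSERT INTO notification_channels VALUES (1, 'slack-alerts')`)

	// Preparex parameterizes the lookup instead of concatenating the ID
	// into the SQL string.
	stmt, err := db.Preparex(`SELECT id, name FROM notification_channels WHERE id = ?`)
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()

	var ch ChannelItem
	if err := stmt.Get(&ch, 1); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", ch)
}
```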
nityanandagohain
5b28fe1c9d Merge remote-tracking branch 'origin/feat/logs' into feat/logs 2022-07-27 15:59:24 +05:30
nityanandagohain
d15f9a1709 log statement corrected 2022-07-27 15:58:58 +05:30
Srikanth Chekuri
2c383528bc Merge branch 'develop' into bump-json-iterator 2022-07-27 14:41:50 +05:30
Srikanth Chekuri
0378dfd12f chore: bump json-iterator version to v1.1.12 2022-07-27 14:40:45 +05:30
Srikanth Chekuri
002ccc3975 Merge branch 'develop' into feat/logs 2022-07-27 12:53:32 +05:30
Prashant Shahi
f8f903848e fix: 🚀 disables TTL moves on insert and runs them only in background (#1448)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-07-27 11:53:10 +05:30
nityanandagohain
047227ad18 use ticker for polling db in live tail. 2022-07-27 11:47:35 +05:30
nityanandagohain
7b6a086b37 consistent query formatting 2022-07-27 10:46:33 +05:30
Nityananda Gohain
baf72610d6 Update pkg/query-service/app/clickhouseReader/reader.go
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-07-27 10:39:08 +05:30
nityanandagohain
a5388d357c Merge remote-tracking branch 'origin/feat/logs' into feat/logs 2022-07-26 14:50:55 +05:30
nityanandagohain
294d527a0e parser updated to support more than one contains 2022-07-26 14:45:20 +05:30
Srikanth Chekuri
a23788852a Merge branch 'develop' into feat/logs 2022-07-26 11:55:18 +05:30
jshiwam
a771c3b9a6 docs: added documentation for query-service local setup (#1426)
* docs: added documentation for query-service local setup

* fix: updated clickhouse setup link

* Use Env Var for the alertmanager endpoint
2022-07-26 09:59:31 +05:30
nityanandagohain
0fe4327877 live tail fetch only recent 100 logs every 10s 2022-07-25 14:42:58 +05:30
nityanandagohain
4825ed6e5f dataType constant strings 2022-07-22 17:19:55 +05:30
nityanandagohain
2f17898390 primitive type pointers removed 2022-07-22 16:49:40 +05:30
nityanandagohain
373cbbc375 logs select statement converted to a const 2022-07-22 16:07:19 +05:30
nityanandagohain
f8be4a6d5b livetail timestamp correction 2022-07-22 15:49:50 +05:30
nityanandagohain
420d46ab01 tail function updated to use values instead of pointers 2022-07-22 15:44:07 +05:30
nityanandagohain
bdb6901c74 generateSql returns value instead of pointer 2022-07-22 15:39:43 +05:30
nityanandagohain
94cde11164 consistent response value instead of pointer 2022-07-22 15:27:52 +05:30
nityanandagohain
448e14b32f parser updated to support contains operator for other fields 2022-07-22 15:17:46 +05:30
nityanandagohain
6ac7cb1022 parser updated 2022-07-21 18:32:11 +05:30
nityanandagohain
2132d1059c live tail api excluded from timeout middleware 2022-07-21 17:55:08 +05:30
Palash Gupta
ff9c41464b test: error and error details case is added (#1420) 2022-07-21 11:25:54 +05:30
Akshay Awate
acb3721815 refactor: start_docker() (#1410)
* refactor: start_docker()

Co-authored-by: akshay <akshay.awate@infracloud.io>
2022-07-20 23:30:21 +05:30
nityanandagohain
5912d3a4a0 observed timestamp removed 2022-07-20 14:52:16 +05:30
nityanandagohain
a527c33c7d timestamp in ns from ms 2022-07-20 13:05:24 +05:30
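
The unit change above (milliseconds to nanoseconds) is a one-line conversion, shown here with an assumed field name for illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	tsMs := int64(1658300000123)           // assumed millisecond timestamp
	tsNs := tsMs * int64(time.Millisecond) // 1 ms = 1e6 ns
	fmt.Println(tsNs, time.Unix(0, tsNs).UTC())
}
```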
nityanandagohain
c24bdfc8cf aggregate function added 2022-07-20 12:11:03 +05:30
nityanandagohain
051f640100 correct var names in live tail 2022-07-19 16:38:28 +05:30
nityanandagohain
b5c8764605 changes added for live tail api 2022-07-19 16:34:33 +05:30
Amol Umbark
475c44a000 fix: increased debounce to 1000 (from 500); also reduced network calls in getQueryRange (#1423) 2022-07-19 11:30:56 +05:30
Amol Umbark
1b6597b974 fix: resolved issue with promql rule creation (#1422) 2022-07-19 11:29:32 +05:30
nityanandagohain
8e4fbbe770 parsing logic and test updated 2022-07-19 10:40:19 +05:30
nityanandagohain
2450fff34d live tail v1 2022-07-18 18:55:52 +05:30
nityanandagohain
df17d4ca54 Merge remote-tracking branch 'upstream/develop' into feat/logs 2022-07-18 16:49:04 +05:30
nityanandagohain
33e7252645 Merge remote-tracking branch 'origin/develop' into feat/logs 2022-07-18 16:44:27 +05:30
nityanandagohain
2e9affa80c Filtering logic updated 2022-07-18 16:37:46 +05:30
Ankit Nayan
bf2f3f8f5e Merge pull request #1402 from zriyansh/develop
update contributing.md file
2022-07-16 14:29:21 +05:30
Amol Umbark
d92aad38df fix: removed background color from sections in alert ui (#1413) 2022-07-16 12:08:17 +05:30
Pranay Prateek
78d13c94ae Merge branch 'develop' into develop 2022-07-15 23:43:04 +05:30
Ankit Nayan
73e699080d Merge pull request #1406 from SigNoz/release/v0.10.0
Release/v0.10.0
2022-07-15 22:19:13 +05:30
Prashant Shahi
b6aa378fae chore(release): 📌 pin versions: SigNoz 0.10.0, OtelCollector 0.45.1-1.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-07-15 21:46:32 +05:30
Amol Umbark
b5fe3d7fa1 fix: changed translation file for rules (#1405) 2022-07-15 21:33:30 +05:30
Amol Umbark
7c14a75c68 feat: Right side panel in create/edit alert UI (#1404)
* feat: metrics builder

* feat: tag key selection

* feat: metrics builder

* poc version

* added more changes to query builder

* added types for composite queries

* (feat): added edit rules and create rules forms

* added label key value input item

* (chore): added hidden labels for labelinput

* (chore): resolved some merge conflicts from develop

* (chore): added translations

* (chore): removed some old files in metric builder

* (chore): restored some of the files from develop branch

* (chore): restored env.ts

* (fix): solved empty builder queries errors

* (fix): changed queryIndex and formulaIndex type to string|number from number

* (feat): added chart preview for alert metric ui

* (feat): added threshold in chart, translations in alert form and a few fixes

* (fix): restoring env.ts

* (fix): placed threshold on horizontal line

* fix: resolved review comments

* fix: resolved label remove issue

* fix: removed console log

* fix: resolved issue with edit rule - old state values shown after update of threshold

* fix: resolved issue with match condition dropdown in alert ui

* fix: increased size of timeframe drop down

* fix: fixed label key value field and chart auto update when eval window changes

* feat: added a link for alert name in list alerts page and source for each rule update

* fix: resolved review comments in query section of alerts ui

* feat: adding panel user guide in alerting form

* feat: added user guide panel in the alert form

* feat: added more help icon in user guide and fixed the sizing issue

Co-authored-by: Pranshu Chittora <pranshu@signoz.io>
2022-07-15 17:48:16 +05:30
Prashant Shahi
10c6325e46 chore(clickhouse): 🔊 update logging level to info (#1401)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-07-15 17:10:27 +05:30
Prashant Shahi
e4883495c3 fix(exceptions-page): 🚑 unix nanoseconds operations (#1403)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-07-15 16:40:45 +05:30
Palash
b9d63d6b8f feat: text is now ellipsed (#1392)
* feat: text is now ellipsed
2022-07-15 14:17:29 +05:30
Priyansh Khodiyar
65804f245c Merge branch 'SigNoz:develop' into develop 2022-07-15 13:38:54 +05:30
Ankit Anand
964b819f20 Update CONTRIBUTING.md (#1) 2022-07-15 13:38:39 +05:30
Palash
e22be60a9e Create dependency-review.yml (#1360)
* Create dependency-review.yml
2022-07-15 13:01:29 +05:30
Palash
b6a6833a64 test: utils unit case is updated (#1396) 2022-07-15 12:46:57 +05:30
Vishal Sharma
c90e9ffa34 fix: remove requirement of exceptionType and serviceName from errorDetail page URL (#1400)
* fix: remove requirement of exceptionType and serviceName from errorDetail page URL

* chore: id is updated

* chore: commented code is removed

* chore: eslint error is fixed

Co-authored-by: Palash <palashgdev@gmail.com>
2022-07-15 12:35:15 +05:30
Srikanth Chekuri
c5c7fb238f fix: update the error rate percentage text and scale (#1399) 2022-07-15 09:55:43 +05:30
Priyansh Khodiyar
4ad79bee18 add images 2022-07-14 22:51:51 +05:30
Priyansh Khodiyar
bebfaa1c4c Update CONTRIBUTING.md 2022-07-14 22:41:11 +05:30
Prashant Shahi
6fb7e34dbc chore: 🔧 otel-collector config changes (#1388)
* chore: 🔧 otel-collector config changes

* chore: 🗑️  remove redundant users.xml

* chore: 🔧 otel-config changes

- separate scraper job for otel-collector and otel-collector-metrics internal metrics
- use resourcedetection only for hostmetrics
- add swarm service name and task name in resource attributes env

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-07-14 19:36:19 +05:30
Amol Umbark
a2e1c41343 fix: edit form shows incorrect eval window when 24 hours is saved (#1393)
* fix: edit form shows incorrect eval window when 24 hours is saved

* fix: edit form shows incorrect eval window when 24 hours is saved

* feat: added 4 hour window to alert ui

Co-authored-by: Palash <palashgdev@gmail.com>
2022-07-14 18:23:02 +05:30
Palash
46f258747a Merge pull request #1395 from mindhash/amol/alert-0-10-0-fix3
feat: remove global time selection on alerts pages
2022-07-14 18:15:33 +05:30
Amol Umbark
11c352741d Merge branch 'develop' into amol/alert-0-10-0-fix3 2022-07-14 17:34:50 +05:30
Amol
a63267cf90 feat: remove global time selection on alerts pages 2022-07-14 17:28:30 +05:30
Palash
3200248e98 fix: error page is updated (#1394) 2022-07-14 17:14:13 +05:30
Amol Umbark
a8c7237bbb Alert UI with metrics builder (#1359)
* added more changes to query builder

* added types for composite queries

* (feat): added edit rules and create rules forms

* (feat): added chart preview for alert metric ui

* (feat): added threshold in chart, translations in alert form and a few fixes

* feat: added a link for alert name in list alerts page and source for each rule update

Co-authored-by: Pranshu Chittora <pranshu@signoz.io>
2022-07-14 13:23:15 +05:30
Amol Umbark
3a287b2b16 Alerts backend with metrics (#1346)
Alerts backend with metrics
2022-07-14 11:59:06 +05:30
Srikanth Chekuri
c3d665e119 feat: make SigNoz rpc aware (#1366)
* feat: make SigNoz rpc aware
* chore: update the code/method logic
2022-07-14 10:24:03 +05:30
Srikanth Chekuri
8d03569a0a fix: error rate as a percentage of total requests (#1391) 2022-07-14 10:00:24 +05:30
Srikanth Chekuri
0620cacb0b Revert "ci: add golangci to workflow (#1369)" (#1390)
This reverts commit 7aeaecaf1f.
2022-07-14 09:55:01 +05:30
Priyansh Khodiyar
7da69f6a75 Update CONTRIBUTING.md 2022-07-14 00:21:59 +05:30
Srikanth Chekuri
7aeaecaf1f ci: add golangci to workflow (#1369)
* style: reformat the code to follow go guidelines
* chore: add golangci lint
* chore: remove context check
* chore: go fmt
2022-07-13 23:44:42 +05:30
Priyansh Khodiyar
3ea36092f6 Update CONTRIBUTING.md 2022-07-13 23:44:25 +05:30
Priyansh Khodiyar
8db4793ad6 change main branch to develop branch for contribution 2022-07-13 23:33:41 +05:30
Priyansh Khodiyar
83f3180641 Update CONTRIBUTING.md 2022-07-13 23:28:13 +05:30
Palash
64e638fd58 test: signup page and login page test are updated (#1351)
* test: sign-up test are updated
* test: fail test of version api is added
* test: more test case over signup page is added
* test: coverage is added
* chore: auth json is updated
* test: auth token and refresh token test is updated
2022-07-13 20:43:36 +05:30
Palash
5554cce379 feat: exception page is updated (#1376)
* chore: all error utils is added

* chore: error page list is added with total page and other handlings

* test: unit test case for order is added
2022-07-13 19:49:27 +05:30
Priyansh Khodiyar
3e2a6df200 Update CONTRIBUTING.md 2022-07-13 18:08:02 +05:30
Priyansh Khodiyar
3dc1dc970f v6 2022-07-13 18:00:46 +05:30
Priyansh Khodiyar
0ceaa56679 v6 2022-07-13 17:46:48 +05:30
Priyansh Khodiyar
ab52538e91 v5 2022-07-13 17:43:23 +05:30
Priyansh Khodiyar
ef69505bf9 remove arm version of docker-compose file 2022-07-13 17:22:31 +05:30
Priyansh Khodiyar
61b79742dc v4 2022-07-13 17:17:37 +05:30
Palash
4d1516e3fc chore: removed stale make commands (#1340)
Co-authored-by: Prashant Shahi <prashant@signoz.io>
2022-07-13 16:08:46 +05:30
Pranshu Chittora
0b08c80038 chore: tests for span to trace tree with missing spans support (#1368)
* chore: tests for span to trace tree with missing spans support
2022-07-13 15:59:22 +05:30
Vishal Sharma
a84754e8a8 perf: exception page optimization (#1287)
* feat: update ListErrors API

* feat: update error detail APIs and add a new API for fetching next prev error IDs

* feat: update GetNextPrevErrorIDs API to handle an edge case

* perf: use timestamp for fetching individual column

* feat: add countErrors API
2022-07-13 15:55:43 +05:30
Pranshu Chittora
a09a4c264e feat: change interval of PromQL queries (#1385) 2022-07-13 15:44:28 +05:30
nityanandagohain
ed5d217c76 API for filtering and paginating logs added 2022-07-13 15:42:13 +05:30
Priyansh Khodiyar
54e09e1292 v3 2022-07-13 13:48:18 +05:30
Priyansh Khodiyar
8477aebc8e V2 2022-07-13 12:27:28 +05:30
Priyansh Khodiyar
d7f7f20520 1st iteration 2022-07-13 02:23:06 +05:30
nityanandagohain
ef141d2cee API for fields added 2022-07-12 16:38:26 +05:30
Pranshu Chittora
4ee92b7c55 fix: query builder update legend on empty values (#1367) 2022-07-11 19:03:16 +05:30
Pranshu Chittora
80c80b2180 feat: missing spans handling by returning a forest of trees (#1365)
* feat: spanToTree 2.0

* feat: spanToTree EPIFI data

* feat: missing spans multiple trees

* chore: migrated to popovers

Co-authored-by: Palash <palashgdev@gmail.com>
2022-07-08 16:18:08 +05:30
Srikanth Chekuri
da368ab5e8 feat: add support for not regex (#1328)
* feat: add support for not regex

Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2022-07-06 15:49:27 +05:30
Ankit Anand
091d769ad8 Updated Typo (#1362) 2022-07-06 11:45:42 +05:30
Ankit Nayan
be814afeea Merge pull request #1357 from SigNoz/release/v0.9.2
Release/v0.9.2
2022-07-04 22:30:46 +05:30
Prashant Shahi
6697702c0f chore: 📌 pin signoz v0.9.2
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-07-04 21:27:11 +05:30
Ankit Nayan
5e93f266c1 Merge branch 'main' into develop 2022-07-04 20:45:49 +05:30
Palash
352c7ac581 Merge pull request #1322 from sf-pchakraborty/506-integrate-reactquery-devtools
chore: integrate reactquery devtools in dev env
2022-07-04 18:40:21 +05:30
Palash
db8f35cca2 Merge branch 'develop' into 506-integrate-reactquery-devtools 2022-07-04 18:28:09 +05:30
Palash
3af1d2b5bb Merge pull request #1335 from palashgdev/513-dashboard
fix: dashboard data is flushed from redux while creating the dashboard
2022-07-04 18:27:31 +05:30
Palash
58038c222f Merge branch 'develop' into 513-dashboard 2022-07-04 18:20:01 +05:30
Palash
b3b5459a08 Merge pull request #1341 from palashgdev/517-ts-commit-lint
chore: commitlint config is updated to ts
2022-07-04 18:19:48 +05:30
Palash
1bfc9877fc Merge branch 'develop' into 517-ts-commit-lint 2022-07-04 17:43:51 +05:30
Pranshu Chittora
8ce806169f docs: add list of project maintainers (#1354)
* docs: add list of project maintainers

Co-authored-by: Palash <palashgdev@gmail.com>
2022-07-04 17:18:23 +05:30
Srikanth Chekuri
3c7e0f66fa chore: bump SigNoz/prometheus to 1.9.73 (#1355) 2022-07-04 17:16:55 +05:30
Ankit Nayan
cbdeb5ad03 chore: added metrics for analytics (#1356) 2022-07-04 17:13:36 +05:30
Priyanka Chakraborty
5bbe1246cc Merge branch '506-integrate-reactquery-devtools' of https://github.com/sf-pchakraborty/signoz into 506-integrate-reactquery-devtools 2022-07-04 14:37:48 +05:30
Priyanka Chakraborty
789d65d7c4 chore: set initialIsOpen as true 2022-07-04 14:37:09 +05:30
Palash
5cbc8af4af Merge branch 'develop' into 506-integrate-reactquery-devtools 2022-07-04 14:31:22 +05:30
Palash
2cffe0c53e Merge branch 'develop' into 513-dashboard 2022-07-04 13:24:01 +05:30
Palash
cf0eb44143 Merge pull request #1339 from pranshuchittora/pranshuchittora/fix/custom-legend-for-empty-metrics
fix: legend for empty metrics names list
2022-07-04 13:23:26 +05:30
Palash
d7ce786f4b Merge branch 'develop' into 517-ts-commit-lint 2022-07-04 13:19:54 +05:30
Palash
ae5d4326a2 Merge branch 'develop' into 506-integrate-reactquery-devtools 2022-07-04 13:10:56 +05:30
Palash
6fa0209104 Merge branch 'develop' into pranshuchittora/fix/custom-legend-for-empty-metrics 2022-07-01 11:38:21 +05:30
Palash
50501ea80f Merge pull request #1348 from SigNoz/issue-1329
fix: add request/response interceptors for ApiV2Instance
2022-07-01 10:39:46 +05:30
Srikanth Chekuri
669dc05eec fix: add request/response interceptors for ApiV2Instance 2022-07-01 00:56:15 +05:30
Palash
e839920b3b Merge branch 'develop' into 517-ts-commit-lint 2022-06-30 21:35:18 +05:30
Ankit Nayan
32b4bbcaec Merge pull request #1347 from SigNoz/release/v0.9.1
Release/v0.9.1
2022-06-30 20:43:16 +05:30
Prashant Shahi
58b0c08d71 chore: 📌 pin SigNoz v0.9.1 and clickhouse in TTY mode
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-30 19:20:50 +05:30
Palash
dd9cbcee33 fix: decimal-precision is made undefined (#1344)
Co-authored-by: Prashant Shahi <prashant@signoz.io>
2022-06-30 19:02:31 +05:30
Palash
9c3b4508be Merge branch 'develop' into 517-ts-commit-lint 2022-06-30 02:55:33 +05:30
Palash
450407d0bf chore: commitlint config is updated to ts 2022-06-30 02:51:31 +05:30
Palash
276b26b170 Merge branch 'develop' into pranshuchittora/fix/custom-legend-for-empty-metrics 2022-06-30 00:38:08 +05:30
Palash
475723a03a Merge branch 'develop' into 513-dashboard 2022-06-29 22:52:57 +05:30
Palash
897728cc71 Merge pull request #1331 from palashgdev/sidebar-text
chore: css for light mode is updated in sidebar
2022-06-29 22:52:39 +05:30
Palash
bdf78cbf2c Merge branch 'develop' into sidebar-text 2022-06-29 22:44:25 +05:30
Pranshu Chittora
e88cfcd4da fix: legend for empty metrics names list 2022-06-29 16:24:49 +05:30
Ankit Nayan
90566360ae Merge pull request #1336 from SigNoz/release/v0.9.0
Release/v0.9.0
2022-06-29 15:26:36 +05:30
Prashant Shahi
0a6fa0ee85 chore(release): 📌 pin versions: SigNoz 0.9.0, OtelCollector 0.45.1-1.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-29 13:38:56 +05:30
Palash
ba35b3e442 fix: dashboard data is flushed from redux while creating the dashboard from import dashboard json 2022-06-29 12:35:45 +05:30
Palash
73c2137cd7 chore: css for light mode is updated 2022-06-29 09:14:27 +05:30
Prashant Shahi
dbe68c064c chore(install-script): 🗑️ remove arm YAML as already satisfied by single YAML (#1326)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-28 17:33:50 +05:30
Pranshu Chittora
ba7427f280 feat: Metrics Builder Enhancements and Code Cleanup (#1325)
* feat: improved ts typings
* chore: remove commented-out code
2022-06-28 17:32:02 +05:30
Palash
6dbc11991b feat: service map color is updated according to the darkMode (#1324) 2022-06-28 16:20:18 +05:30
Pranshu Chittora
eeae71163c fix: dashboard save layout (#1320)
Co-authored-by: Palash <palashgdev@gmail.com>
2022-06-28 16:19:46 +05:30
Palash
a25e7a64ce fix: light mode issues are fixed (#1319)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-06-28 16:18:53 +05:30
Palash
d0e272b679 fix: text is updated in the light and dark mode (#1318) 2022-06-28 16:18:16 +05:30
Priyanka Chakraborty
16ff59b4de chore: integrate reactquery devtools in dev env 2022-06-27 16:54:58 +05:30
Priyanka Chakraborty
f2074a9d0e chore: integrate reactquery devtools in dev env 2022-06-27 16:33:50 +05:30
Palash
47e6e00a64 test: playwright github action is updated (#1286)
* test: playwright github action is updated

Co-authored-by: Pranshu Chittora <pranshu@signoz.io>
2022-06-24 19:21:23 +05:30
Srikanth Chekuri
282c47def8 chore(deps): bump github.com/SigNoz/prometheus from v1.9.71 to v1.9.72 (#1317) 2022-06-24 15:19:20 +05:30
Pranshu Chittora
9d3fc493a3 feat: Metrics Query Builder (#1166)
* feat: metrics builder metrics name suggestion UX changes
* feat: metrics builder metric name and single value selection
* feat: code cleanup
* feat: improved ts typings

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2022-06-24 15:00:21 +05:30
Ankit Nayan
b2afb9aabc chore: changed scrape interval to 60s and batch size to 10000 (#1316)
* chore: changed scrape interval to 60s and batch size to 10000
* chore: added send_batch_max_size to the batch processor
2022-06-24 14:55:02 +05:30
Srikanth Chekuri
a733adad2c Add v2 query range metrics API (#1020)
* Queryrange params tests

* review suggestions, quantile, simple metric filter and some refactoring

* Add value type support

* Add support for re2 regex, refactor, update tests and other changes

* chore: update govaluate dep to signoz/govaluate

* chore: add name to grouping

* chore: add support for NOOP

* fix: make result format compatible with prom HTTP API

* chore: update clickhouse server and update query builder to use new schema

* chore: use metric_name in auto suggest APIs

* chore: add reduce operator and new aggregate functions

* chore: add support for not like op

* chore: fix the dip at the end for incomplete time range

* chore: round down the end to exclude the incomplete collection
2022-06-24 14:52:11 +05:30
Palash
cc18cc9087 Merge pull request #1312 from palashgdev/trace-filter-toolip
feat: light mode tooltip is updated
2022-06-24 11:54:55 +05:30
Palash
ecb2ed8ac8 Merge branch 'develop' into trace-filter-toolip 2022-06-24 11:47:12 +05:30
Palash
31931e5a6c Merge pull request #1311 from palashgdev/490-dashboard
feat: dashboard breadcrumb is added
2022-06-24 11:46:58 +05:30
Palash
31848c488d Merge branch 'develop' into trace-filter-toolip 2022-06-24 11:33:05 +05:30
Palash
bdcc997672 Merge branch 'develop' into 490-dashboard 2022-06-24 11:13:10 +05:30
Palash
d68334b2ca Merge pull request #1235 from palashgdev/store-fix-1
fix: using legacy_createStore instead of createStore as it seems it is deprecated
2022-06-24 00:34:54 +05:30
Palash
3ebded66ea Merge branch 'develop' into 490-dashboard 2022-06-24 00:28:55 +05:30
Palash
2ed24df250 Merge branch 'develop' into store-fix-1 2022-06-24 00:27:38 +05:30
Palash
5a34ce2221 Merge pull request #1304 from SigNoz/417-search-filter
feat: search filter is added in the trace filter
2022-06-24 00:23:22 +05:30
Palash
aae6a1adf1 Merge branch 'develop' into 417-search-filter 2022-06-24 00:11:37 +05:30
Palash
bef83d30cc feat: duration filter is updated (#1272)
* feat: duration filter is updated

* feat: search filter is added in the trace filter

Co-authored-by: Palash gupta <palash@signoz.io>
2022-06-23 19:12:43 +05:30
Palash
1ebf3dbf65 feat: select tags key and value are updated to autocomplete filtering (#1267)
* feat: select tags key and value are updated to autocomplete filtering
Co-authored-by: Palash gupta <palash@signoz.io>
2022-06-23 19:11:19 +05:30
Palash
f57808bdb4 Merge branch 'develop' into 417-search-filter 2022-06-23 18:43:21 +05:30
Palash
6bdcd4f5bb Merge branch 'develop' into 490-dashboard 2022-06-23 18:35:10 +05:30
Palash
d726ad9ca6 Merge branch 'develop' into trace-filter-toolip 2022-06-23 18:35:02 +05:30
Palash
4ed3295b80 Merge pull request #1239 from palashgdev/412-trace-detail
FE: Trace Detail text is now ellipsed
2022-06-23 18:34:42 +05:30
Palash
72dc4d62ce Merge branch 'develop' into 412-trace-detail 2022-06-23 18:27:55 +05:30
Palash
186f4dca71 feat: light mode tooltip is updated 2022-06-23 18:07:03 +05:30
Palash
e4f2219f8c feat: dashboard breadcrumb is added 2022-06-23 16:29:38 +05:30
Palash
fe9a6c2448 Merge pull request #1250 from palashgdev/1249-service-tab
feat: metrics is renamed to services in sidebar
2022-06-23 16:08:19 +05:30
Palash
5c2a875211 Merge branch 'develop' into 1249-service-tab 2022-06-23 16:01:25 +05:30
Palash
6dab77409d Merge pull request #1251 from palashgdev/1244-edit-alert
feat: rule id is passed as params
2022-06-23 16:00:37 +05:30
Palash
0f811af34e Merge branch 'develop' into 1244-edit-alert 2022-06-23 15:51:42 +05:30
Palash
bdbcbb5f6c Merge pull request #1264 from zedongh/develop
fix: GetMinMax with 'GLOBAL_TIME' and 'custom' needs to pass globalTime
2022-06-23 15:44:36 +05:30
Palash
ae91d7e8a9 Merge branch 'develop' into develop 2022-06-23 15:37:37 +05:30
Ankit Nayan
64927acd97 updated codeowners for query-service 2022-06-23 15:33:31 +05:30
Palash
9dae957c8f Merge branch 'develop' into develop 2022-06-23 15:30:07 +05:30
Ankit Nayan
afbcde5edc fix: added 404 for error in getRule api (#1309)
* fix: added multiple error checks in getRule api
2022-06-23 15:29:15 +05:30
Palash
b8c3fd1cbf test: test pipeline for unit test is configured (#1277)
* test: test pipeline is configured
Co-authored-by: Palash gupta <palash@signoz.io>
2022-06-23 15:26:44 +05:30
Palash
93cf5dfa46 Merge branch 'develop' into 1244-edit-alert 2022-06-23 15:06:47 +05:30
Palash
d2c28a47c2 Merge pull request #1268 from palashgdev/450-alerts
feat: alerts breadcrumb is added
2022-06-23 15:06:29 +05:30
Palash
9c68c6af93 Merge branch 'develop' into 412-trace-detail 2022-06-23 13:00:25 +05:30
Palash
3771f85c7d Merge branch 'develop' into 450-alerts 2022-06-23 12:41:40 +05:30
Palash
b39e0465b0 Merge branch 'develop' into 1249-service-tab 2022-06-23 12:38:48 +05:30
Palash
bc97ea8fc0 Merge pull request #1308 from palashgdev/417-search-filter
feat: condition is updated
2022-06-23 12:38:28 +05:30
Palash
1e980c3886 feat: condition is updated 2022-06-23 12:37:42 +05:30
Palash
5ec52f03ad Merge branch 'develop' into 417-search-filter 2022-06-23 12:35:39 +05:30
Palash
4aab923e40 Merge branch 'develop' into develop 2022-06-23 12:29:54 +05:30
Palash
17b0ee5434 Merge pull request #1306 from palashgdev/489-layout
feat: removed auto save layout from dashboard
2022-06-23 12:29:12 +05:30
Palash
08c3c4c51c Merge branch 'develop' into 1249-service-tab 2022-06-23 12:13:16 +05:30
Palash
5f802e0e20 Merge branch 'develop' into 489-layout 2022-06-23 10:55:20 +05:30
Palash
63e663a92d feat: removed auto save layout from dashboard 2022-06-23 10:54:15 +05:30
rw4nn
d21ab7b82d fix(FE): escape regular expression to filter dashboards with special characters (#1279)
* fix(FE): escape reg exp to filter dashboards
* test(FE): add type and use uuid v4
2022-06-23 10:25:55 +05:30
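PR #1279 above fixes dashboard filtering when the search text contains regex metacharacters. A minimal sketch of the underlying technique (the helper name is hypothetical, not necessarily the code merged here):

```typescript
// Escape regex metacharacters so free-text search input can be used
// safely inside new RegExp(...); '$&' re-inserts the matched character
// prefixed with a backslash.
function escapeRegExp(input: string): string {
	return input.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}

const userQuery = 'latency (p99) [prod]';
const matcher = new RegExp(escapeRegExp(userQuery), 'i');
console.log(matcher.test('Latency (p99) [prod] overview')); // true
```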
Prashant Shahi
84b876170d chore: clickhouse version bump (#1280)
* chore: clickhouse version bump
2022-06-23 10:15:21 +05:30
Palash
88d8dba90e Merge branch '417-search-filter' of https://github.com/SigNoz/signoz into 417-search-filter 2022-06-23 01:12:21 +05:30
Palash
d7d0d70aa5 chore: search filter is made conditional as filters need to be present 2022-06-23 01:12:12 +05:30
Palash
671b441ec9 Merge branch 'develop' into 417-search-filter 2022-06-23 01:09:59 +05:30
Palash
729c7fce7b chore: initial value is made 8 2022-06-23 01:08:51 +05:30
Palash
224ec8d0d9 feat: search filter is added in the trace filter 2022-06-23 01:07:02 +05:30
Palash
7eed865660 Merge branch 'develop' into develop 2022-06-22 23:47:16 +05:30
Palash
241121ebec chore: service name now ellipsed 2022-06-22 23:46:30 +05:30
Palash
15af158a9c Merge branch 'develop' into 412-trace-detail 2022-06-22 22:58:56 +05:30
Palash
2f02aeb031 Merge branch 'develop' into 1249-service-tab 2022-06-22 22:52:17 +05:30
Palash
3603e497a6 chore: error state is updated 2022-06-22 22:49:01 +05:30
Palash
070d32a0ef Merge branch 'develop' into 1244-edit-alert 2022-06-22 22:45:54 +05:30
Palash
0b36da714f Merge pull request #1276 from palashgdev/414-uri
feat: nginx uri issue is handled by increasing buffers
2022-06-21 17:36:41 +05:30
Palash
ce0ac1e3af Merge branch 'develop' into 414-uri 2022-06-21 17:29:55 +05:30
Palash
bcb5256de0 Update CODEOWNERS (#1265) 2022-06-21 17:25:59 +05:30
Palash
fdca72b9b2 chore: nginx config is updated 2022-06-21 16:09:17 +05:30
Palash
7f64dfd023 chore: nginx config is updated 2022-06-21 16:05:18 +05:30
Palash
8871d53ae0 nginx config is updated 2022-06-21 15:12:03 +05:30
Palash
2313ec3f9a Merge branch 'develop' into 1249-service-tab 2022-06-21 14:22:53 +05:30
Palash
56208c9b06 Merge branch 'develop' into store-fix-1 2022-06-19 14:32:05 +05:30
Palash gupta
84e281271c Merge branch '414-uri' of https://github.com/palash-signoz/signoz into 414-uri 2022-06-17 12:31:59 +05:30
Palash gupta
43e4f637d1 fix: remove fastcgi 2022-06-17 12:31:48 +05:30
Palash
c156b9c403 Merge branch 'develop' into 414-uri 2022-06-17 08:08:11 +05:30
Palash gupta
9885572842 feat: nginx uri issue is handled by increasing buffers 2022-06-17 08:07:33 +05:30
Palash
4803fd9c8e Merge branch 'develop' into 450-alerts 2022-06-15 11:45:35 +05:30
Palash
c2fe35388e Merge branch 'develop' into develop 2022-06-15 10:39:39 +05:30
zedongh
ba5e3dcfd3 fix: getMinMax with 'GLOBAL_TIME' and 'custom' needs to pass globalTime (#1269) 2022-06-15 11:13:46 +08:00
Ankit Nayan
9c8c31d912 Merge branch 'develop' of https://github.com/SigNoz/signoz into develop 2022-06-15 02:45:49 +05:30
Ankit Nayan
469254e9fc Merge pull request #1266 from SigNoz/release/v0.8.2
Release/v0.8.2
2022-06-15 02:45:01 +05:30
Prashant Shahi
1f2ec0d728 chore(release): 📌 pin versions: SigNoz 0.8.2, OtelCollector 0.45.1-0.3
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-15 01:47:04 +05:30
Palash
ff1fc83b66 Merge branch 'develop' into 450-alerts 2022-06-15 01:35:28 +05:30
Palash gupta
0a5eff2255 feat: alerts breadcrumb is added 2022-06-15 01:34:56 +05:30
Ankit Nayan
24e84bac2a Create codeball.yml 2022-06-14 20:50:13 +05:30
Palash
db00a78a4e Merge branch 'develop' into 1244-edit-alert 2022-06-14 12:24:48 +05:30
Palash
4d2e8b0ea5 Merge branch 'develop' into 412-trace-detail 2022-06-14 12:24:22 +05:30
Srikanth Chekuri
4f12f8c85c fix: incorrect 5xx rate calculation (#1229) 2022-06-14 01:09:44 +05:30
Palash
fabab345cb Merge branch 'develop' into 412-trace-detail 2022-06-13 22:58:02 +05:30
Palash
00355b3383 Merge branch 'develop' into 1244-edit-alert 2022-06-13 19:07:07 +05:30
Palash gupta
c16ae790d4 feat: rule id is passed as params 2022-06-13 19:05:17 +05:30
Palash
c6d57a7a53 Merge branch 'develop' into 1249-service-tab 2022-06-13 18:39:52 +05:30
Palash gupta
d8775c91d7 feat: metrics is renamed to services in sidebar 2022-06-13 18:38:06 +05:30
Ankit Nayan
7b315c6766 Merge pull request #1246 from SigNoz/release/v0.8.1
Release/v0.8.1
2022-06-09 21:17:59 +05:30
Prashant Shahi
676fe892a5 chore(release): 📌 pin versions: OtelCollectors 0.45.1-0.2 and config changes
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-09 20:58:10 +05:30
Prashant Shahi
15260e0e14 Merge branch 'main' into release/v0.8.1 2022-06-09 17:24:12 +05:30
Prashant Shahi
ce7be6e7cd chore(release): 📌 pin versions: SigNoz 0.8.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-09 17:20:11 +05:30
palash-signoz
99d38860cb Merge pull request #1243 from pranshuchittora/pranshuchittora/feat/dashboard-save-rbac
feat(FE): save dashboard with RBAC permissions
2022-06-09 12:23:18 +05:30
Pranshu Chittora
1f4f281965 feat(FE): save dashboard with RBAC permissions 2022-06-09 12:04:47 +05:30
palash-signoz
4aa4bf9ea2 Merge pull request #1242 from pranshuchittora/pranshuchittora/feat/dashboard-edit-permission
feat(FE): dashboard edit permission based on RBAC
2022-06-08 23:25:21 +05:30
Pranshu Chittora
052eb25cff chore(FE): sidebar red dot styling 2022-06-08 23:15:48 +05:30
Pranshu Chittora
ce14638a63 feat(FE): dashboard edit permission based on RBAC 2022-06-08 22:57:34 +05:30
palash-signoz
b3dfd567e0 Merge branch 'develop' into 412-trace-detail 2022-06-08 16:25:31 +05:30
Prashant Shahi
fa142707dc chore(alertmanager): 🔧 use query-service internalport (#1241)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-06-08 16:03:48 +05:30
Amol Umbark
5ae4e05c96 HTTP listener for internal services (#1238)
* feat: added private http server to handle internal service requests
* feat: added private port default to constants
2022-06-08 12:22:25 +05:30
palash-signoz
b7d52b8fba fix: dashboard is updated (#1240)
* fix: dashboard is updated

* fix: redux is made empty when creating dashboard

Co-authored-by: Palash gupta <palash@signoz.io>
2022-06-08 11:50:41 +05:30
palash-signoz
660391c360 Merge branch 'develop' into store-fix-1 2022-06-07 16:47:18 +05:30
palash-signoz
1c90e62189 feat: dashboard layout is updated (#1221)
* feat: dashboard layout is updated

* feat: onClick is made fixed

* feat: layout is updated

* feat: layout is updated

* feat: layout is updated

* fix: memo is removed and grid layout component is refactored to use use query

* fix: saveDashboard is updated

* feat: layout is fixed

* fix: tsc error are fixed

* fix: delete widgets is updated

* fix: useMount once is added

* fix: useMount once is removed

* chore: removed the commented code

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-06-07 16:14:49 +05:30
palash-signoz
cfeb631a6e Merge pull request #1217 from palash-signoz/1215-signup
fix: button is disabled until condition is met
2022-06-07 16:02:40 +05:30
Palash gupta
8a0bcf6cd9 feat: operation name is now ellipsed 2022-06-07 15:56:16 +05:30
Palash gupta
0c06c5ee0e fix: trace detail is updated 2022-06-06 17:06:16 +05:30
palash-signoz
f3610ffe55 Merge branch 'develop' into store-fix-1 2022-06-06 14:07:27 +05:30
Palash gupta
d150cfa46c fix: using legacy_createStore instead of createStore as it seems it is deprecated 2022-06-06 10:52:09 +05:30
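For context on the commit above: redux 4.2 deprecated `createStore` in favour of Redux Toolkit but kept `legacy_createStore` as an un-deprecated alias, so the swap is a one-line import change. A minimal sketch with a placeholder reducer (not the app's actual root reducer):

```typescript
import { legacy_createStore as createStore } from 'redux';

// Placeholder reducer for illustration only.
const rootReducer = (
	state: { count: number } = { count: 0 },
): { count: number } => state;

// Same behavior as createStore, without the deprecation warning.
const store = createStore(rootReducer);
console.log(store.getState()); // { count: 0 }
```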
palash-signoz
4fc4ab0611 Merge branch 'develop' into 1215-signup 2022-06-01 18:16:08 +05:30
palash-signoz
b107902c31 Merge pull request #1220 from palash-signoz/1219-video-update
chore: video link is updated
2022-06-01 18:15:52 +05:30
palash-signoz
2d83afd0c4 Merge branch 'develop' into 1215-signup 2022-05-31 11:20:29 +05:30
palash-signoz
e641577e1c Merge branch 'develop' into 1219-video-update 2022-05-31 11:19:49 +05:30
Palash gupta
3e4b56e012 chore: video link is updated 2022-05-31 11:18:52 +05:30
palash-signoz
697fd1d1bf Merge pull request #1209 from palash-signoz/dashboard-layout-fix
fix: layout is updated
2022-05-30 22:31:13 +05:30
palash-signoz
21dbdb57da Merge branch 'develop' into dashboard-layout-fix 2022-05-30 22:21:17 +05:30
palash-signoz
3406bcaa5f Merge branch 'develop' into 1215-signup 2022-05-30 22:06:05 +05:30
Palash gupta
de0fd64a5e fix: button is disabled until condition is met 2022-05-30 22:04:45 +05:30
Pranay Prateek
c27c026e25 Update SECURITY.md 2022-05-30 17:22:47 +05:30
Pranay Prateek
0a4bc7e181 Update SECURITY.md 2022-05-30 17:22:20 +05:30
Pranay Prateek
b6cfe9d08e Update SECURITY.md 2022-05-30 17:14:01 +05:30
Pranay Prateek
b5b9f20b1f Update SECURITY.md 2022-05-30 17:13:34 +05:30
Pranay Prateek
25c6106bd6 Create SECURITY.md 2022-05-30 17:04:02 +05:30
Palash gupta
d5877337ec fix: layout is updated 2022-05-27 07:04:22 +05:30
Palash gupta
51e0972219 fix: layout is updated 2022-05-27 07:03:12 +05:30
palash-signoz
38c0bcf4ea fix: trace table is fixed (#1208) 2022-05-26 16:51:18 +05:30
palash-signoz
d863c2781a feat: dashboard layout is updated from widgets (#1207) 2022-05-26 15:09:59 +05:30
Prashant Shahi
642c6c5920 chore: TTL and S3 config related changes (#1201)
* fix: 🐛 convert TTL APIs to async

* chore: add archive support

* chore: update TTL async APIs according to new design

* chore: 🔥 clean removeTTL API

* fix: metrics s3 config

* feat: ttl async with polling (#1195)

* feat: ttl state message change and time unit language changes (#1197)

* test:  update tests for async TTL api

* feat: ttl message info icon (#1202)

* feat: ttl pr review changes

* chore: refactoring

Co-authored-by: makeavish <makeavish786@gmail.com>
Co-authored-by: Pranshu Chittora <pranshu@signoz.io>
Co-authored-by: palash-signoz <palash@signoz.io>
Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-05-25 18:19:44 +05:30
Prashant Shahi
f92e4798ce refactor: ⚰️ Remove deprecated flattner and Druid leftover files (#1194)
* refactor: ⚰️ Remove flattner from Makefile

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* refactor: ⚰️ Remove deprecated Druid leftover files

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-05-25 18:06:14 +05:30
Vishal Sharma
5d080f5564 fix: 🐛 convert TTL APIs to async #902 (#1173)
* fix: 🐛 convert TTL APIs to async

* chore: add archive support

* chore: update TTL async APIs according to new design

* chore: 🔥 clean removeTTL API

* fix: metrics s3 config

* test:  update tests for async TTL api

* chore: refactoring

Co-authored-by: Pranay Prateek <pranay@signoz.io>
2022-05-25 16:55:30 +05:30
palash-signoz
eb9a8e3a97 feat: color is updated (#1198) 2022-05-25 10:48:53 +05:30
palash-signoz
4a13c524a3 chore: test result is added in the .gitignore (#1191)
* chore: test result is added in the .gitignore

* chore: cypress is removed from gitignore
2022-05-24 19:11:11 +05:30
palash-signoz
7c3edec3e6 Merge pull request #1190 from palash-signoz/react-version-resolution
fix: react version is made fixed
2022-05-24 18:37:07 +05:30
palash-signoz
199d6b6213 Merge branch 'develop' into react-version-resolution 2022-05-24 11:23:09 +05:30
palash-signoz
3d46abc1e9 Merge pull request #1189 from palash-signoz/1161-service-map
fix: handle the broken state in service map
2022-05-24 11:20:50 +05:30
palash-signoz
e6496ee67b Merge branch 'develop' into 1161-service-map 2022-05-23 16:05:09 +05:30
Pranay Prateek
fa6d5a7404 Merge branch 'develop' into react-version-resolution 2022-05-23 11:34:08 +05:30
Prashant Shahi
bd6153225f ci(build): 👷 Update build-pipeline workflow (#1187)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-23 11:33:33 +05:30
Palash gupta
bcceaf7937 fix: react version is made fixed 2022-05-22 21:25:12 +05:30
Palash gupta
4a287fd112 fix: handle the broken state 2022-05-22 20:59:35 +05:30
palash-signoz
8ec9cb2222 Merge pull request #1184 from pranshuchittora/pranshuchittora/fix/0.8.1/tsc
fix: ts typings and remove cypress types
2022-05-20 23:25:05 +05:30
palash-signoz
d3094e10bf Merge branch 'develop' into pranshuchittora/fix/0.8.1/tsc 2022-05-20 22:09:18 +05:30
Ankit Nayan
973ef56c09 Revert "feat: NODE_ENV is configured in the frontend" (#1186) 2022-05-20 18:08:20 +02:00
Prashant Shahi
26db6b5fcc Merge branch 'develop' into pranshuchittora/fix/0.8.1/tsc 2022-05-20 19:57:37 +05:30
Prashant Shahi
6e2afe1c78 fix(husky): 🚨 integrate is-ci and webpack-cli version bump (#1181)
* fix(husky): 🚨 integrate is-ci and webpack-cli version bump

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore(frontend-Dockerfile): 🚀 remove NODE_ENV

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-20 19:14:58 +05:30
Pranshu Chittora
0bcd9d8d98 fix: ts typings and remove cypress types 2022-05-20 18:36:46 +05:30
Ankit Nayan
be01bc9b82 Revert "fix: frontend/package.json & frontend/yarn.lock to reduce vulnerabilities (#1147)" (#1182)
This reverts commit 5a2ad9492c.
2022-05-20 14:08:12 +02:00
palash-signoz
5a2ad9492c fix: frontend/package.json & frontend/yarn.lock to reduce vulnerabilities (#1147)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-ANSIREGEX-1583908

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2022-05-20 13:38:22 +02:00
Ankit Nayan
747677d4b0 Merge pull request #1152 from palash-signoz/feat/playwright
feat: playwright is configured
2022-05-20 13:33:37 +02:00
palash-signoz
e7f49cf360 Merge pull request #1178 from palash-signoz/tag-improvement
fix: tag style is updated
2022-05-20 15:46:04 +05:30
palash-signoz
3ba519457a Merge pull request #1179 from palash-signoz/metrics-application-active-key
fix: getActiveKey is refactored into switch
2022-05-20 15:45:49 +05:30
palash-signoz
8d6646afed Merge pull request #1180 from SigNoz/prashant/frontend-docker
chore(docker): 🚀 Update Dockerfile and .dockerignore files
2022-05-20 15:38:24 +05:30
Prashant Shahi
a4cfb44953 chore(docker): 🚀 Update Dockerfile and .dockerignore files
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-20 15:07:47 +05:30
Palash gupta
c77ad88f90 fix: getActiveKey is refactored into switch 2022-05-20 14:12:13 +05:30
palash-signoz
914be6e4cf Merge pull request #1177 from palash-signoz/set-retention
fix: set retention query is fixed
2022-05-20 14:04:21 +05:30
Palash gupta
2e9e29eb38 fix: tag style is updated 2022-05-20 14:02:46 +05:30
Palash gupta
bbed3fda22 fix: set retention query is fixed 2022-05-20 13:39:23 +05:30
Palash gupta
cbaf9b009c fix: merge conflict resolved 2022-05-20 13:29:08 +05:30
Ankit Nayan
8471dc0c1b Merge pull request #1151 from SigNoz/feat/gh-bot
feat: signoz gh-bot integration
2022-05-20 09:38:27 +02:00
Ankit Nayan
49175b3784 Merge pull request #1176 from SigNoz/feat/analytics-check-response
feat: added status code of api calls
2022-05-20 09:32:07 +02:00
Ankit Nayan
961dc7e814 feat: added status code of api calls 2022-05-20 09:31:23 +02:00
palash-signoz
1315b43aad Merge pull request #1174 from palash-signoz/compress
fix: version is made exact
2022-05-20 12:55:37 +05:30
Pranshu Chittora
9e6d918d6a fix: removed comment of PR workflow 2022-05-20 12:37:33 +05:30
palash-signoz
5b5b19dd99 Merge pull request #1170 from palash-signoz/full-view
feat: full view is updated to use query
2022-05-20 11:55:10 +05:30
palash-signoz
4b8bd2e335 feat: tags are added in the sidebar (#1153)
* feat: tags are added in the sidebar

* chore: styles is updated
2022-05-20 11:43:23 +05:30
palash-signoz
7d2883df11 Merge pull request #1169 from palash-signoz/392-tab-persist
feat: Tabs selection persist when we refresh
2022-05-20 11:41:40 +05:30
Palash gupta
cb4e465a10 Merge branch 'develop' into compress 2022-05-20 10:55:18 +05:30
palash-signoz
b1ee56b2f2 Merge pull request #1171 from palash-signoz/tsc-fix-2
fix: tsc is fixed
2022-05-20 10:53:08 +05:30
Palash gupta
98dfcead5b fix: version is made exact 2022-05-20 10:48:23 +05:30
Palash gupta
3cc4fb9c30 fix: tsc is fixed 2022-05-19 23:15:12 +05:30
Palash gupta
83cb099aa6 feat: full view is updated to use query 2022-05-19 23:08:27 +05:30
Srikanth Chekuri
c480b3c563 Add section outlining ideal workflow for significant features/changes (#1111) 2022-05-19 22:24:59 +05:30
Palash gupta
f084637f84 fix: merge conflict removed 2022-05-19 21:29:21 +05:30
Palash gupta
9fd8d12cc0 feat: Tabs selection persist when we refresh 2022-05-19 21:25:03 +05:30
palash-signoz
22f9069a29 Merge pull request #1130 from palash-signoz/trigger-alerts-error-handling
fix: error handling is updated for the trigger alerts
2022-05-19 19:34:06 +05:30
Ankit Nayan
42269a7c78 Merge pull request #962 from SigNoz/ttl-plus
Add remove TTL api, and do not allow zero or negative TTL
2022-05-19 16:01:17 +02:00
palash-signoz
2c62a1c0f0 Merge pull request #1163 from palash-signoz/389-trace-left-panel
feat: tooltip is added and max width is configured in the left panel to show text ellipsis
2022-05-19 16:43:39 +05:30
palash-signoz
b3729e0b6c Merge pull request #1167 from palash-signoz/1112-errors
fix: route is updated
2022-05-19 16:42:55 +05:30
Palash gupta
696a6adc32 fix: route is updated 2022-05-19 16:40:41 +05:30
palash-signoz
d964b66bcc Merge pull request #1145 from palash-signoz/bug-double-org
bug: double org is fixed
2022-05-19 16:09:28 +05:30
palash-signoz
4a4ad7a3da Merge pull request #1119 from palash-signoz/logout
fix: logout the user if api is not successful
2022-05-19 16:09:14 +05:30
palash-signoz
03ef3d3bcd Merge pull request #1146 from palash-signoz/379-json-data
feat: dashboard error and loading state is removed from dashboard object
2022-05-19 15:29:45 +05:30
palash-signoz
d2913a2831 Merge pull request #1107 from palash-signoz/app-actions
chore: type is updated for thunk
2022-05-19 15:20:57 +05:30
palash-signoz
4ca3f1f945 Merge pull request #1133 from palash-signoz/develop-tsc-fix
fix: tsc is fix in cypress
2022-05-19 15:20:36 +05:30
palash-signoz
f2074f01e8 Merge pull request #1164 from palash-signoz/393-error-expection-tooltip
feat: tooltip is added in the error message and error type
2022-05-19 15:19:45 +05:30
palash-signoz
ffd5621f09 Merge pull request #1118 from palash-signoz/application-metrics-error-handling
fix: error is now handled and displayed as antd notification message in /application
2022-05-19 15:19:25 +05:30
Palash gupta
429e3bbd0d fix: logout is fixed 2022-05-19 13:43:54 +05:30
palash-signoz
3f37fe4d60 Merge pull request #1158 from palash-signoz/391-top-end-points
feat: top end point table is fixed
2022-05-19 13:30:07 +05:30
palash-signoz
ec3fed05bb Merge pull request #1132 from palash-signoz/commitlint
feat: commit lint is added in the frontend
2022-05-19 13:29:24 +05:30
palash-signoz
31583b73d8 Merge pull request #1159 from palash-signoz/390-metrics-pagination
feat: pagination is added in the application table
2022-05-19 13:29:05 +05:30
palash-signoz
02ba0eda9a Merge pull request #1154 from palash-signoz/341-url-encoding
feat: url encoding is added in the new dashboard query
2022-05-19 13:28:32 +05:30
palash-signoz
7185f2fa24 Merge pull request #1155 from palash-signoz/dashboard-widget-hover-resize
feat: resize handler is visible on hover
2022-05-19 13:27:16 +05:30
Palash gupta
ceb59e8bb5 fix: yarn is turned into npm 2022-05-19 13:24:46 +05:30
Ankit Nayan
f063a82133 Merge pull request #1124 from palash-signoz/env
feat: NODE_ENV is configured in the frontend
2022-05-19 08:46:30 +02:00
Palash gupta
072c137f26 feat: tooltip is added in the error message and error type 2022-05-19 08:48:19 +05:30
Palash gupta
358fc3a217 feat: tooltip is added and max width is configured in the left panel to show text ellipsis 2022-05-19 08:28:50 +05:30
Palash gupta
60d869ddbe feat: pagination is added in the application table 2022-05-18 22:26:41 +05:30
Palash gupta
286d46edbe feat: top end point table is fixed 2022-05-18 22:22:12 +05:30
palash-signoz
b66ce81eb6 Merge pull request #1127 from palash-signoz/remove-unneccesary-file
fix: removed unnecessary file
2022-05-18 09:50:39 +05:30
Palash gupta
60bb82ea9d feat: resize handler is visible on hover 2022-05-18 08:55:08 +05:30
Palash gupta
e3987206de feat: url encoding is added in the new dashboard query 2022-05-18 07:37:15 +05:30
Palash gupta
b8f8d59d40 feat: baseurl is added and grabbed from the env 2022-05-18 07:12:53 +05:30
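A minimal sketch of what the commit above describes, assuming a webpack-style build where `process.env` is substituted at build time (the variable name is an assumption, not necessarily the one used here):

```typescript
// Resolve the API base URL from a build-time environment variable,
// falling back to a same-origin relative path when it is not set.
const apiBaseUrl: string = process.env.FRONTEND_API_ENDPOINT || '/api';

export default apiBaseUrl;
```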
Palash gupta
b2fc4776b7 feat: playwright is configured 2022-05-18 00:08:36 +05:30
Palash gupta
dd0047da07 feat: playwright is configured 2022-05-17 19:28:06 +05:30
palash-signoz
d3c67bad5b Merge pull request #1138 from pranshuchittora/pranshuchittora/fix/service-map-color
fix: service map label readable
2022-05-17 17:40:36 +05:30
Pranshu Chittora
ff3b414645 feat: signoz gh-bot integration 2022-05-17 17:23:06 +05:30
Ankit Nayan
104256dcb5 Merge pull request #926 from prashant-shahi/prashant/pprof
feat(query-service):  integrate pprof
2022-05-17 10:25:19 +02:00
Ankit Nayan
38d89fc34a Merge pull request #1136 from SigNoz/prashant/nginx-cache-improvement
chore: 🔧 improve nginx cache configuration
2022-05-17 10:22:20 +02:00
Palash gupta
a2d67f1222 fix: isProduction is removed 2022-05-17 11:55:30 +05:30
palash-signoz
8e360e001f Merge pull request #1126 from palash-signoz/alerts-rules-error-handling
fix: list alerts rules is handled
2022-05-17 11:52:06 +05:30
palash-signoz
de3928c51f Merge pull request #1128 from palash-signoz/error-handling-error-detail
fix: error details error is handled
2022-05-17 11:49:50 +05:30
Palash gupta
228fb66251 feat: dashboard error and loading state is removed from dashboard object 2022-05-13 12:59:35 +05:30
Palash gupta
12c14f71ba bug: double org is fixed 2022-05-13 11:15:10 +05:30
Prashant Shahi
80de9efa0e refactor(query-service): 🔊 update pprof server error log
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-13 10:53:43 +05:30
palash-signoz
3890e06d29 Merge pull request #1123 from palash-signoz/trace-handling
fix: error handling is updated in trace
2022-05-13 10:44:15 +05:30
Palash gupta
a34dbc4942 chore: error checking condition !=200 is moved to >=400 2022-05-13 10:43:26 +05:30
Palash gupta
4b591fabf7 chore: error detail is updated 2022-05-13 10:38:32 +05:30
Palash gupta
cc978153f9 chore: added the new line 2022-05-13 10:32:36 +05:30
Prashant Shahi
9ba0b84a91 refactor(query-service): ♻️ move pprof to server.go
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-13 03:38:00 +05:30
Prashant Shahi
ac06b02d52 Merge branch 'develop' of github.com:signoz/signoz into prashant/pprof 2022-05-13 03:06:09 +05:30
Prashant Shahi
9c173c8eb3 docs(contributing): 📝 Update CONTRIBUTING.md docs (#877)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-12 22:46:43 +05:30
Srikanth Chekuri
d0b21fce01 update exec to clickhouse v2 api; update the queries 2022-05-12 16:22:59 +05:30
Pranshu Chittora
07ffd13159 fix: service map label readable 2022-05-12 15:47:47 +05:30
palash-signoz
1926998e3c fix: error is now handled in the login screen (#1120) 2022-05-12 13:43:20 +05:30
Srikanth Chekuri
eb397babcd resolve merge conflicts 2022-05-12 11:13:44 +05:30
Prashant Shahi
a0643aaf4e chore: 🔧 improve nginx cache configuration
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-11 10:53:51 +05:30
Palash gupta
169185ff89 chore: type is updated 2022-05-11 01:47:12 +05:30
Palash gupta
7feee26f85 fix: tsc is fix in cypress 2022-05-11 01:34:53 +05:30
Palash gupta
ce72b1e7a0 feat: commit lint is added in the frontend 2022-05-11 01:12:29 +05:30
Palash gupta
e06f020162 fix: error handling is updated for the trigger alerts 2022-05-10 18:30:22 +05:30
Palash gupta
574088ad54 fix: error state is updated 2022-05-10 18:23:47 +05:30
Palash gupta
6f48030ab9 fix: error details error is handled 2022-05-10 17:54:26 +05:30
Palash gupta
ea3a5e20d9 fix: removed unnecessary import 2022-05-10 17:32:22 +05:30
Palash gupta
b4833eeb0e fix: list alerts rules is handled 2022-05-10 17:27:04 +05:30
Palash gupta
ce67005d66 feat: NODE_ENV is configured in the frontend 2022-05-10 17:18:20 +05:30
Palash gupta
80c0b5621d fix: error handling is updated in trace 2022-05-10 17:09:39 +05:30
Palash gupta
b21a2707d3 fix: logout the user if api is not successful 2022-05-10 14:11:16 +05:30
Palash gupta
4fa5ff9319 fix: error is now handled and displayed as antd notification message 2022-05-10 14:02:05 +05:30
palash-signoz
53528f1045 fix: name is updated to the tag_name in the version (#1116) 2022-05-10 12:59:15 +05:30
Ankit Nayan
9522bbf33b Merge pull request #1106 from SigNoz/release/v0.8.0
Release/v0.8.0
2022-05-06 12:16:51 +05:30
Prashant Shahi
3995de16f0 chore(release): 📌 pin versions: SigNoz 0.8.0, Alertmanager 0.23.0-0.1, OtelCollector 0.43.0-0.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-06 11:49:49 +05:30
Palash gupta
f149258de2 chore: type is updated for thunk 2022-05-06 11:27:23 +05:30
Prashant Shahi
da386b0e8e chore(nginx-config): 🔧 add cache control headers for UI (#1104)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-06 09:12:01 +05:30
palash-signoz
75cdac376f feat: cache headers is added (#1103) 2022-05-06 00:33:41 +05:30
palash-signoz
0ef13a89ed feat: updated password is updated (#1102) 2022-05-06 00:33:04 +05:30
palash-signoz
084d8ecccd chore: logout is updated (#1100) 2022-05-05 21:21:39 +05:30
Vishal Sharma
b9f3663b6c fix: exceptionStacktrace typo (#1098) 2022-05-05 20:04:59 +05:30
palash-signoz
4067aa5025 chore: handled the null case (#1096) 2022-05-05 16:16:13 +05:30
palash-signoz
ebf9316714 feat: sorting is updated (#1095) 2022-05-05 14:06:22 +05:30
palash-signoz
f5009abca6 chore: error sorting is updated (#1094) 2022-05-05 14:00:37 +05:30
palash-signoz
b16a793cbc Error is renamed to exceptions (#1093)
* chore: error is renamed to exceptions
2022-05-05 13:59:25 +05:30
Vishal Sharma
374a2415d9 fix: fix typo and return empty struct instead of null (#1092) 2022-05-05 13:58:39 +05:30
palash-signoz
3789e25a1e feat: stack trace is shown in monaco editor (#1091) 2022-05-05 13:57:52 +05:30
palash-signoz
10ab057e29 chore: error details is updated (#1090)
* chore: error details is updated
2022-05-05 13:57:26 +05:30
Ankit Nayan
41b9129145 viewAccess to usage explorer 2022-05-05 13:19:42 +05:30
Pranshu Chittora
f5d10b72f0 fix: y-axis generic time unit conversion (#1088)
* fix: y-axis generic time unit conversion
2022-05-05 12:22:56 +05:30
Vishal Sharma
6fb6a576aa fix: lagInFrame and leadInFrame nullable (#1089)
https://github.com/ClickHouse/ClickHouse/pull/26521
2022-05-05 12:22:19 +05:30
palash-signoz
7cf567792a chore: error message is displayed to user in the error page (#1085) 2022-05-05 11:12:42 +05:30
Vishal Sharma
fe18e85e36 fix: error and exception type error (#1086) 2022-05-05 11:11:35 +05:30
palash-signoz
147476d802 bug: confirm password bug is fixed (#1084) 2022-05-05 10:16:32 +05:30
palash-signoz
c94f23a710 chore: reset password is updated for the user id not the logged in user id (#1082) 2022-05-04 23:19:49 +05:30
Pranshu Chittora
1a2ef4fde6 fix: y-axis time unit (#1081) 2022-05-04 22:46:33 +05:30
Ahsan Barkati
6c505f9e86 Fix error message (#1080) 2022-05-04 21:45:20 +05:30
palash-signoz
fb97540c7c chore: tsc is fixed (#1078) 2022-05-04 20:52:33 +05:30
palash-signoz
9cf5c7ef74 chore: new dashboard permission is added for editor and admin (#1077)
* chore: new dashboard permission is added for editor and admin
2022-05-04 20:40:49 +05:30
palash-signoz
6223e89d4c chore: logout is fixed (#1076) 2022-05-04 20:38:03 +05:30
Pranshu Chittora
62f8cddc27 fix: fixed graph axis and tooltip (#1075) 2022-05-04 19:30:57 +05:30
palash-signoz
ffae767fab chore: edit widget is allowed for admin and editor (#1074) 2022-05-04 19:16:30 +05:30
palash-signoz
c23f97c3d0 chore: placeholder is updated for signup name (#1069) 2022-05-04 18:06:25 +05:30
palash-signoz
11eb1e4f72 chore: error message is updated for signup (#1072) 2022-05-04 18:06:02 +05:30
palash-signoz
0554ed7ecb bug: members is fixed (#1073) 2022-05-04 18:05:37 +05:30
palash-signoz
ca4ce0d380 Merge pull request #1071 from pranshuchittora/pranshuchittora/fix/chart-y-axis-zero-values
fix: chart y-axis values not showing for values < 1
2022-05-04 17:55:59 +05:30
Ankit Nayan
65f50bb70d chore: changed permission for edit and delete alert channels (#1070) 2022-05-04 17:53:53 +05:30
Pranshu Chittora
dbe9f3a034 fix: chart y-axis values not showing for values < 1 2022-05-04 17:52:14 +05:30
palash-signoz
7cdd136f61 chore: redirection is added (#1063) 2022-05-04 17:37:11 +05:30
palash-signoz
21d5e0b71c text is made optional (#1064) 2022-05-04 17:36:53 +05:30
palash-signoz
fe53aa412b copy to clipboard is updated (#1065) 2022-05-04 17:36:11 +05:30
palash-signoz
6c5a48082b delete widget is added for admin and editor (#1066) 2022-05-04 17:35:48 +05:30
palash-signoz
b7adc27f02 chore: button is not made disabled for firstName (#1067) 2022-05-04 17:34:50 +05:30
Ankit Nayan
67b4290846 chore: change in error message on register (#1068) 2022-05-04 17:33:54 +05:30
Ankit Nayan
8a7cbc8ad3 Update http_handler.go 2022-05-04 17:00:49 +05:30
Vishal Sharma
c74d87a21a fix: handle empty data scenarios (#1062) 2022-05-04 15:03:48 +05:30
Ahsan Barkati
6486425f46 fix(auth): Return 403 for forbidden requests due to rbac (#1060)
* Return error json for user not found
* Return 403 for rbac error
* Return not_found instead of internal_error for getInvite
2022-05-04 14:50:15 +05:30
palash-signoz
5b316afa12 chore: sorting key is updated (#1059) 2022-05-04 14:32:38 +05:30
Ankit Nayan
2dcc6fda77 chore: analytics changed after rbac (#1058) 2022-05-04 14:27:32 +05:30
palash-signoz
a2f570d78c Merge pull request #1057 from pranshuchittora/pranshuchittora/fix/resource-attribute-error-message 2022-05-04 14:11:57 +05:30
Pranshu Chittora
ef209e11d5 fix: resource attribute error message 2022-05-04 13:52:44 +05:30
palash-signoz
1851e76bca Merge pull request #1056 from pranshuchittora/pranshuchittora/fix/dashboard-widget-uuid
fix: dashboard uuid issue
2022-05-04 12:57:20 +05:30
Pranshu Chittora
fa23050916 fix: dashboard uuid issue 2022-05-04 12:56:19 +05:30
palash-signoz
79475bde71 Merge pull request #1054 from palash-signoz/interceptor-is-updated
chore: interceptor is updated
2022-05-04 12:35:50 +05:30
Palash gupta
039201acae chore: interceptor is updated 2022-05-04 12:31:37 +05:30
palash-signoz
22454abc4a chore: api is updated (#1053) 2022-05-04 12:02:14 +05:30
palash-signoz
4c8b7af0eb chore: routesToSkip is updated (#1052) 2022-05-04 12:01:36 +05:30
palash-signoz
5caf94f024 feat: permission is added in the dashboard button (#1051) 2022-05-04 01:31:44 +05:30
Ankit Nayan
ce0b37ca2e Update jwt.go 2022-05-04 01:06:45 +05:30
palash-signoz
5f529e1c10 feat: refresh token is fixed (#1049) 2022-05-04 01:05:38 +05:30
Prashant Shahi
05c923df9b chore: 📌 pin alertmanager and otelcollector version and changes (#1048)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-05-03 23:59:29 +05:30
palash-signoz
90637212bc feat: total count is generated from the filter (#1047)
* feat: total count is generated from the filter
2022-05-03 23:33:12 +05:30
palash-signoz
b58f45c268 chore: scroll is made auto (#1044) 2022-05-03 23:04:16 +05:30
Vishal Sharma
6a6fd44719 fix: convert serviceMap API to POST (#1046) 2022-05-03 23:03:47 +05:30
Vishal Sharma
81cc120539 fix: aggregate of avg and percentile (#1045) 2022-05-03 23:02:49 +05:30
Pranshu Chittora
831381a1ff fix: trace detail styling issue (#1043) 2022-05-03 21:27:44 +05:30
palash-signoz
fd0656e0fc chore: redirect the user to application if user is navigated to a non-logged-in page (#1042) 2022-05-03 21:27:17 +05:30
palash-signoz
e217ea0c9c chore: default value is added when on unmount (#1041) 2022-05-03 21:24:55 +05:30
palash-signoz
bdf9333dcf chore: version is added in the src of Logo (#1039) 2022-05-03 21:24:02 +05:30
Pranshu Chittora
eae97d6ffc fix: migrate service map APIs from GET to POST (#1038) 2022-05-03 21:21:40 +05:30
palash-signoz
9f5241e82c chore: error message is updated (#1037) 2022-05-03 21:21:10 +05:30
palash-signoz
284eda4072 chore: placeholder is updated for the signup page (#1035)
* chore: placeholder is updated for the signup page
2022-05-03 21:20:36 +05:30
palash-signoz
63693a4185 chore: name field is hidden when empty name is received from invite details api (#1036) 2022-05-03 21:19:35 +05:30
palash-signoz
d9cf9071d3 chore: text for forgot password is updated (#1034) 2022-05-03 21:15:58 +05:30
palash-signoz
5e41c7f62b chore: header style is fixed in the light mode (#1033) 2022-05-03 21:15:23 +05:30
palash-signoz
e903277143 feat: sorting duration is added (#1032)
* feat: sorting duration is added in trace filter page
2022-05-03 21:14:40 +05:30
palash-signoz
f2def38df8 Merge pull request #1040 from pranshuchittora/pranshuchittora/fix/empty-graph-on-zero-values
fix: empty graph showing no data on zero values
2022-05-03 21:10:55 +05:30
Ankit Nayan
71e742fb2b Update otel-collector-config.yaml 2022-05-03 21:01:49 +05:30
Pranshu Chittora
571e087f31 fix: empty graph showing no data on zero values 2022-05-03 20:14:35 +05:30
Srikanth Chekuri
3e5f9f3b25 bugfix: missing destination name for DiskItem (#1031) 2022-05-03 19:13:32 +05:30
palash-signoz
c969b5f329 chore: Private Wrapper is updated (#1029)
* chore: Private Wrapper is updated
2022-05-03 17:11:07 +05:30
palash-signoz
5bcf42d398 chore: isPasswordNotValidMessage is updated (#1030) 2022-05-03 17:10:39 +05:30
palash-signoz
c81b0b2a8b Auth Wrapper is updated (#1028) 2022-05-03 16:01:41 +05:30
palash-signoz
d52308c9b5 feat: some routes are removed from the date picker to display (#1023) 2022-05-03 15:59:12 +05:30
Pranshu Chittora
7948bca710 feat: resource attributes based filter for metrics (#1022)
* feat: resource attributes based filtering enabled
2022-05-03 15:41:40 +05:30
palash-signoz
29c0b43481 bug: Rows per page setting is now working (#1026) 2022-05-03 15:30:08 +05:30
palash-signoz
9351fd09c2 bug: exclude param is added in the getTagFilters (#1025) 2022-05-03 15:29:54 +05:30
palash-signoz
59f32884d2 Feat(UI): Auth (#1018)
* auth and rbac frontend changes
2022-05-03 15:27:09 +05:30
Ahsan Barkati
6ccdc5296e feat(auth): Add auth and access-controls support (#874)
* auth and rbac support enabled
2022-05-03 15:26:32 +05:30
Amol Umbark
3ef9d96678 Pagerduty - Create, Edit and Test Features (#1016)
* enabled sending alerts to pagerduty
2022-05-03 11:28:00 +05:30
Vishal Sharma
642ece288e perf: Query-service Performance Improvements (traces) (#838)
* feat: Update query-service Go version to 1.17 #911
* chore: Upgrade to clickhouse version v2 #751
* feat: Duration sorting in events table of Trace-filter page #826
* feat: Add grpc status code to traces view #975
* feat: added filtering by resource attributes #881
2022-05-03 11:20:57 +05:30
Pranshu Chittora
3ab4f71aa1 enhancement(FE): span time unit normalisation (#1021)
* feat: relevant span time unit on trace detail page

* fix: remove time unit on hover on flame graph
2022-04-29 14:33:57 +05:30
palash-signoz
b5be770a03 Merge pull request #1019 from palash-signoz/error-details-fix
bug: editor is updated in error details page

> Not sure about the product requirements though

tsc is giving an error on develop
2022-04-29 10:27:06 +05:30
Palash gupta
08e3428744 feat: editor is updated in error details page 2022-04-27 22:21:57 +05:30
palash-signoz
b335d440cf Feat: import export dashboard (#980)
* feat: added import & export functionality in dashboards
2022-04-25 22:41:46 +05:30
cui fliter
1293378c5c fix some typos (#976)
Signed-off-by: cuishuang <imcusg@gmail.com>
2022-04-22 20:04:37 +05:30
palash-signoz
5424c7714f feat: Error exception (#979)
* feat: error exception page is made
2022-04-22 20:03:08 +05:30
palash-signoz
95311db543 feat: tagkey length check is added (#999) 2022-04-22 19:45:14 +05:30
palash-signoz
bf52722689 bug: list of rules is fixed when a rule is created and the user comes back to all rules (#998) 2022-04-22 19:39:01 +05:30
Vishal Sharma
6064840dd1 feat: support gRPC status, method in trace table (#987) 2022-04-22 19:38:08 +05:30
Pranshu Chittora
182adc551c feat: dashboard search and filter (#1005)
* feat: enable search and filter in dashboards
2022-04-22 18:57:05 +05:30
Amol Umbark
2b5b79e34a (feature): UI for Test alert channels (#994)
* (feature): Implemented test channel function for webhook and slack
2022-04-22 16:56:18 +05:30
Amol Umbark
508c6ced80 (feature): API - Implement receiver/channel test functionality (#993)
* (feature): Added test receiver/channel functionality
2022-04-22 12:11:19 +05:30
Pranshu Chittora
3c2173de9e feat: new dashboard widget's option selection (#982)
* feat: new dashboard widget's option selection

* fix: overflowing legend

* feat: delete menu item is of type danger

* feat: added keyboard events onFocus and onBlur
2022-04-19 10:57:56 +05:30
palash-signoz
9a6bcaadf8 Merge pull request #1000 from palash-signoz/984-updated-response
feat: httpCode and httpStatus is updated to statusCode and method
2022-04-19 10:52:31 +05:30
Palash gupta
08bbb0259d feat: httpCode and httpStatus is updated to code and method 2022-04-19 00:21:30 +05:30
palash-signoz
93638d5615 Use fetch fix (#995)
* feat: useFetch in tag value is removed and moved to use query

* feat: useFetch in all channels is removed and moved to use query

* feat: useFetch in edit rule is removed and moved to use query

* feat: useFetch in general settings is removed and moved to use query

* feat: useFetch in all alerts is changed into use query
2022-04-18 15:24:51 +05:30
Ankit Nayan
844ca57686 Merge pull request #997 from SigNoz/dashboard-bug-fix
(bugfix): remove validation on post data id
2022-04-18 14:37:46 +05:30
Srikanth Chekuri
b2eec25f33 (bugfix): remove validation on post data id 2022-04-18 14:12:49 +05:30
Ankit Nayan
61d01fa2d5 feat: added action to verify that every pr has a linked issue 2022-04-15 11:32:19 +05:30
Ankit Nayan
a6bf6e4e07 Merge pull request #923 from SigNoz/uuid-server
chore: generate uuid on server side
2022-04-13 11:45:30 +05:30
palash-signoz
d454482f43 wdyr is updated (#981) 2022-04-11 17:17:31 +05:30
Pranay Prateek
f6aece6349 Update CONTRIBUTING.md 2022-04-08 10:13:36 -07:00
Pranay Prateek
dc9508269d Update commentLinesForSetup.sh
Updating frontend service line number
2022-04-08 10:12:58 -07:00
Pranay Prateek
a6c41f312d Update CONTRIBUTING.md 2022-04-08 10:09:24 -07:00
palash-signoz
f487f7420b Merge pull request #972 from palash-signoz/refetch-onwindow-focus
chore: refetchOnWindowFocus is made false to global level
2022-04-08 15:13:30 +05:30
palash-signoz
da8f3a6e81 Merge pull request #973 from palash-signoz/eslint-used-var-error
chore(eslint): @typescript-eslint/no-unused-vars is made to error
2022-04-08 15:13:20 +05:30
palash-signoz
d102c94670 bug: unused import is removed and two unwanted eslint rules are removed (#968) 2022-04-08 14:05:16 +05:30
palash-signoz
60288f7ba0 chore(refactor): Signup app layout (#969)
* feat: useFetch is upgraded to useFetchQueries

* chore: en-gb and common.json is updated over public locale
2022-04-08 13:49:33 +05:30
Palash gupta
0cbe17a315 chore(eslint): @typescript-eslint/no-unused-vars is made to error 2022-04-08 02:15:50 +05:30
Palash gupta
dce9f36a8e chore: refetchOnWindowFocus is made false to global level 2022-04-08 02:12:54 +05:30
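A minimal sketch of the change described above, assuming react-query v3: the flag moves from individual `useQuery` calls to the client's `defaultOptions`, so it applies globally:

```typescript
import { QueryClient } from 'react-query';

// Disable refetch-on-window-focus once, at the QueryClient level,
// instead of repeating the option on every useQuery call.
const queryClient = new QueryClient({
	defaultOptions: {
		queries: {
			refetchOnWindowFocus: false,
		},
	},
});

export default queryClient;
```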
Ankit Nayan
aa5100261d Merge pull request #956 from SigNoz/release/v0.7.5
Release/v0.7.5
2022-04-07 17:12:16 +05:30
Prashant Shahi
f4cc2a3a05 Merge branch 'develop' into release/v0.7.5 2022-04-07 15:32:44 +05:30
palash-signoz
041a5249b3 feat: onClick is added in the row (#966) 2022-04-07 13:11:27 +05:30
palash-signoz
a767697a86 Merge pull request #965 from palash-signoz/trace-filter-fix-incoming-from-metrics-page
Bug: Trace filter fix incoming from metrics page
2022-04-07 12:59:37 +05:30
palash-signoz
71cb70c62c Merge pull request #958 from palash-signoz/style-trace-search
bug: style-trace-search is fixed
2022-04-07 11:03:40 +05:30
Pranshu Chittora
647cabc4f4 feat: migrated trace detail to use query (#963)
* feat: migrated trace detail to use query

* fix: remove unused imports

* chore: useQuery config is updated

Co-authored-by: Palash gupta <palash@signoz.io>
2022-04-07 10:48:09 +05:30
Palash gupta
e864e33ad3 bug: value is updated on selection 2022-04-06 22:01:42 +05:30
Palash gupta
5bdbe792f5 chore: selected filter is not removed when user closes panel 2022-04-06 21:28:17 +05:30
Palash gupta
399efb0fb2 bug: trace filter bug are collapsed 2022-04-06 21:19:12 +05:30
palash-signoz
4b72de6884 Merge pull request #959 from pranshuchittora/pranshuchittora/fix/ttl-enhancements
fix: general setting TTL enhancements
2022-04-06 16:34:07 +05:30
palash-signoz
9f1473e7de Merge pull request #957 from palash-signoz/trace-table-current-page
bug: current page is updated in redux to see the updated values
2022-04-06 16:33:11 +05:30
Srikanth Chekuri
d6c4df8b4b Add remove TTL api, and do not allow zero or negative TTL 2022-04-06 16:29:10 +05:30
Palash gupta
7150971dc0 chore: style is updated 2022-04-06 16:28:39 +05:30
Pranshu Chittora
d0846b8dd2 fix: general setting TTL enhancements 2022-04-06 12:40:14 +05:30
Palash gupta
ead6885b29 bug: style-trace-search is fixed 2022-04-06 11:33:56 +05:30
Palash gupta
d72dacdc1f bug: current page is updated in redux to see the updated values 2022-04-06 10:52:46 +05:30
Prashant Shahi
1d6ddd4890 chore(install-script): 🩹 fix email condition
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-04-06 01:37:07 +05:30
Prashant Shahi
58daca1579 chore(docker-compose): 🔧 add restart policy and frontend dependency
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-04-06 00:52:48 +05:30
Prashant Shahi
1e522ad8f1 chore(release): 📌 pin 0.7.5 SigNoz version and 0.6.1 Alertmanager version
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-04-06 00:19:13 +05:30
Prashant Shahi
8809105a8d Prashant/deploy changes (#955)
- set information log level in clickhouse logger config
- maximum log size 150m (3 files of 50m each)

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-04-06 00:05:05 +05:30
palash-signoz
064c3e0449 chore: default text is updated (#954)
* text and title is updated
2022-04-05 23:13:12 +05:30
palash-signoz
2a348e916c feat: version page is added (#924)
* feat: 👔 getLatestVersion api is added

* chore: VERSION page is added

* feat:  version page is added

* chore: all string is grabbed from locale

* chore: warning is removed

* chore: translation json is added

* chore: feedback about version is added

* chore: made two different functions

* unused import is removed

* feat: version changes are updated

* chore: if current version is present then it is displayed
2022-04-05 18:21:25 +05:30
palash-signoz
5744193f50 Merge pull request #953 from pranshuchittora/pranshuchittora/fix/trace-detail-error-span-color
fix: error color for spans having error on trace detail page
2022-04-05 18:00:08 +05:30
Pranshu Chittora
ccf352f2db fix: error color for spans having error on trace detail page 2022-04-05 17:02:39 +05:30
palash-signoz
6e446dc0ab Merge pull request #949 from pranshuchittora/pranshuchittora/feat/ttl-s3
feat(FE): TTL/s3 integration
2022-04-05 16:28:41 +05:30
Pranshu Chittora
566c2becdf feat: dynamic step size for the data for graphs (#929)
* feat: dynamic step size for the data for graphs

* fix: remove console.log

* chore: add jest globals

* feat: add step size for dashboard

* chore: undo .eslintignore
2022-04-05 16:09:57 +05:30
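A minimal sketch of the idea behind PR #929 above (the helper name and constants are assumptions, not the merged code): derive the query step from the selected time range so graphs request a roughly constant number of points regardless of how wide the range is.

```typescript
const MAX_POINTS = 300; // assumed target number of points per graph
const MIN_STEP_SEC = 60; // assumed lower bound on the step

// Wider ranges get proportionally larger steps; narrow ranges clamp
// to the minimum step so the backend is never asked for sub-minute
// resolution.
export function getStep(startMs: number, endMs: number): number {
	const rangeSec = Math.max(0, (endMs - startMs) / 1000);
	return Math.max(MIN_STEP_SEC, Math.floor(rangeSec / MAX_POINTS));
}

// A 24h range yields a 288s step; a 30m range clamps to the 60s floor.
console.log(getStep(Date.now() - 86_400_000, Date.now()));
```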
Pranshu Chittora
3b3fd2b3a9 chore: update type 2022-04-05 16:09:04 +05:30
Pranshu Chittora
eae53d9eff feat: condition changes 2022-04-05 16:05:46 +05:30
Pranshu Chittora
42842b6b17 feat: i18n support for setting route names 2022-04-05 15:51:38 +05:30
Pranshu Chittora
95f8dfb4bc feat: i18n support for settings page 2022-04-05 15:44:01 +05:30
palash-signoz
a8c5934fc5 fix: Fix jest (#945)
* bug: jest is now fixed

* chore: files are included for the eslint

* chore: build is fixed

* test: jest test are fixed
2022-04-05 14:47:37 +05:30
palash-signoz
3f2a4d6eac bug: Trace filter page fixes (#846)
* order is added in the url
* local min max duration is kept in memory to show min and max even after filtering by duration
* checkbox ordering does not change when the user selects or un-selects a checkbox
2022-04-05 13:23:08 +05:30
Pranshu Chittora
170609a81f chore: type changes 2022-04-05 12:26:43 +05:30
Pranshu Chittora
76fccbbba4 fix: styling changes 2022-04-05 12:19:54 +05:30
palash-signoz
147ed9f24b chore: editor config is added (#818) 2022-04-05 11:19:06 +05:30
palash-signoz
a69bc321a9 Merge pull request #935 from pranshuchittora/pranshuchittora/feat/graph-memory-issue
feat: FE memory fixes and UX enhancements
2022-04-05 10:35:08 +05:30
Pranshu Chittora
c9e02a8b25 feat: final touches to the ttl 2022-04-05 00:06:13 +05:30
Pranshu Chittora
24d6a1e7b2 feat: s3 ttl validation 2022-04-04 19:38:23 +05:30
Nishidh Jain
a0efa63185 Fix(FE): Ask for confirmation before deleting any dashboard from dashboard list (#534)
* A confirmation dialog will pop up before deleting any dashboard

Co-authored-by: Palash gupta <palash@signoz.io>
2022-04-04 17:35:44 +05:30
Ankit Nayan
fd83cea9a0 chore: removing version api from being tracked (#950) 2022-04-04 17:07:21 +05:30
Pranshu Chittora
5be1eb58b2 feat: ttl api integration 2022-04-04 15:26:29 +05:30
Pranshu Chittora
8367c106bc Merge branch 'develop' of github.com:SigNoz/signoz into pranshuchittora/feat/ttl-s3 2022-04-04 15:06:33 +05:30
Pranshu Chittora
8064ae1f37 feat: ttl api integration 2022-04-04 15:06:06 +05:30
palash-signoz
ab4d9af442 Merge pull request #928 from palash-signoz/tag-value-suggestion
feat: Tag value suggestion
2022-04-04 12:52:34 +05:30
Palash gupta
eb0d3374d5 Merge branch 'develop' into tag-value-suggestion 2022-04-04 12:35:50 +05:30
palash-signoz
6c4c814b3f bug: pathname check is added (#948) 2022-04-04 10:25:15 +05:30
Palash gupta
32e8e48928 chore: behaviour for dropdown is updated 2022-04-04 08:24:28 +05:30
Naman Jain
53e7037f48 fix: run go vet to fix some issues with json tag (#936)
Co-authored-by: Naman Jain <jain_n@apple.com>
2022-04-02 16:15:03 +05:30
palash-signoz
a566b5dc97 bug: no service and loading check are added (#934) 2022-04-01 17:59:44 +05:30
Pranshu Chittora
4dc668fd13 fix: remove unused props 2022-04-01 17:43:56 +05:30
palash-signoz
d085506d3e bug: logged in check is added in the useEffect (#921) 2022-04-01 15:47:39 +05:30
palash-signoz
1b28a4e6f5 chore: links are updated for all dashboard and promql (#908) 2022-04-01 15:43:58 +05:30
Pranshu Chittora
20e924b116 feat: S3 TTL support 2022-04-01 15:12:30 +05:30
Ahsan Barkati
1d28ceb3d7 feat(query-service): Add cold storage support in getTTL API (#922)
* Add cold storage support in getTTL API
2022-04-01 11:22:25 +05:30
Ankit Nayan
0ff4c040bf Merge pull request #933 from SigNoz/release/v0.7.4
Release/v0.7.4
2022-03-29 23:32:49 +05:30
Prashant Shahi
1002ab553e chore(release): 📌 pin 0.7.4 SigNoz version and deployment changes
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-29 22:59:32 +05:30
Amol Umbark
3dc94c8da7 (fix): Duplicate alerts in triggered alerts (#932)
* (fix): Duplicate alerts in triggered alerts fixed by changing source api from /alert/groups to /alerts

* (fix): added comments for removed lines of group api call

* (fix): restored all getGroup
2022-03-29 19:59:40 +05:30
Pranshu Chittora
5a5aca2113 chore: remove unused code 2022-03-29 16:06:27 +05:30
Pranshu Chittora
cb22117a0f Merge branch 'develop' of github.com:SigNoz/signoz into pranshuchittora/feat/graph-memory-issue 2022-03-29 16:05:49 +05:30
Pranshu Chittora
739946fa47 fix: over memory allocation on Graph on big time range 2022-03-29 16:05:08 +05:30
Pranshu Chittora
7939902f03 fix: dashboard table element overflow (#930) 2022-03-29 12:24:03 +05:30
palash-signoz
d34e08fa3d chore: build.yml file is updated for more strict frontend checks (#906)
2022-03-29 11:13:26 +05:30
Palash gupta
5556d1d6fc feat: tag value suggestion is updated 2022-03-29 09:59:50 +05:30
Palash gupta
d4d1104a53 WIP: value suggestion is added 2022-03-29 00:02:56 +05:30
Palash gupta
225a345baa chore: getTagValue api is added 2022-03-29 00:02:16 +05:30
Prashant Shahi
31443dabe7 feat(query-service): integrate pprof
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-28 21:44:40 +05:30
Amol Umbark
0efb901863 feat: Amol/webhook (#868)
webhook receiver enabled for alerts

Co-authored-by: Palash gupta <palash@signoz.io>
2022-03-28 21:01:57 +05:30
Srikanth Chekuri
eb4abe900c chore: generate uuid on server side 2022-03-28 09:14:40 +05:30
palash-signoz
e7ba5f9f33 bug 🐛 : on click tag filter is now fixed (#916) 2022-03-25 19:16:07 +05:30
palash-signoz
995232e057 Merge pull request #914 from palash-signoz/tsc-fix
chore: tsc fix are updated over frontend
2022-03-25 15:39:27 +05:30
Palash gupta
cc5d47e3ee chore: updated the type any 2022-03-25 12:39:26 +05:30
Palash gupta
b1de6c1d7d chore: link is reverted 2022-03-25 12:35:38 +05:30
Palash gupta
84bfe11285 chore: tsc error are fixed 2022-03-25 12:33:52 +05:30
Pranshu Chittora
ca78947a55 fix: save unit on dashboard without hitting apply (#912) 2022-03-25 12:29:40 +05:30
palash-signoz
ac49f84982 Merge pull request #3 from pranshuchittora/pranshuchittora/fix/tsc-fixes
fix: tsc fixes
2022-03-25 12:12:08 +05:30
Pranshu Chittora
cc47f02ebf fix: tsc fixes 2022-03-25 12:03:57 +05:30
Palash gupta
ac70240b72 chore: some tsc fix 2022-03-24 15:39:33 +05:30
palash-signoz
78b1a750fa husky: pre-commit hook is added (#904) 2022-03-24 15:06:57 +05:30
palash-signoz
d5a6336239 Merge pull request #903 from pranshuchittora/pranshuchittora/feat/transformed-labels-on-tooltips
feat(FE): unit label on graph tooltip
2022-03-24 14:23:47 +05:30
palash-signoz
01bad0f18a chore: eslint fix (#884)
* chore: eslint is updated

* chore: some eslint fixes are made

* chore: some more eslint fix are updated

* chore: some eslint fix is made

* chore: styled components type is added

* chore: some more eslint fix are made

* chore: some more eslint fix are updated

* chore: some more eslint fix are updated

* fix: eslint fixes

Co-authored-by: Pranshu Chittora <pranshu@signoz.io>
2022-03-24 12:06:57 +05:30
Pranshu Chittora
1b79a9bf35 feat: unit label on graph tooltip 2022-03-24 11:44:38 +05:30
Ankit Nayan
0426bf06eb Merge pull request #901 from SigNoz/release/v0.7.3
Release/v0.7.3
2022-03-24 01:22:42 +05:30
Prashant Shahi
3d8354fb99 chore(release): 📌 pin 0.7.3 SigNoz version
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-24 00:52:32 +05:30
Prashant Shahi
696241b962 chore(install-script): 🔧 amazon-linux improvements and fixes (#900)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-24 00:46:43 +05:30
Pranshu Chittora
8a883f1b5e feat: unit selection for value graph on dashboard (#898) 2022-03-23 22:03:57 +05:30
palash-signoz
7765cee610 feat: onClick feature is updated (#895) 2022-03-23 19:44:26 +05:30
Ankit Nayan
b958bad81f Merge pull request #897 from palash-signoz/896-monaco-editor-change
chore: onChange event is added
2022-03-23 19:43:59 +05:30
Palash gupta
deff5d5e17 chore: onChange event is added 2022-03-23 19:03:05 +05:30
Ankit Nayan
44d3f35a5f Merge branch 'release/v0.7.2' into develop 2022-03-23 12:41:13 +05:30
Ankit Nayan
36d8bc7bc6 Merge pull request #891 from SigNoz/release/v0.7.2
Release/v0.7.2
2022-03-23 12:38:18 +05:30
Prashant Shahi
565dfd5b52 chore(release): 📌 pin 0.7.2 SigNoz version
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-23 12:06:14 +05:30
palash-signoz
897c5d2371 Merge pull request #890 from pranshuchittora/pranshuchittora/fix/832
fix: top endpoints table overflow
2022-03-23 11:48:09 +05:30
Pranshu Chittora
f22d5f0fbd fix: top endpoints table overflow 2022-03-23 11:44:37 +05:30
Prashant Shahi
8c56d04988 chore(test-framework): 🔧 expose query service port 2022-03-23 10:31:35 +05:30
Prashant Shahi
18cfc40982 chore(Makefile): 🔧 include missed slash and formatting
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-23 01:25:13 +05:30
Prashant Shahi
3c66f9d2dd chore(test-framework): 🔧 remove frontend service and use latest tag from arm env
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-23 01:04:31 +05:30
Prashant Shahi
4f3bb95a77 chore(makefile): 🔧 add down-x86 and down-arm targets (#887)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-23 00:51:24 +05:30
Prashant Shahi
8aa5eb78b2 chore: 🔧 set dimensions_cache_size in signozspanmetrics processor (#885)
* chore: 🔧  set dimensions_cache_size in signozspanmetrics processor
- add example usage of limit_percentage and spike_limit_percentage

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-23 00:03:47 +05:30
Prashant Shahi
79a1f79b7c chore: 🔧 Add targets to clear docker standalone and swarm data (#886)
- remove sudo from run-arm and run-x86 targets

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-23 00:02:50 +05:30
palash-signoz
1c5c65ddf7 bug: useHistory is removed and dashboard loading component is removed (#802)
* bug: useHistory is removed and dashboard loading component is removed

* chore: new dashboard is updated

* chore: new dashboard is updated

* chore: sidenav is updated

* chore: getX console is removed

* chore: sidenav is updated with correct pathname
2022-03-22 21:56:12 +05:30
palash-signoz
b2e78b9358 Merge pull request #883 from pranshuchittora/pranshuchittora/fix/trace-filter-group-by
fix: trace filter groupby selection is breaking the FE
2022-03-22 17:15:17 +05:30
Pranshu Chittora
5e02bfe2e4 fix: trace filter groupby selection is breaking the FE 2022-03-22 17:12:03 +05:30
palash-signoz
02d89a3a04 feat(FE): react-i18next is added (#789)
* chore: packages are added

* feat: i18next is added

* feat: translation for the signup is updated

* chore: package.json is updated
2022-03-22 16:22:41 +05:30
Pranshu Chittora
3ab0e1395a feat: data time, UI and graph label consistency across FE (#878)
* feat: data time and graph label consistency across FE

* feat: saved state of sidebar and horizontal scroll fix for trace filter page

* feat: add Y-Axis unit for missing metrics graphs

* chore: update node version from 12.18 to 12.22

* fix: 24hr time unit on graph
2022-03-22 16:22:02 +05:30
palash-signoz
f1f606844a chore: Eslint fix config (#882)
* chore: eslint config is updated

* chore: eslint auto fix is added
2022-03-22 12:10:31 +05:30
Ahsan Barkati
4e335054fb chore(tests): Add end-to-end testing system for query service (#867)
* Initial work on s3

* some more work

* Add policy api

* Cleanup

* Add multi-tier TTL and remove storagePolicy API

* Cleanup

* Typo fix

* Revert constants

* Cleanup

* Add API to get disks

* Add more validations

* Initial work on e2e tests

* Basic ttl test

* Add test which checks for objects in Minio

* Address comments

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-03-22 00:03:20 +05:30
Ahsan Barkati
c902a6bac8 feat(query-service): Add cold storage support (#837)
* Initial work on s3

* some more work

* Add policy api

* Cleanup

* Add multi-tier TTL and remove storagePolicy API

* Cleanup

* Typo fix

* Revert constants

* Cleanup

* Add API to get disks

* Add more validations

* Cleanup
2022-03-21 23:58:56 +05:30
palash-signoz
c00f0f159b fix: save layout bug is resolved (#840)
* fix: save layout bug is resolved

* chore: onClick is also added in the component slider

* chore: dashboard Id is added

* chore: non dashboard widget is filtered out

* chore: panel layout stack issue is resolved
2022-03-21 21:04:32 +05:30
Prashant Shahi
86bdb9a5ad chore: deployment config changes (#869)
* chore(install-script): 🔧 include missed sudo_cmd variable

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore: 🔧 add .gitkeep in folders to mount

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore(docker-swarm): 🔧 Update deploy configurations

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore(compose-yaml): 🔧 expose otlp ports and restart on failure policy

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-03-21 20:43:43 +05:30
Ankit Nayan
044f02c7c7 chore: version bumped for forked prometheus with clickhouse storage (#870) 2022-03-21 20:40:43 +05:30
Prashant Shahi
561d18efec chore(telemetry): add deployment type (#875)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-21 20:39:53 +05:30
palash-signoz
ab10a699b1 feat: timestamp is updated for selected start time (#852)
* feat: timestamp is updated for selected start time

* feat: startTime for the tree is updated
2022-03-17 11:56:25 +05:30
palash-signoz
e28733d246 feat: onClick in new tab is added (#842) 2022-03-16 23:26:45 +05:30
palash-signoz
a238123eb2 feat: monaco editor is updated (#851) 2022-03-16 23:24:27 +05:30
Prashant Shahi
4337ab5cd0 chore(install-script): remove mandatory sudo and digest improvement (#836)
* chore(install-script):  add fallback sha1sum command

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore(install-script):  remove mandatory sudo and digest improvement

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore(install-script): 🔧 use sudo_cmd variable

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore(install-script): 🔧 remove deprecated druid section

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore(install-script): 🔧 sudo prompt changes

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-16 22:59:20 +05:30
palash-signoz
7a3a3b8d89 bug: edit channel is fixed (#855) 2022-03-16 22:35:13 +05:30
palash-signoz
a9cbd12330 Merge pull request #861 from palash-signoz/trace-filter-fix-selected
chore: styled tab is updated to Tab from antd
2022-03-16 17:32:40 +05:30
Palash gupta
c320c20280 chore: styled tab is updated to Tab from antd 2022-03-16 17:09:27 +05:30
palash-signoz
b3c2fe75d3 Merge pull request #858 from palash-signoz/857-channels-dropdown
bug: global time selection dropdown is removed in the all channels page
2022-03-16 16:51:15 +05:30
Pranshu Chittora
95d3a27769 Merge pull request #845 from pranshuchittora/pranshuchittora/fix/trace-detail/events-error-handling
fix(FE): trace detail events error handling
2022-03-16 16:44:29 +05:30
palash-signoz
67f09c6def Merge pull request #860 from pranshuchittora/pranshuchittora/feat/y-axis-unit-selection
feat: y-axis units for pre-defined and dashboard graphs
2022-03-16 16:36:09 +05:30
Pranshu Chittora
eaeba43179 Merge pull request #835 from pranshuchittora/pc/feat/shared-styles-for-styled-components
feat(FE): new trace detail page code cleanup and enhancements
2022-03-16 16:29:50 +05:30
Pranshu Chittora
daadc584ea feat: update ts type 2022-03-16 16:28:11 +05:30
Pranshu Chittora
da8b16f588 feat: Updates TS type 2022-03-16 16:27:06 +05:30
Pranshu Chittora
17738a58a2 Merge branch 'develop' of github.com:SigNoz/signoz into pranshuchittora/feat/y-axis-unit-selection 2022-03-16 16:24:40 +05:30
Pranshu Chittora
3ebffae1c6 chore: revert eslint --debug flag 2022-03-16 16:16:20 +05:30
Pranshu Chittora
2ca67f1017 Merge pull request #844 from pranshuchittora/pranshuchittora/feat/x-axis-adaptive-lables
feat(FE): adaptive x axis time labels
2022-03-16 16:14:49 +05:30
Pranshu Chittora
00c7eccb0c fix: remove any type 2022-03-16 16:14:27 +05:30
Pranshu Chittora
7f3d9e2e35 feat: PR review changes 2022-03-16 16:05:21 +05:30
Pranshu Chittora
a95656b3a0 fix: x axis label when the time stamp is not parsed 2022-03-16 15:29:23 +05:30
Pranshu Chittora
9404768f9d Merge branch 'develop' of github.com:SigNoz/signoz into pc/feat/shared-styles-for-styled-components 2022-03-16 13:28:55 +05:30
Pranshu Chittora
7559445ebe Merge branch 'develop' of github.com:SigNoz/signoz into pranshuchittora/fix/trace-detail/events-error-handling 2022-03-16 13:10:26 +05:30
Pranshu Chittora
112766b265 Merge branch 'pranshuchittora/fix/trace-detail/events-error-handling' of github.com:pranshuchittora/signoz into pranshuchittora/fix/trace-detail/events-error-handling 2022-03-16 13:08:43 +05:30
Pranshu Chittora
ccf5af089d Merge pull request #856 from palash-signoz/eslint-fix
BUG(UI): eslint fixes are updated
2022-03-16 11:52:46 +05:30
Pranshu Chittora
0b6f31420b feat: y-axis units for pre-defined and dashboard graphs 2022-03-15 15:18:33 +05:30
Palash gupta
b4ce805c6f bug: global time selection dropdown is removed in the all channels page 2022-03-15 10:09:32 +05:30
Palash gupta
08f24fbdff chore: function is made async 2022-03-14 21:34:22 +05:30
Palash gupta
191925b418 Merge branch 'develop' into eslint-fix 2022-03-14 20:15:21 +05:30
Palash gupta
84b70c970f chore: eslint fixes are updated 2022-03-14 20:12:42 +05:30
Pranay Prateek
988ce36047 Update README.md 2022-03-11 18:54:28 +05:30
Pranay Prateek
24a4177a73 Update README.md 2022-03-11 18:53:37 +05:30
Pranshu Chittora
08ca3b7849 bug: timeline interval is updated
fix: add if condition for timeline interval

chore: remove mock response
2022-03-11 16:40:58 +05:30
Pranshu Chittora
d0723207c3 chore: remove mock response 2022-03-11 16:38:13 +05:30
Pranshu Chittora
16cf829ec3 Merge branch 'develop' of github.com:SigNoz/signoz into pranshuchittora/fix/trace-detail/events-error-handling 2022-03-11 16:37:15 +05:30
Pranshu Chittora
23f9949fad fix: trace detail events error handling 2022-03-11 16:32:11 +05:30
Pranshu Chittora
fafdd4b87f Merge pull request #828 from palash-signoz/timeline-fix
bug(FE): timeline interval is updated
2022-03-11 16:30:40 +05:30
Pranshu Chittora
f2ace729fd fix: linting fixes 2022-03-11 13:42:05 +05:30
Pranshu Chittora
f0c627eebe chore: add unit tests 2022-03-11 13:39:04 +05:30
Pranshu Chittora
1bf8e6bef6 feat: better x-axis labels 2022-03-11 11:20:52 +05:30
Ankit Nayan
f37e6ef1d1 Merge pull request #831 from SigNoz/prashant/e2e-k3s-changes
ci: 💚 fix e2e-k3s workflow as needed with the chart changes
2022-03-09 20:00:28 +05:30
Pranshu Chittora
c3ebbfa8ca feat: new trace detail page code enhancements and cleanup 2022-03-09 10:43:02 +05:30
Pranay Prateek
12970d6975 Update README.md 2022-03-08 20:56:59 +05:30
Vishal Sharma
1112ff7e7a Remove gitpod support temporarily (#834)
The Gitpod environment has issues that have to be resolved before it can be added to the contributing docs.
2022-03-08 19:55:19 +05:30
palash-signoz
09fd877b2a Merge pull request #1 from pranshuchittora/pc/fix/pr-828
fix: add if condition for timeline interval
2022-03-07 16:32:42 +05:30
Pranshu Chittora
c04c0284dc fix: add if condition for timeline interval 2022-03-07 16:08:37 +05:30
Prashant Shahi
239cdad57b ci: 💚 fix e2e-k3s workflow with chart changes
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-03-07 16:07:38 +05:30
Pranshu Chittora
314f95a914 feat(FE): shared styled for custom styled components 2022-03-07 14:32:17 +05:30
Pranay Prateek
e070ba61cd Update repo-stats.yml 2022-03-06 13:23:49 +05:30
Pranay Prateek
79576b476f Create repo-stats.yml (#829) 2022-03-06 13:22:37 +05:30
Palash gupta
8e4f987cf6 bug: timeline interval is updated 2022-03-06 12:02:21 +05:30
Axay sagathiya
3fe3bde0c7 Fix: Update Documentation to configure front-end and run back-end. (#815)
* fix: add all the steps to run query-service

* fix: add step to add configuration to run frontend
2022-03-05 23:51:48 +05:30
Ankit Nayan
efe57ff15d Merge pull request #825 from SigNoz/release/v0.7.1
Release/v0.7.1
2022-03-04 14:11:41 +05:30
Ankit Nayan
b8a6a27fad release: v0.7.1 2022-03-04 13:23:30 +05:30
Ankit Nayan
fb3dbcf662 Merge pull request #819 from pranshuchittora/pc/feat/trace-detail/eclear
fix: expand and unexpand on active span path
2022-03-04 13:16:46 +05:30
Ankit Nayan
cb04979bb7 Merge pull request #823 from pranshuchittora/pc/fix/yarn-install-node-gyp-error
fix: install deps node-gyp error
2022-03-04 13:16:20 +05:30
Ankit Nayan
967b83a5d0 Merge pull request #824 from palash-signoz/dashboard-fixes
bug(dashboard): useCallback is removed
2022-03-04 13:16:02 +05:30
Palash gupta
3fd086db4d dashboard: useCallback is removed 2022-03-04 13:10:40 +05:30
Pranshu Chittora
9aedcc1777 fix: install deps node-gyp error 2022-03-04 12:49:13 +05:30
Ankit Nayan
0fb5b90e4e Merge pull request #822 from SigNoz/release/v0.7.0
Release/v0.7.0
2022-03-04 12:12:12 +05:30
Ankit Nayan
91bdb77a0e Revert "Release/v0.7.0 (#814)" (#820)
This reverts commit eb63b6da2a.
2022-03-04 12:08:54 +05:30
Pranshu Chittora
e7d2bb13dc fix: expand and unexpand on active span path 2022-03-04 11:34:33 +05:30
palash-signoz
39c3f67d86 Merge pull request #816 from palash-signoz/eslint
feat(eslint): eslint-plugin-react-hooks is added
2022-03-04 11:33:00 +05:30
palash-signoz
49d1015a72 Merge pull request #817 from palash-signoz/sonar-eslint-plugin
feat(eslint): sonar js plugin for eslint is added
2022-03-04 11:32:45 +05:30
Palash gupta
f3b2f30c82 feat(eslint): sonar js plugin for eslint is added 2022-03-04 10:07:52 +05:30
Palash gupta
ff9d81aefc feat: eslint-plugin-react-hooks is added 2022-03-04 10:04:06 +05:30
Prashant Shahi
eb63b6da2a Release/v0.7.0 (#814)
* feat(FE): dynamic step size of metrics page

* chore(tests): migrate to dayjs for generating timestamp

* bug: sorting of date is fixed

* feat: sorting filter is added

* chore: typo is fixed

* feat(backend): support custom events in span

* fix: encode event string to fix parsing at frontend

* chore: styles is updated for the not found button

* chore: update otel-collector to 0.43.0

* fix: remove encoding

* fix: set userId as distinctId if failed to fetch IP

* Fe: Feat/trace detail (#764)

* feat: new trace detail page flame graph

* feat: new trace detail page layout

* test: trace detail is wip

* chore: trace details in wip

* feat: trace detail page timeline component

* chore: spantoTree is updated

* chore: gantchart is updated

* chore: onClick is added

* chore: isSpanPresentInSearchString util is added

* chore: trace graph is updated

* chore: added the hack to work

* feat: is span present util is added

* chore: in span ms is added

* chore: tooltip is updated

* WIP: chore: trace details changes are updated

* feat: getTraceItem is added

* feat: trace detail page is updated

* feat: trace detail styling changes

* feat: trace detail page is updated

* feat: implement span hover, select, focus and reset

* feat: reset focus

* feat: spanId as query table and unfurling

* feat: trace details is updated

* chore: remove lodash

* chore: remove lodash

* feat: trace details is updated

* feat: new trace detail page styling changes

* feat: new trace detail page styling changes

* feat: improved styling

* feat: remove horizontal scrolling

* feat: new trace detail page modify caret icon

* chore styles are updated

* Revert "chore: Trace styles"

* chore styles are updated

* feat: timeline normalisation

* chore: remove mock data

* chore: sort tree data util is added and selected span component is updated

* chore: trace changes are updated

* chore: trace changes are updated

* chore: trace changes are updated

* feat: refactored time units for new trace detail page

* chore: remove mockdata

* feat: new trace detail page themeing and interval loop fix

* chore: error tag is updated

* chore: error tag is updated

* chore: error tag is updated

* chore: error tag is updated

* chore: console is removed

* fix: error tag expand button

* chore: expanded panel is updated

* feat: scroll span from gantt chart intoview

* chore: trace detail is removed

Co-authored-by: Pranshu Chittora <pranshu@signoz.io>

* bug: Trace search bug is resolved (#741)

* bug: Trace search bug is resolved

* bug: Trace search bug is resolved

* chore: parseTagsToQuery is updated

* chore: parseTagsToQuery is updated

* chore: parseTagsToQuery is updated

* chore: parseTagsToQuery is updated

* chore: add hotrod template and install/delete scripts (#801)

* chore: add hotrod template and scripts

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* refactor:  conditionally compute image

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* fix: 🩹 add signoz namespace

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore: 🔨  fix namespace template in scripts

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* docs(hotrod): 📝 Add README for hotrod k8s

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Co-authored-by: Ankit Nayan <ankit@signoz.io>

* chore(release): 📌 pin SigNoz and OtelCollector versions

Signed-off-by: Prashant Shahi <prashant@signoz.io>

Co-authored-by: Pranshu Chittora <pranshu@signoz.io>
Co-authored-by: Palash gupta <palash@signoz.io>
Co-authored-by: makeavish <makeavish786@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-03-04 01:41:49 +05:30
Ankit Nayan
abe4940e74 Merge pull request #777 from SigNoz/release/v0.6.2
Release/v0.6.2
2022-02-25 22:15:05 +05:30
Ankit Nayan
8e621b6a70 Merge pull request #727 from SigNoz/prashant/hotrod-yaml
chore: 🔧 update default jaeger endpoint in hotrod manifest
2022-02-15 20:49:08 +05:30
Ankit Nayan
3302a84ab5 Merge pull request #714 from SigNoz/release/v0.6.1
Release: v0.6.1
2022-02-11 16:57:48 +05:30
1102 changed files with 67327 additions and 23066 deletions

.dockerignore (new file, +6 lines)

@@ -0,0 +1,6 @@
.git
.github
.vscode
README.md
deploy
sample-apps

.editorconfig (new file, +33 lines)

@@ -0,0 +1,33 @@
# EditorConfig is awesome: https://EditorConfig.org
# top-most EditorConfig file
root = true
# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,py}]
charset = utf-8
# 4 space indentation
[*.py]
indent_style = space
indent_size = 4
# Tab indentation (no size specified)
[Makefile]
indent_style = tab
# Indentation override for all JS under lib directory
[lib/**.js]
indent_style = space
indent_size = 2
# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2

.github/CODEOWNERS (vendored, 3 changed lines)

@@ -2,5 +2,6 @@
# Owners are automatically requested for review for PRs that changes code
# that they own.
* @ankitnayan
/frontend/ @palash-signoz @pranshuchittora
/frontend/ @palashgdev @pranshuchittora
/deploy/ @prashant-shahi
/pkg/query-service/ @srikanthccv


@@ -1,50 +1,27 @@
name: build-pipeline
on:
pull_request:
branches:
- develop
- main
- v*
paths:
- 'pkg/**'
- 'frontend/**'
- release/v*
jobs:
get_filters:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
frontend: ${{ steps.filter.outputs.frontend }}
query-service: ${{ steps.filter.outputs.query-service }}
flattener: ${{ steps.filter.outputs.flattener }}
steps:
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@v2
id: filter
with:
filters: |
frontend:
- 'frontend/**'
query-service:
- 'pkg/query-service/**'
flattener:
- 'pkg/processors/flattener/**'
build-frontend:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.frontend == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install dependencies
run: cd frontend && yarn install
- name: Run Prettier
run: cd frontend && npm run prettify
continue-on-error: true
- name: Run ESLint
run: cd frontend && npm run lint
continue-on-error: true
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
shell: bash
run: |
@@ -52,26 +29,20 @@ jobs:
build-query-service:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.query-service == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build query-service image
- name: Build query-service image
shell: bash
run: |
make build-query-service-amd64
build-flattener:
build-ee-query-service:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.flattener == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build flattener docker image
- name: Build EE query-service image
shell: bash
run: |
make build-flattener-amd64
make build-ee-query-service-amd64

.github/workflows/codeball.yml (new file, +17 lines)

@@ -0,0 +1,17 @@
name: Codeball
on: [pull_request]
jobs:
codeball_job:
runs-on: ubuntu-latest
name: Codeball
steps:
# Run Codeball on all new Pull Requests 🚀
# For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
- name: Codeball
uses: sturdy-dev/codeball-action@v2
with:
approvePullRequests: "true"
labelPullRequestsWhenApproved: "true"
labelPullRequestsWhenReviewNeeded: "false"
failJobsWhenReviewNeeded: "false"


@@ -0,0 +1,27 @@
on:
pull_request_target:
types:
- closed
env:
GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
PR_NUMBER: ${{ github.event.number }}
jobs:
create_issue_on_merge:
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest
steps:
- name: Checkout Codebase
uses: actions/checkout@v2
with:
repository: signoz/gh-bot
- name: Use Node v16
uses: actions/setup-node@v2
with:
node-version: 16
- name: Setup Cache & Install Dependencies
uses: bahmutov/npm-install@v1
with:
install-command: yarn --frozen-lockfile
- name: Comment on PR
run: node create-issue.js

.github/workflows/dependency-review.yml (new file, +22 lines)

@@ -0,0 +1,22 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency Review'
on: [pull_request]
permissions:
contents: read
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v3
- name: 'Dependency Review'
with:
fail-on-severity: high
uses: actions/dependency-review-action@v2


@@ -16,7 +16,9 @@ jobs:
uses: actions/checkout@v2
- name: Build query-service image
run: make build-query-service-amd64
env:
DEV_BUILD: 1
run: make build-ee-query-service-amd64
- name: Build frontend image
run: make build-frontend-amd64
@@ -52,14 +54,11 @@ jobs:
helm install my-release signoz/signoz -n platform \
--wait \
--timeout 10m0s \
--set cloud=null \
--set frontend.service.type=LoadBalancer \
--set query-service.image.tag=$DOCKER_TAG \
--set queryService.image.tag=$DOCKER_TAG \
--set frontend.image.tag=$DOCKER_TAG
# get pods, services and the container images
kubectl describe deploy/my-release-frontend -n platform | grep Image
kubectl describe statefulset/my-release-query-service -n platform | grep Image
kubectl get pods -n platform
kubectl get svc -n platform

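If you want to reproduce this workflow locally, here is a minimal sketch for reaching the installed UI; the release name and namespace are taken from the workflow above, and the frontend service name is assumed to follow the same pattern:

```bash
# Forward the frontend service and smoke-test it (assumed service name)
kubectl -n platform port-forward svc/my-release-frontend 3301:3301 &
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:3301   # expect 200
```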
.github/workflows/playwright.yaml (new file, +24 lines)

@@ -0,0 +1,24 @@
name: Playwright Tests
on: [pull_request]
jobs:
playwright:
defaults:
run:
working-directory: frontend
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
node-version: "16.x"
- name: Install dependencies
run: CI=1 yarn install
- name: Install Playwright
run: npx playwright install --with-deps
- name: Run Playwright tests
run: yarn playwright
env:
# This might depend on your test-runner/language binding
PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}

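To run the same suite locally against any reachable SigNoz instance, the workflow's steps translate roughly to the following; the base URL is a placeholder:

```bash
cd frontend
CI=1 yarn install                   # install dependencies, as in the workflow
npx playwright install --with-deps  # install browsers
PLAYWRIGHT_TEST_BASE_URL=http://localhost:3301 yarn playwright
```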

@@ -0,0 +1,20 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue
on:
pull_request:
types: [edited, synchronize, opened, reopened]
check_run:
jobs:
verify_linked_issue:
runs-on: ubuntu-latest
name: Ensure Pull Request has a linked issue.
steps:
- name: Verify Linked Issue
uses: hattan/verify-linked-issue-action@v1.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -11,6 +11,41 @@ on:
jobs:
image-build-and-push-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v1.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-query-service
image-build-and-push-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -43,7 +78,7 @@ jobs:
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-query-service
run: make build-push-ee-query-service
image-build-and-push-frontend:
runs-on: ubuntu-latest

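As a standalone sketch, the tag-derivation logic above behaves like this; the variable values are stand-ins for the `branch-names` action outputs:

```bash
is_tag=true              # steps.branch-name.outputs.is_tag
tag="v0.8.0"             # steps.branch-name.outputs.tag
current_branch="develop" # steps.branch-name.outputs.current_branch

if [ "$is_tag" == "true" ]; then
  echo "DOCKER_TAG=${tag:1}-oss"          # strips the leading 'v' -> 0.8.0-oss
elif [ "$current_branch" == "main" ]; then
  echo "DOCKER_TAG=latest-oss"
else
  echo "DOCKER_TAG=${current_branch}-oss" # e.g. develop-oss
fi
```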
.github/workflows/repo-stats.yml (new file, +25 lines)

@@ -0,0 +1,25 @@
on:
schedule:
# Run this once per day, towards the end of the day for keeping the most
# recent data point most meaningful (hours are interpreted in UTC).
- cron: "0 8 * * *"
workflow_dispatch: # Allow for running this manually.
jobs:
j1:
name: repostats
runs-on: ubuntu-latest
steps:
- name: run-ghrs
uses: jgehrcke/github-repo-stats@v1.1.0
with:
# Define the stats repository (the repo to fetch
# stats for and to generate the report for).
# Remove the parameter when the stats repository
# and the data repository are the same.
repository: signoz/signoz
# Set a GitHub API token that can read the stats
# repository, and that can push to the data
# repository (which this workflow file lives in),
# to store data and the report files.
ghtoken: ${{ github.token }}

.gitignore (vendored, 18 changed lines)

@@ -1,3 +1,4 @@
node_modules
yarn.lock
package.json
@@ -5,6 +6,7 @@ package.json
deploy/docker/environment_tiny/common_test
frontend/node_modules
frontend/.pnp
frontend/i18n-translations-hash.json
*.pnp.js
# testing
@@ -15,6 +17,7 @@ frontend/build
frontend/.vscode
frontend/.yarnclean
frontend/.temp_cache
frontend/test-results
# misc
.DS_Store
@@ -27,10 +30,6 @@ frontend/npm-debug.log*
frontend/yarn-debug.log*
frontend/yarn-error.log*
frontend/src/constants/env.ts
frontend/cypress/**/*.mp4
# env file for cypress
frontend/cypress.env.json
.idea
@@ -42,4 +41,15 @@ frontend/cypress.env.json
frontend/*.env
pkg/query-service/signoz.db
pkg/query-service/tests/test-deploy/data/
ee/query-service/signoz.db
ee/query-service/tests/test-deploy/data/
# local data
*.db
/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
bin/


@@ -4,4 +4,4 @@
# Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml changes.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz
sed -i 38,70's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
sed -i 38,62's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml
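For anyone unfamiliar with the sed idiom used here: `N,M's/.*/# &/'` rewrites every line in the range N..M to `# ` followed by the original line (`&`), which is how the script comments out a block of the compose file. A small illustration with the updated range; the file path mirrors the script above:

```bash
# Comment out lines 38..62 in place (same pattern as the script)
sed -i '38,62s/.*/# &/' deploy/docker/clickhouse-setup/docker-compose.yaml
# Or preview the effect without touching the file
sed '38,62s/.*/# &/' deploy/docker/clickhouse-setup/docker-compose.yaml | sed -n '36,40p'
```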


@@ -1,134 +1,368 @@
# How to Contribute
# Contributing Guidelines
There are primarily 2 areas in which you can contribute in SigNoz
## Welcome to SigNoz Contributing section 🎉
- Frontend ( written in Typescript, React)
- Backend - ( Query Service - written in Go)
Hi there! We're thrilled that you'd like to contribute to this project, thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.
> Please note: If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
- We accept contributions made to the [SigNoz `develop` branch]()
- Find all SigNoz Docker Hub images here
- [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
- [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
- [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)
> If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
## Finding contributions to work on 💬
# Develop Frontend
Looking at the existing issues is a great way to find something to contribute on.
Also, have a look at these [good first issues label](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with.
Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)
### Contribute to Frontend with Docker installation of SigNoz
## Sections:
- [General Instructions](#1-general-instructions-)
- [For Creating Issue(s)](#11-for-creating-issues)
- [For Pull Requests(s)](#12-for-pull-requests)
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
- [Develop Frontend](#3-develop-frontend-)
- [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
- [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
- [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
- [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
- [Other Ways to Contribute](#other-ways-to-contribute)
- `git clone https://github.com/SigNoz/signoz.git && cd signoz`
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L59`
- run `cd deploy` to move to deploy directory
- Install signoz locally without the frontend
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d`
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d`
- `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
- `yarn install`
- `yarn dev`
# 1. General Instructions 📝
> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
## 1.1 For Creating Issue(s)
Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.
### Contribute to Frontend without installing SigNoz backend
**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in #contributing channel in our [slack community](https://signoz.io/slack) and we will DM you with `<test environment URL>`
#### Details like these are incredibly useful:
- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
- Create a file `.env` with `FRONTEND_API_ENDPOINT=<test environment URL>`
- `yarn install`
- `yarn dev`
- **Requirement** - what kind of use case are you trying to solve?
- **Proposal** - what do you suggest to solve the problem or improve the existing
situation?
- Any open questions to address❓
**_Frontend should now be accessible at `http://localhost:3301/application`_**
#### If you are reporting a bug, details like these are incredibly useful:
# Contribute to Query-Service
- A reproducible test case or series of steps.
- The version of our code being used.
- Any modifications you've made relevant to the bug🐞.
- Anything unusual about your environment or deployment.
Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)
Discussing your proposed changes ahead of time will make the contribution
process smooth for everyone 🙌.
### To run ClickHouse setup (recommended for local development)
**[`^top^`](#)**
<hr>
- git clone https://github.com/SigNoz/signoz.git
- run `sudo make dev-setup` to configure local setup to run query-service
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L45`
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L28`
- add below configuration to clickhouse section at `docker/clickhouse-setup/docker-compose.yaml`
```
expose:
- 9000
## 1.2 For Pull Request(s)
Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a Pull Request.
Before sending us a pull request, please ensure that,
- Fork the SigNoz repo on GitHub, clone it on your machine.
- Create a branch with your changes.
- You are working against the latest source on the `develop` branch.
- Modify the source; please focus only on the specific change you are contributing.
- Ensure local tests pass.
- Commit to your fork using clear commit messages.
- Send us a pull request, answering any default questions in the pull request interface.
- Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, on your computer, merge main into your branch, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push up those changes.
- Once the change has been approved and merged, we will inform you in a comment.
GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
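Putting the checklist above together, a typical command-line flow might look like this; the username, branch name, and commit message are placeholders:

```bash
git clone https://github.com/<your-github-username>/signoz.git && cd signoz
git remote add upstream https://github.com/SigNoz/signoz.git
git checkout develop && git pull upstream develop  # work against the latest develop
git checkout -b fix/FE-my-change                   # a branch with your changes
# ...edit, run local tests...
git commit -am "fix(FE): short description of the change"
git push origin fix/FE-my-change                   # then open the PR on GitHub
```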
**Note:** Unless your change is small, **please** consider submitting separate Pull Request(s):
* 1⃣ First PR should include the overall structure of the new component:
* Readme, configuration, interfaces or base classes, etc...
* This PR is usually trivial to review, so the size limit does not apply to
it.
* 2⃣ Second PR should include the concrete implementation of the component. If the
size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into
multiple PRs.
* If there are multiple sub-component then ideally each one should be implemented as
a **separate** pull request.
* Last PR should include changes to **any user-facing documentation.** And should include
end-to-end tests if applicable. The component must be enabled
only after sufficient testing, and there is enough confidence in the
stability and quality of the component.
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [SLACK](https://signoz.io/slack).
### Pointers:
- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
<hr>
### Conventions to follow when submitting Commits and Pull Request(s).
We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/), more specifically the commits and PRs **should have type specifiers** prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.
e.g. If you are submitting a fix for an issue in frontend, the PR name should be prefixed with **`fix(FE):`**
- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.
- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
**[`^top^`](#)**
<hr>
# 2. How to Contribute 🙋🏻‍♂️
#### There are primarily 2 areas in which you can contribute to SigNoz
- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.
**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
⚠️ If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
**[`^top^`](#)**
<hr>
# 3. Develop Frontend 🌚
**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**
Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
## 3.1 Contribute to Frontend with Docker installation of SigNoz
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
![develop-frontend](https://user-images.githubusercontent.com/52788043/179009217-6692616b-17dc-4d27-b587-9d007098d739.jpeg)
- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
```
ports:
- 9001:9000
- "8080:8080"
```
<img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">
- Next run,
```
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
If you have backend api exposed via frontend nginx:
```
FRONTEND_API_ENDPOINT=http://localhost:3301
```
If not:
```
FRONTEND_API_ENDPOINT=http://localhost:8080
```
- Next,
```
yarn install
yarn dev
```
### Important Notes:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
**[`^top^`](#)**
## 3.2 Contribute to Frontend without installing SigNoz backend
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
- Clone the SigNoz repository and cd into signoz/frontend directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
```
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
- Next,
```
yarn install
yarn dev
```
Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `<test environment URL>`.
**Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)
**[`^top^`](#)**
<hr>
# 4. Contribute to Backend (Query-Service) 🌑
[**https://github.com/SigNoz/signoz/tree/develop/pkg/query-service**](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)
## 4.1 To run ClickHouse setup (recommended for local development)
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">
- Comment out `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)
<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">
- add below configuration to `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)
```
ports:
- 9001:9000
```
<img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">
- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
SIGNOZ_LOCAL_DB_PATH="./signoz.db"
```
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38)
- Now, install SigNoz locally **without** the `frontend` and `query-service`,
- If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
- If you are on `arm64` processors (Apple M1 Macs) run `sudo make run-arm`
#### Run locally,
```
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```
- Install signoz locally without the frontend and query-service
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo make run-x86`
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo make run-arm`
> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
#### Build and Run locally
```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```
**_Query Service should now be available at `http://localhost:8080`_**
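A quick smoke test once the service is up; this assumes the version endpoint is exposed at its usual path:

```bash
curl http://localhost:8080/api/v1/version
```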
#### Docker Images
The docker images of query-service are available at https://hub.docker.com/r/signoz/query-service
> If you want to see how the frontend plays with the query service, you can also run the frontend in your local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts`, as the query-service is now running at port `8080`
```
docker pull signoz/query-service
```
---
Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
```
docker pull signoz/query-service:latest
```
```
docker pull signoz/query-service:develop
```
### Important Note:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
Click the button below. A workspace with all required environments will be created.
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/SigNoz/signoz)
> To use it on your forked repo, edit the 'Open in Gitpod' button url to `https://gitpod.io/#https://github.com/<your-github-username>/signoz`
> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->
# Contribute to SigNoz Helm Chart
**[`^top^`](#)**
<hr>
Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).
# 5. Contribute to SigNoz Helm Chart 📊
### To run helm chart for local development
**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**
- run `git clone https://github.com/SigNoz/charts.git` followed by `cd charts`
- it is recommended to use a lightweight kubernetes (k8s) cluster for local development:
## 5.1 To run helm chart for local development
- Clone the SigNoz repository and cd into charts directory,
```
git clone https://github.com/SigNoz/charts.git && cd charts
```
- It is recommended to use a lightweight Kubernetes (k8s) cluster for local development:
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [k3d](https://k3d.io/#installation)
- [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
- run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart
- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster,
- run `make dev-install` to install SigNoz chart with `my-release` release name in `platform` namespace,
- next run,
```
kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
```
to make SigNoz UI available at [localhost:3301](http://localhost:3301)
**To load data with HotROD sample app:**
**5.1.1 To install the HotROD sample app:**
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```
**To stop the load generation:**
**5.1.2 To load data with the HotROD sample app:**
```sh
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```
**5.1.3 To stop the load generation:**
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
**5.1.4 To delete the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
| HOTROD_NAMESPACE=sample-application bash
```
**[`^top^`](#)**
---
## General Instructions
## Other Ways to Contribute
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).
There are many other ways to get involved with the community and to participate in this project:
- If you find any bugs, please create an issue
- If you find anything missing in documentation, you can create an issue with label **documentation**
- If you want to build any new feature, please create an issue with label `enhancement`
- If you want to discuss something about the product, start a new [discussion](https://github.com/SigNoz/signoz/discussions)
- Use the product, submitting GitHub issues when a problem is found.
- Help code review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.
### Conventions to follow when submitting commits, PRs
1. We try to follow https://www.conventionalcommits.org/en/v1.0.0/
Again, Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
More specifically the commits and PRs should have type specifiers prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.
e.g. If you are submitting a fix for an issue in frontend - PR name should be prefixed with `fix(FE):`
2. Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows
3. Feel free to ping us on `#contributing` or `#contributing-frontend` on our slack community if you need any help on this :)
Thank You!

View File

@@ -1,6 +1,10 @@
MIT License
Copyright (c) 2020-present SigNoz Inc.
Copyright (c) 2021 SigNoz
Portions of this software are licensed as follows:
* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
* All third party components incorporated into the SigNoz Software are licensed under the original license provided by the owner of the applicable component.
* Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -7,29 +7,36 @@ BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
# Internal variables or constants.
FRONTEND_DIRECTORY ?= frontend
FLATTENER_DIRECTORY ?= pkg/processors/flattener
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
LOCAL_GOOS ?= $(shell go env GOOS)
LOCAL_GOARCH ?= $(shell go env GOARCH)
REPONAME ?= signoz
DOCKER_TAG ?= latest
FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
FLATTERNER_DOCKER_IMAGE ?= flattener-processor
DEV_BUILD ?= ""
# Build-time Go variables
PACKAGE?=go.signoz.io/query-service
buildVersion=${PACKAGE}/version.buildVersion
buildHash=${PACKAGE}/version.buildHash
buildTime=${PACKAGE}/version.buildTime
gitBranch=${PACKAGE}/version.gitBranch
PACKAGE?=go.signoz.io/signoz
buildVersion=${PACKAGE}/pkg/query-service/version.buildVersion
buildHash=${PACKAGE}/pkg/query-service/version.buildHash
buildTime=${PACKAGE}/pkg/query-service/version.buildTime
gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
all: build-push-frontend build-push-query-service build-push-flattener
all: build-push-frontend build-push-query-service
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
@@ -38,7 +45,8 @@ build-frontend-amd64:
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) --build-arg TARGETPLATFORM="linux/amd64" .
docker build --file Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend(used in push pipeline)
build-push-frontend:
@@ -46,7 +54,8 @@ build-push-frontend:
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
@@ -55,34 +64,42 @@ build-query-service-amd64:
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@cd $(QUERY_SERVICE_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS)
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@cd $(QUERY_SERVICE_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane --no-cache \
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of flattener
.PHONY: build-flattener-amd64 build-push-flattener
# Step to build docker image of flattener in amd64 (used in build pipeline)
build-flattener-amd64:
# Step to build EE docker image of query service in amd64 (used in build pipeline)
build-ee-query-service-amd64:
@echo "------------------"
@echo "--> Building flattener docker image for amd64"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@cd $(FLATTENER_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
@if [ $(DEV_BUILD) != "" ]; then \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
else \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
fi
# Step to build and push docker image of flattener in amd64 (used in push pipeline)
build-push-flattener:
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
build-push-ee-query-service:
@echo "------------------"
@echo "--> Building and pushing flattener docker image"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@cd $(FLATTENER_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--progress plane --no-cache --push --platform linux/arm64,linux/amd64 \
--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
dev-setup:
mkdir -p /var/lib/signoz
@@ -92,8 +109,26 @@ dev-setup:
@echo "--> Local Setup completed"
@echo "------------------"
run-x86:
@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d
run-local:
@LOCAL_GOOS=$(LOCAL_GOOS) LOCAL_GOARCH=$(LOCAL_GOARCH) docker-compose -f \
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
up --build -d
run-arm:
@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.arm.yaml up -d
down-local:
@docker-compose -f \
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
down -v
run-x86:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
down-x86:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
clear-standalone-data:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhous*/* signoz/* zookeeper-*/*"
clear-swarm-data:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhous*/* signoz/* zookeeper-*/*"

View File

@@ -5,7 +5,6 @@
</p>
<p align="center">
<img alt="Lizenz" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
@@ -15,10 +14,10 @@
<h3 align="center">
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
<a href="https://twitter.com/SigNozHQ"><b>Twitter</b></a>
</h3>
##

View File

@@ -5,8 +5,7 @@
</p>
<p align="center">
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -15,9 +14,9 @@
<h3 align="center">
<a href="https://signoz.io/docs"><b>Documentation</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>ReadMe in German</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
@@ -26,16 +25,25 @@
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
👉 Visualise Metrics, Traces and Logs in a single pane of glass
👉 You can see metrics like p99 latency and error rates for your services, external API calls, and individual endpoints.
👉 You can find the root cause of a problem by going to the exact traces that are causing it and seeing detailed flamegraphs of individual request traces.
👉 Run aggregates on trace data to get business-relevant metrics
👉 Filter and query logs, build dashboards and alerts based on attributes in logs
![screenzy-1670570187181](https://user-images.githubusercontent.com/504541/206646629-829fdafe-70e2-4503-a9c4-1301b7918586.png)
<br />
![screenzy-1670570193901](https://user-images.githubusercontent.com/504541/206646676-a676fdeb-331c-4847-aea9-d1cabf7c47e1.png)
<br />
![screenzy-1670570199026](https://user-images.githubusercontent.com/504541/206646754-28c5534f-0377-428c-9c6e-5c7c0d9dd22d.png)
<br />
![screenzy-1670569888865](https://user-images.githubusercontent.com/504541/206645819-1e865a56-71b4-4fde-80cc-fbdb137a4da5.png)
![screenzy-1644432902955](https://user-images.githubusercontent.com/504541/153270713-1b2156e6-ec03-42de-975b-3c02b8ec1836.png)
![screenzy-1644432986784](https://user-images.githubusercontent.com/504541/153270725-0efb73b3-06ed-4207-bf13-9b7e2e17c4b8.png)
<br /><br />
@@ -51,12 +59,12 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋
## Features:
- Unified UI for metrics, traces and logs. No need to switch from Prometheus to Jaeger to debug issues, or use a logs tool like Elastic separate from your metrics and traces stack.
- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
- Slowest endpoints in your application
- See exact request traces to figure out issues in downstream services, slow DB queries, and calls to 3rd-party services like payment gateways
- Filter traces by service name, operation, latency, error, tags/annotations.
- Run aggregates on trace data (events/spans) to get business-relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.
<br /><br />
@@ -78,6 +86,12 @@ We support [OpenTelemetry](https://opentelemetry.io) as the library which you ca
- Python
- NodeJS
- Go
- PHP
- .NET
- Ruby
- Elixir
- Rust
You can find the complete list of languages here - https://opentelemetry.io/docs/
@@ -86,8 +100,7 @@ You can find the complete list of languages here - https://opentelemetry.io/docs
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
## Getting Started
### Deploy using Docker
Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using docker
@@ -100,7 +113,6 @@ The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshoo
### Deploy in Kubernetes using Helm
Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts
<br /><br />
@@ -110,7 +122,7 @@ Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_cha
### SigNoz vs Prometheus
Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great.
Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great.
Our goal is to provide an integrated UI between metrics & traces - similar to what SaaS vendors like Datadog provide - and give advanced filtering and aggregation over traces, something which Jaeger currently lacks.
@@ -118,24 +130,55 @@ Our goal is to provide an integrated UI between metrics & traces - similar to wh
### SigNoz vs Jaeger
Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management in our roadmap.
Jaeger only does distributed tracing. SigNoz supports metrics, traces and logs - all the 3 pillars of observability.
Moreover, SigNoz has a few more advanced features compared to Jaeger:
- Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't get aggregates on filtered traces. For example, the p99 latency of requests which have the tag customer_type='premium'. This can be done easily in SigNoz
<p>&nbsp </p>
### SigNoz vs Elastic
- SigNoz log management is based on ClickHouse, a columnar OLAP datastore that makes aggregate log analytics queries much more efficient
- 50% lower resource requirement compared to Elastic during ingestion
<p>&nbsp </p>
### SigNoz vs Loki
- SigNoz supports aggregations on high-cardinality data over huge volumes, while Loki doesn't.
- SigNoz supports indexes over high-cardinality data with no limit on the number of indexes, while Loki hits its max-streams limit once a few indexes are added.
- Searching over a huge volume of data is difficult and slow in Loki compared to SigNoz
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
## Contributing
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)
### Project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
@@ -154,11 +197,8 @@ Join the [slack community](https://signoz.io/slack) to know more about distribut
If you have any ideas, questions, or any feedback, please share on our [Github Discussions](https://github.com/SigNoz/signoz/discussions)
As always, thanks to our amazing contributors!
As always, thanks to our amazing contributors!
<a href="https://github.com/signoz/signoz/graphs/contributors">
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>

View File

@@ -5,7 +5,6 @@
</p>
<p align="center">
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">

View File

@@ -5,7 +5,6 @@
</p>
<p align="center">
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
@@ -14,14 +13,19 @@
##
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
👉 You can see performance matrices for services, external API calls, and the p99 latency and error rate of each endpoint.
👉 You can see performance metrics for services, external API calls, and the p99 latency and error rate of each endpoint.
👉 Identify what is causing a problem by going to the exact traces and viewing the flame graphs of individual requests, so you can find the root cause.
👉 Identify what is causing a problem by going to the exact traces and viewing the flame graphs of individual requests, so you can find the root cause.
👉 Aggregate trace data to get business-relevant metrics.
![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
![screenzy-1644432902955](https://user-images.githubusercontent.com/504541/153270713-1b2156e6-ec03-42de-975b-3c02b8ec1836.png)
<br />
![screenzy-1644432986784](https://user-images.githubusercontent.com/504541/153270725-0efb73b3-06ed-4207-bf13-9b7e2e17c4b8.png)
<br />
![screenzy-1647005040573](https://user-images.githubusercontent.com/504541/157875938-a3d57904-ea6d-4278-b929-bd1408d7f94c.png)
<br /><br />
@@ -37,12 +41,12 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
## Features:
- Application overview matrices such as RPS, 50th/90th/99th percentile latencies, and error rate
- Application overview metrics such as RPS, p50/p90/p99 latency quantiles, error rate, etc.
- Slowest endpoints in your application
- See exact request traces to analyze issues in downstream services, slow database queries, and calls to third-party services such as payment gateways
- Filter traces by service name, operation, latency, error, and tags
- Run matrix aggregations on filtered trace data. For example, get the error rate and p99 latency for the filters `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified matrix and trace UI. No need to switch from Prometheus to Jaeger to debug issues
- See the trace data of specific requests to analyze issues in downstream services, slow database queries, and calls to third-party services such as payment gateways
- Filter traces by service name, operation, latency, error, and tags.
- Run aggregates on trace data (events/spans) to get business-relevant metrics. For example, you can get the error rate and p99 latency for the filters `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch between Prometheus and Jaeger to troubleshoot issues.
<br /><br />
@@ -54,7 +58,7 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
We want to build a self-serve, open-source version of tools like DataDog and NewRelic for companies that have privacy and security concerns about customer data flowing to third-party vendors.
Being open source also gives you complete control over configuration, sampling, and release rates; you can build modules on top of SigNoz to meet specific business needs.
Being open source also gives you complete control over configuration, sampling, and uptime; you can build modules on top of SigNoz to meet specific business needs.
### Language Support
@@ -72,8 +76,8 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
## Getting Started
### Deploy Using Docker
Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using Docker
@@ -81,35 +85,34 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
If you run into any issues, this [troubleshooting guide](https://signoz.io/docs/deployment/troubleshooting) will help.
<p>&nbsp </p>
### Deploy on Kubernetes Using Helm
Please follow the steps [here](https://signoz.io/docs/deployment/helm_chart) to install using Helm charts
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
## Comparisons to Familiar Tools
## Comparison with Other Solutions
### SigNoz vs Prometheus
Prometheus is good if you just need matrices, but if you want to move seamlessly between matrices and traces, the current experience of stitching together Prometheus & Jaeger is not great.
Prometheus is good if you just need monitoring metrics, but if you want to switch seamlessly between metrics and traces, the current experience of stitching together Prometheus & Jaeger is not great.
Our goal is to provide an integrated UI for matrices and traces - similar to what SaaS vendors like Datadog provide - with the ability to filter and aggregate traces, which is something Jaeger currently lacks.
Our goal is to provide a unified UI for metrics and traces - similar to what SaaS vendors like Datadog provide - along with the ability to filter and aggregate traces, which is something Jaeger currently lacks.
<p>&nbsp </p>
### SigNoz vs Jaeger
Jaeger only does distributed tracing; SigNoz does both metrics and traces, and we also have log management on our roadmap.
Jaeger only does distributed tracing; SigNoz supports metrics, traces, and logs - the three pillars of observability.
Moreover, SigNoz has some advanced features that Jaeger doesn't:
- Jaeger UI cannot show matrices on traces or on filtered traces
- Jaeger cannot run aggregate operations on filtered traces, e.g. the p99 latency of all requests with the tag customer_type='premium'; this is easy to do in SigNoz.
- Jaeger UI cannot show metrics on traces or on filtered traces
- Jaeger cannot run aggregate operations on filtered traces, e.g. the p99 latency of all requests with the tag customer_type='premium'. This is easy to do in SigNoz.
<br /><br />
@@ -122,6 +125,23 @@ Jaeger只做分布式跟踪SigNoz则是做了矩阵和跟踪两块我们
Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community](https://signoz.io/slack).
### Project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />

18
SECURITY.md Normal file
View File

@@ -0,0 +1,18 @@
# Security Policy
SigNoz is looking forward to working with security researchers across the world to keep SigNoz and our users safe. If you have found an issue in our systems/applications, please reach out to us.
## Supported Versions
We always recommend using the latest version of SigNoz to ensure you get all security updates.
## Reporting a Vulnerability
If you believe you have found a security vulnerability within SigNoz, please let us know right away. We'll try and fix the problem as soon as possible.
**Do not report vulnerabilities using public GitHub issues**. Instead, email <security@signoz.io> with a detailed account of the issue. Please submit one issue per email; this helps us triage vulnerabilities.
Once we've received your email we'll keep you updated as we fix the vulnerability.
## Thanks
Thank you for keeping SigNoz and our users safe. 🙇

View File

@@ -0,0 +1,35 @@
global:
resolve_timeout: 1m
slack_api_url: 'https://hooks.slack.com/services/xxx'
route:
receiver: 'slack-notifications'
receivers:
- name: 'slack-notifications'
slack_configs:
- channel: '#alerts'
send_resolved: true
icon_url: https://avatars3.githubusercontent.com/u/3380462
title: |-
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
{{" "}}(
{{- with .CommonLabels.Remove .GroupLabels.Names }}
{{- range $index, $label := .SortedPairs -}}
{{ if $index }}, {{ end }}
{{- $label.Name }}="{{ $label.Value -}}"
{{- end }}
{{- end -}}
)
{{- end }}
text: >-
{{ range .Alerts -}}
*Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
*Description:* {{ .Annotations.description }}
*Details:*
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
{{ end }}
{{ end }}
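Before handing this file to Alertmanager, it is worth validating the Go templating in `title` and `text`. A sketch, assuming Prometheus Alertmanager's `amtool` is installed (it is not bundled with SigNoz):
```bash
# Check the configuration file for syntax and template errors
amtool check-config alertmanager.yml
```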

View File

@@ -0,0 +1,11 @@
groups:
- name: ExampleCPULoadGroup
rules:
- alert: HighCpuLoad
expr: system_cpu_load_average_1m > 0.1
for: 0m
labels:
severity: warning
annotations:
summary: High CPU load
description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View File

@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<clickhouse>
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<zookeeper>
<node index="1">
<host>zookeeper-1</host>
<port>2181</port>
</node>
<!-- <node index="2">
<host>zookeeper-2</host>
<port>2181</port>
</node>
<node index="3">
<host>zookeeper-3</host>
<port>2181</port>
</node> -->
</zookeeper>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<cluster>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
Also it will be nice if the following will be implemented:
- source hostname (see interserver_http_host), but then it will depends from DNS,
it can use IP address instead, but then the you need to get correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>clickhouse</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
<!-- <shard>
<replica>
<host>clickhouse-2</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>clickhouse-3</host>
<port>9000</port>
</replica>
</shard> -->
</cluster>
</remote_servers>
</clickhouse>
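Once ClickHouse starts with this file mounted under `config.d/`, the `cluster` definition becomes visible in `system.clusters`. A quick sanity check - a sketch, assuming the container is named `clickhouse` as in the compose files:
```bash
docker exec -it clickhouse clickhouse client -q \
"SELECT cluster, shard_num, replica_num, host_name, port FROM system.clusters WHERE cluster = 'cluster'"
```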

File diff suppressed because it is too large

View File

@@ -0,0 +1,29 @@
<?xml version="1.0"?>
<clickhouse>
<storage_configuration>
<disks>
<default>
<keep_free_space_bytes>10485760</keep_free_space_bytes>
</default>
<s3>
<type>s3</type>
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
<access_key_id>ACCESS-KEY-ID</access_key_id>
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
</s3>
</disks>
<policies>
<tiered>
<volumes>
<default>
<disk>default</disk>
</default>
<s3>
<disk>s3</disk>
<perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
</s3>
</volumes>
</tiered>
</policies>
</storage_configuration>
</clickhouse>
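After mounting this file (with real bucket credentials in place of the placeholders), you can confirm that ClickHouse registered the `s3` disk and the `tiered` policy. A sketch, again assuming the `clickhouse` container name:
```bash
docker exec -it clickhouse clickhouse client -q "SELECT name, type, path FROM system.disks"
docker exec -it clickhouse clickhouse client -q "SELECT policy_name, volume_name, disks FROM system.storage_policies"
```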

View File

@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
<!-- See also the files in users.d directory where the settings can be overridden. -->
<!-- Profiles of settings. -->
<profiles>
<!-- Default settings. -->
<default>
<!-- Maximum memory usage for processing single query, in bytes. -->
<max_memory_usage>10000000000</max_memory_usage>
<!-- How to choose between replicas during distributed query processing.
random - choose random replica from set of replicas with minimum number of errors
nearest_hostname - from set of replicas with minimum number of errors, choose replica
with minimum number of different symbols between replica's hostname and local hostname
(Hamming distance).
in_order - first live replica is chosen in specified order.
first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
-->
<load_balancing>random</load_balancing>
</default>
<!-- Profile that allows only read queries. -->
<readonly>
<readonly>1</readonly>
</readonly>
</profiles>
<!-- Users and ACL. -->
<users>
<!-- If user name was not specified, 'default' user is used. -->
<default>
<!-- See also the files in users.d directory where the password can be overridden.
Password could be specified in plaintext or in SHA256 (in hex format).
If you want to specify password in plaintext (not recommended), place it in 'password' element.
Example: <password>qwerty</password>.
Password could be empty.
If you want to specify SHA256, place it in 'password_sha256_hex' element.
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
place its name in 'server' element inside 'ldap' element.
Example: <ldap><server>my_ldap_server</server></ldap>
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
place 'kerberos' element instead of 'password' (and similar) elements.
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
whose initiator's realm matches it.
Example: <kerberos />
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
How to generate decent password:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
In first line will be password and in second - corresponding SHA256.
How to generate double SHA1:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
In first line will be password and in second - corresponding double SHA1.
-->
<password></password>
<!-- List of networks with open access.
To open access from everywhere, specify:
<ip>::/0</ip>
To open access only from localhost, specify:
<ip>::1</ip>
<ip>127.0.0.1</ip>
Each element of list has one of the following forms:
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
<host> Hostname. Example: server01.clickhouse.com.
To check access, DNS query is performed, and all received addresses compared to peer address.
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
To check access, DNS PTR query is performed for peer address and then regexp is applied.
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
Strongly recommended that regexp is ends with $
All results of DNS requests are cached till server restart.
-->
<networks>
<ip>::/0</ip>
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
<!-- User can create other users and grant rights to them. -->
<!-- <access_management>1</access_management> -->
</default>
</users>
<!-- Quotas. -->
<quotas>
<!-- Name of quota. -->
<default>
<!-- Limits for time interval. You could specify many intervals with different limits. -->
<interval>
<!-- Length of interval. -->
<duration>3600</duration>
<!-- No limits. Just calculate resource usage for time interval. -->
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</clickhouse>
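The comment block above already carries the password recipe; as a worked example of that exact command (output differs on every run):
```bash
PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
# First printed line is the password, second is the SHA256 to place in <password_sha256_hex>.
```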

View File

@@ -1,106 +1,239 @@
version: "3"
version: "3.9"
x-clickhouse-defaults: &clickhouse-defaults
image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true
deploy:
restart_policy:
condition: on-failure
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-clickhouse-depend: &clickhouse-depend
depends_on:
- clickhouse
# - clickhouse-2
# - clickhouse-3
services:
clickhouse:
image: yandex/clickhouse-server
expose:
- 8123
- 9000
ports:
- 9001:9000
- 8123:8123
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
- ./data/clickhouse/:/var/lib/clickhouse/
zookeeper-1:
image: bitnami/zookeeper:3.7.0
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
<<: *clickhouse-defaults
hostname: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
# clickhouse-2:
# <<: *clickhouse-defaults
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# clickhouse-3:
# <<: *clickhouse-defaults
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.23.0-0.2
volumes:
- ./data/alertmanager:/data
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
depends_on:
- query-service
deploy:
restart_policy:
condition: on-failure
query-service:
image: signoz/query-service:0.4.1
container_name: query-service
restart: always
image: signoz/query-service:0.13.0
command: ["-config=/root/config/prometheus.yml"]
ports:
- "8080:8080"
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
depends_on:
- clickhouse
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
timeout: 5s
retries: 3
deploy:
restart_policy:
condition: on-failure
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
image: signoz/frontend:0.13.0
deploy:
restart_policy:
condition: on-failure
depends_on:
- alertmanager
- query-service
links:
- "query-service"
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.4.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=2000"]
image: signoz/signoz-otel-collector:0.66.1
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
- DOCKER_MULTI_NODE_CLUSTER=false
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 legacy port
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # Health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
deploy:
mode: replicated
replicas: 3
depends_on:
- clickhouse
mode: global
restart_policy:
condition: on-failure
<<: *clickhouse-depend
otel-collector-hostmetrics:
image: signoz/otelcontribcol:0.4.0
command: ["--config=/etc/otel-collector-config-hostmetrics.yaml", "--mem-ballast-size-mib=683"]
otel-collector-metrics:
image: signoz/signoz-otel-collector:0.66.1
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-config-hostmetrics.yaml:/etc/otel-collector-config-hostmetrics.yaml
depends_on:
- clickhouse
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
# ports:
# - "1777:1777" # pprof extension
# - "8888:8888" # OtelCollector internal metrics
# - "13133:13133" # Health check extension
# - "55679:55679" # zPages extension
deploy:
restart_policy:
condition: on-failure
<<: *clickhouse-depend
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
image: jaegertracing/example-hotrod:1.30
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
logging:
options:
max-size: 50m
max-file: "3"
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
@@ -110,4 +243,4 @@ services:
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust
- ../common/locust-scripts:/locust
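This file targets Docker Swarm - note the `deploy:` blocks, the `mode: global` collector, and the `{{.Node.Hostname}}` service templates - so it is deployed with `docker stack`, not plain `docker-compose`. A minimal sketch, with `signoz` as an assumed stack name:
```bash
docker swarm init                            # once per node, if not already in swarm mode
docker stack deploy -c docker-compose.yaml signoz
docker stack services signoz                 # watch replicas come up
```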

View File

@@ -1,72 +0,0 @@
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
hostmetrics:
collection_interval: 60s
scrapers:
cpu:
load:
memory:
disk:
filesystem:
network:
# Data sources: metrics
prometheus:
config:
scrape_configs:
- job_name: "otel-collector"
dns_sd_configs:
- names:
- 'tasks.signoz_otel-collector'
type: 'A'
port: 8888
- job_name: "otel-collector-hostmetrics"
scrape_interval: 10s
static_configs:
- targets: ["otel-collector-hostmetrics:8888"]
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [batch]
exporters: [clickhouse]
metrics:
receivers: [otlp, prometheus, hostmetrics]
processors: [batch]
exporters: [clickhousemetricswrite]

View File

@@ -1,47 +1,159 @@
receivers:
filelog/dockercontainers:
include: [ "/var/lib/docker/containers/*/*.log" ]
start_at: end
include_file_path: true
include_file_name: false
operators:
- type: json_parser
id: parser-docker
output: extract_metadata_from_filepath
timestamp:
parse_from: attributes.time
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: regex_parser
id: extract_metadata_from_filepath
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
parse_from: attributes["log.file.path"]
output: parse_body
- type: move
id: parse_body
from: attributes.log
to: body
output: time
- type: remove
id: time
field: attributes.time
opencensus:
endpoint: 0.0.0.0:55678
otlp/spanmetrics:
protocols:
grpc:
endpoint: localhost:12345
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
jaeger:
protocols:
grpc:
endpoint: 0.0.0.0:14250
thrift_http:
endpoint: 0.0.0.0:14268
# thrift_compact:
# endpoint: 0.0.0.0:6831
# thrift_binary:
# endpoint: 0.0.0.0:6832
hostmetrics:
collection_interval: 30s
scrapers:
cpu: {}
load: {}
memory: {}
disk: {}
filesystem: {}
network: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
# otel-collector internal metrics
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
processors:
batch:
send_batch_size: 1000
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
timeout: 2s
signozspanmetrics/prometheus:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
# # 25% of limit up to 2G
# spike_limit_mib: 512
# check_interval: 5s
#
# # 50% of the maximum memory
# limit_percentage: 50
# # 20% of max memory usage spike expected
# spike_limit_percentage: 20
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
prometheus:
endpoint: 0.0.0.0:8889
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 5s
sending_queue:
queue_size: 100
retry_on_failure:
enabled: true
initial_interval: 5s
max_interval: 30s
max_elapsed_time: 300s
extensions:
health_check:
endpoint: 0.0.0.0:13133
zpages:
endpoint: 0.0.0.0:55679
pprof:
endpoint: 0.0.0.0:1777
service:
extensions: [health_check, zpages]
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages, pprof]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [batch]
exporters: [clickhouse]
processors: [signozspanmetrics/prometheus, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
exporters: [clickhousemetricswrite]
metrics/generic:
receivers: [hostmetrics, prometheus]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
logs:
receivers: [otlp, filelog/dockercontainers]
processors: [batch]
exporters: [clickhouselogsexporter]
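With the collector running, the `health_check` extension answers on port 13133 and the OTLP HTTP receiver on 4318, so the traces pipeline can be smoke-tested without an SDK. A sketch, run on a node where those ports are published; the span payload is hand-rolled and illustrative, with arbitrary hex IDs:
```bash
curl -s http://localhost:13133              # health_check extension
curl -s -X POST http://localhost:4318/v1/traces \
-H 'Content-Type: application/json' \
-d '{"resourceSpans":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"smoke-test"}}]},"scopeSpans":[{"spans":[{"traceId":"5b8aa5a2d2c872e8321cf37308d69df2","spanId":"051581bf3cb55c13","name":"smoke-span","kind":1,"startTimeUnixNano":"1672531200000000000","endTimeUnixNano":"1672531201000000000"}]}]}]}'
```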

View File

@@ -0,0 +1,62 @@
receivers:
prometheus:
config:
scrape_configs:
# otel-collector-metrics internal metrics
- job_name: otel-collector-metrics
scrape_interval: 60s
static_configs:
- targets:
- localhost:8888
# SigNoz span metrics
- job_name: signozspanmetrics-collector
scrape_interval: 60s
dns_sd_configs:
- names:
- tasks.otel-collector
type: A
port: 8889
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
# # 25% of limit up to 2G
# spike_limit_mib: 512
# check_interval: 5s
#
# # 50% of the maximum memory
# limit_percentage: 50
# # 20% of max memory usage spike expected
# spike_limit_percentage: 20
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
exporters:
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
extensions:
health_check:
endpoint: 0.0.0.0:13133
zpages:
endpoint: 0.0.0.0:55679
pprof:
endpoint: 0.0.0.0:1777
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages, pprof]
pipelines:
metrics:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite]

View File

@@ -9,17 +9,17 @@ alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
- alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
scrape_configs: []
remote_read:
- url: tcp://clickhouse:9000/?database=signoz_metrics
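The net effect of this change: the query service reads metrics straight out of ClickHouse via `remote_read`, evaluates `alerts.yml`, and ships firing alerts to the bundled Alertmanager; no scraping happens here. To confirm the Alertmanager target is reachable - a sketch, run wherever the `alertmanager` hostname resolves, e.g. inside the compose network:
```bash
curl -s http://alertmanager:9093/api/v2/status
```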

View File

@@ -1,30 +1,43 @@
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_http_version 1.1;
# to handle uri issue 414 from nginx
client_max_body_size 24M;
large_client_header_buffers 8 128k;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
if ( $uri = '/index.html' ) {
add_header Cache-Control no-store always;
}
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api/alertmanager {
proxy_pass http://alertmanager:9093/api/v2;
}
location /api {
proxy_pass http://query-service:8080/api;
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
root /usr/share/nginx/html;
}
}
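With this config, the frontend container is the single entry point on port 3301: static assets come from `/usr/share/nginx/html`, `/api/alertmanager` is rewritten to Alertmanager's v2 API, and everything else under `/api` is proxied to the query service with a 600s read timeout so live-tail connections stay open. A sketch of a quick probe from the host, assuming port 3301 is published:
```bash
curl -sI http://localhost:3301/ | head -n 1        # served by nginx directly
curl -s  http://localhost:3301/api/v1/version      # proxied through to query-service
```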

View File

@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<clickhouse>
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<zookeeper>
<node index="1">
<host>zookeeper-1</host>
<port>2181</port>
</node>
<!-- <node index="2">
<host>zookeeper-2</host>
<port>2181</port>
</node>
<node index="3">
<host>zookeeper-3</host>
<port>2181</port>
</node> -->
</zookeeper>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<cluster>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
Also it will be nice if the following will be implemented:
- source hostname (see interserver_http_host), but then it will depends from DNS,
it can use IP address instead, but then the you need to get correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>clickhouse</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
<!-- <shard>
<replica>
<host>clickhouse-2</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>clickhouse-3</host>
<port>9000</port>
</replica>
</shard> -->
</cluster>
</remote_servers>
</clickhouse>

File diff suppressed because it is too large

View File

@@ -0,0 +1,29 @@
<?xml version="1.0"?>
<clickhouse>
<storage_configuration>
<disks>
<default>
<keep_free_space_bytes>10485760</keep_free_space_bytes>
</default>
<s3>
<type>s3</type>
<endpoint>https://BUCKET-NAME.s3.amazonaws.com/data/</endpoint>
<access_key_id>ACCESS-KEY-ID</access_key_id>
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
</s3>
</disks>
<policies>
<tiered>
<volumes>
<default>
<disk>default</disk>
</default>
<s3>
<disk>s3</disk>
<perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
</s3>
</volumes>
</tiered>
</policies>
</storage_configuration>
</clickhouse>

View File

@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
<!-- See also the files in users.d directory where the settings can be overridden. -->
<!-- Profiles of settings. -->
<profiles>
<!-- Default settings. -->
<default>
<!-- Maximum memory usage for processing single query, in bytes. -->
<max_memory_usage>10000000000</max_memory_usage>
<!-- How to choose between replicas during distributed query processing.
random - choose random replica from set of replicas with minimum number of errors
nearest_hostname - from set of replicas with minimum number of errors, choose replica
with minimum number of different symbols between replica's hostname and local hostname
(Hamming distance).
in_order - first live replica is chosen in specified order.
first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
-->
<load_balancing>random</load_balancing>
</default>
<!-- Profile that allows only read queries. -->
<readonly>
<readonly>1</readonly>
</readonly>
</profiles>
<!-- Users and ACL. -->
<users>
<!-- If user name was not specified, 'default' user is used. -->
<default>
<!-- See also the files in users.d directory where the password can be overridden.
Password could be specified in plaintext or in SHA256 (in hex format).
If you want to specify password in plaintext (not recommended), place it in 'password' element.
Example: <password>qwerty</password>.
Password could be empty.
If you want to specify SHA256, place it in 'password_sha256_hex' element.
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
place its name in 'server' element inside 'ldap' element.
Example: <ldap><server>my_ldap_server</server></ldap>
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
place 'kerberos' element instead of 'password' (and similar) elements.
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
whose initiator's realm matches it.
Example: <kerberos />
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
How to generate decent password:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
In first line will be password and in second - corresponding SHA256.
How to generate double SHA1:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
In first line will be password and in second - corresponding double SHA1.
-->
<password></password>
<!-- List of networks with open access.
To open access from everywhere, specify:
<ip>::/0</ip>
To open access only from localhost, specify:
<ip>::1</ip>
<ip>127.0.0.1</ip>
Each element of list has one of the following forms:
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
<host> Hostname. Example: server01.clickhouse.com.
To check access, DNS query is performed, and all received addresses compared to peer address.
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
To check access, DNS PTR query is performed for peer address and then regexp is applied.
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
Strongly recommended that regexp is ends with $
All results of DNS requests are cached till server restart.
-->
<networks>
<ip>::/0</ip>
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
<!-- User can create other users and grant rights to them. -->
<!-- <access_management>1</access_management> -->
</default>
</users>
<!-- Quotas. -->
<quotas>
<!-- Name of quota. -->
<default>
<!-- Limits for time interval. You could specify many intervals with different limits. -->
<interval>
<!-- Length of interval. -->
<duration>3600</duration>
<!-- No limits. Just calculate resource usage for time interval. -->
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</clickhouse>

View File

@@ -0,0 +1,108 @@
version: "2.4"
services:
clickhouse:
image: clickhouse/clickhouse-server:22.8.8-alpine
container_name: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
tty: true
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
alertmanager:
container_name: alertmanager
image: signoz/alertmanager:0.23.0-0.2
volumes:
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
# Note for maintainers/contributors who change line numbers in the frontend & query-service sections: please update the line numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: otel-collector
image: signoz/signoz-otel-collector:0.66.1
command: ["--config=/etc/otel-collector-config.yaml"]
# user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-metrics:
container_name: otel-collector-metrics
image: signoz/signoz-otel-collector:0.66.1
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
# ports:
# - "1777:1777" # pprof extension
# - "8888:8888" # OtelCollector internal metrics
# - "13133:13133" # Health check extension
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust
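To sanity-check this file (hypothetical usage; it assumes the file is saved as docker-compose.yaml in the current directory and Compose v2 is installed):
docker compose up -d clickhouse
# run the same probe the healthcheck above uses, from inside the container:
docker compose exec clickhouse wget --spider -q localhost:8123/ping && echo healthy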


@@ -0,0 +1,56 @@
version: "2.4"
services:
query-service:
hostname: query-service
build:
context: "../../../pkg/query-service"
dockerfile: "./Dockerfile"
args:
LDFLAGS: ""
TARGETPLATFORM: "${LOCAL_GOOS}/${LOCAL_GOARCH}"
container_name: query-service
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
command: ["-config=/root/config/prometheus.yml"]
ports:
- "6060:6060"
- "8080:8080"
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
timeout: 5s
retries: 3
depends_on:
clickhouse:
condition: service_healthy
frontend:
build:
context: "../../../frontend"
dockerfile: "./Dockerfile"
args:
TARGETOS: "${LOCAL_GOOS}"
TARGETPLATFORM: "${LOCAL_GOARCH}"
container_name: frontend
environment:
- FRONTEND_API_ENDPOINT=http://query-service:8080
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
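A minimal local-build walkthrough for the two services above (a sketch; the LOCAL_GOOS/LOCAL_GOARCH values are illustrative and must match your host):
export LOCAL_GOOS=linux LOCAL_GOARCH=amd64
docker compose build query-service frontend
docker compose up -d
curl -s localhost:8080/api/v1/version   # the same endpoint the query-service healthcheck probes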


@@ -1,98 +0,0 @@
version: "2.4"
services:
clickhouse:
image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
alertmanager:
image: signoz/alertmanager:0.5.0
volumes:
- ./alertmanager.yml:/prometheus/alertmanager.yml
- ./data/alertmanager:/data
command:
- '--config.file=/prometheus/alertmanager.yml'
- '--storage.path=/data'
query-service:
image: signoz/query-service:0.7.0
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.7.0
container_name: frontend
depends_on:
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.43.0
command: ["--config=/etc/otel-collector-config.yaml"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "4317:4317" # OTLP GRPC receiver
mem_limit: 2000m
restart: always
depends_on:
clickhouse:
condition: service_healthy
otel-collector-metrics:
image: signoz/otelcontribcol:0.43.0
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
depends_on:
clickhouse:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust


@@ -1,51 +1,184 @@
version: "2.4"
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
image: clickhouse/clickhouse-server:22.8.8-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-clickhouse-depend: &clickhouse-depend
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.0
container_name: zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
image: yandex/clickhouse-server:21.12.3.32
<<: *clickhouse-defaults
container_name: clickhouse
hostname: clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.5.0
image: signoz/alertmanager:0.23.0-0.2
volumes:
- ./alertmanager.yml:/prometheus/alertmanager.yml
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- '--config.file=/prometheus/alertmanager.yml'
- '--storage.path=/data'
- --queryService.url=http://query-service:8085
- --storage.path=/data
# Note for maintainers/contributors who change line numbers in the frontend & query-service sections: please update the line numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:0.7.0
image: signoz/query-service:0.13.0
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
depends_on:
clickhouse:
condition: service_healthy
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
timeout: 5s
retries: 3
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.7.0
image: signoz/frontend:0.13.0
container_name: frontend
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
@@ -53,37 +186,53 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.43.0
image: signoz/signoz-otel-collector:0.66.1
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- DOCKER_MULTI_NODE_CLUSTER=false
ports:
- "4317:4317" # OTLP GRPC receiver
mem_limit: 2000m
restart: always
depends_on:
clickhouse:
condition: service_healthy
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
<<: *clickhouse-depend
otel-collector-metrics:
image: signoz/otelcontribcol:0.43.0
image: signoz/signoz-otel-collector:0.66.1
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
depends_on:
clickhouse:
condition: service_healthy
# ports:
# - "1777:1777" # pprof extension
# - "8888:8888" # OtelCollector internal metrics
# - "13133:13133" # Health check extension
# - "55679:55679" # zPages extension
restart: on-failure
<<: *clickhouse-depend
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"


@@ -1,67 +1,163 @@
receivers:
filelog/dockercontainers:
include: [ "/var/lib/docker/containers/*/*.log" ]
start_at: end
include_file_path: true
include_file_name: false
operators:
- type: json_parser
id: parser-docker
output: extract_metadata_from_filepath
timestamp:
parse_from: attributes.time
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: regex_parser
id: extract_metadata_from_filepath
regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
parse_from: attributes["log.file.path"]
output: parse_body
- type: move
id: parse_body
from: attributes.log
to: body
output: time
- type: remove
id: time
field: attributes.time
opencensus:
endpoint: 0.0.0.0:55678
otlp/spanmetrics:
protocols:
grpc:
endpoint: "localhost:12345"
endpoint: localhost:12345
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
jaeger:
protocols:
grpc:
endpoint: 0.0.0.0:14250
thrift_http:
endpoint: 0.0.0.0:14268
# thrift_compact:
# endpoint: 0.0.0.0:6831
# thrift_binary:
# endpoint: 0.0.0.0:6832
hostmetrics:
collection_interval: 30s
scrapers:
cpu:
load:
memory:
disk:
filesystem:
network:
cpu: {}
load: {}
memory: {}
disk: {}
filesystem: {}
network: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
# otel-collector internal metrics
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
processors:
batch:
send_batch_size: 1000
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
signozspanmetrics/prometheus:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# memory_limiter:
# # Same as --mem-ballast-size-mib CLI argument
# ballast_size_mib: 683
# # 80% of maximum memory up to 2G
# limit_mib: 1500
# # 25% of limit up to 2G
# spike_limit_mib: 512
# check_interval: 5s
#
# # 50% of the maximum memory
# limit_percentage: 50
# # 20% of max memory usage spike expected
# spike_limit_percentage: 20
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
resourcedetection:
# The env detector adds custom labels from the OTEL_RESOURCE_ATTRIBUTES environment variable.
detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
timeout: 2s
extensions:
health_check: {}
zpages: {}
health_check:
endpoint: 0.0.0.0:13133
zpages:
endpoint: 0.0.0.0:55679
pprof:
endpoint: 0.0.0.0:1777
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
prometheus:
endpoint: "0.0.0.0:8889"
endpoint: 0.0.0.0:8889
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
timeout: 5s
sending_queue:
queue_size: 100
retry_on_failure:
enabled: true
initial_interval: 5s
max_interval: 30s
max_elapsed_time: 300s
service:
extensions: [health_check, zpages]
telemetry:
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- zpages
- pprof
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/prometheus, batch]
exporters: [clickhouse]
exporters: [clickhousetraces]
metrics:
receivers: [otlp, hostmetrics]
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
receivers: [hostmetrics, prometheus]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
exporters: [prometheus]
logs:
receivers: [otlp, filelog/dockercontainers]
processors: [batch]
exporters: [clickhouselogsexporter]
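Since the health_check and zpages extensions above now bind explicit endpoints, they can be probed from inside the compose network (a sketch; it assumes wget is available in the collector image, and /debug/tracez is the conventional zPages path):
docker compose exec otel-collector wget -qO- localhost:13133                 # health_check extension
docker compose exec otel-collector wget -qO- localhost:55679/debug/tracez   # zPages extension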


@@ -3,42 +3,65 @@ receivers:
protocols:
grpc:
http:
# Data sources: metrics
prometheus:
config:
scrape_configs:
- job_name: "otel-collector"
scrape_interval: 30s
# otel-collector-metrics internal metrics
- job_name: otel-collector-metrics
scrape_interval: 60s
static_configs:
- targets: ["otel-collector:8889"]
- targets:
- localhost:8888
# SigNoz span metrics
- job_name: signozspanmetrics-collector
scrape_interval: 60s
static_configs:
- targets:
- otel-collector:8889
processors:
batch:
send_batch_size: 1000
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
# memory_limiter:
# # Same as --mem-ballast-size-mib CLI argument
# ballast_size_mib: 683
# # 80% of maximum memory up to 2G
# limit_mib: 1500
# # 25% of limit up to 2G
# spike_limit_mib: 512
# check_interval: 5s
#
# # 50% of the maximum memory
# limit_percentage: 50
# # 20% of max memory usage spike expected
# spike_limit_percentage: 20
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
health_check:
endpoint: 0.0.0.0:13133
zpages:
endpoint: 0.0.0.0:55679
pprof:
endpoint: 0.0.0.0:1777
exporters:
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
service:
extensions: [health_check, zpages]
telemetry:
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- zpages
- pprof
pipelines:
metrics:
receivers: [otlp, prometheus]
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite]
exporters: [clickhousemetricswrite]
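The signozspanmetrics-collector job above scrapes the main collector's prometheus exporter on 8889; from inside the compose network you can verify it is serving data (a sketch, /metrics being the conventional Prometheus path):
docker compose exec otel-collector-metrics wget -qO- otel-collector:8889/metrics | head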


@@ -19,8 +19,7 @@ rule_files:
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
scrape_configs: []
remote_read:
- url: tcp://clickhouse:9000/?database=signoz_metrics


@@ -1,33 +1,43 @@
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_http_version 1.1;
# to handle uri issue 414 from nginx
client_max_body_size 24M;
large_client_header_buffers 8 128k;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
if ( $uri = '/index.html' ) {
add_header Cache-Control no-store always;
}
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api/alertmanager{
proxy_pass http://alertmanager:9093/api/v2;
location /api/alertmanager {
proxy_pass http://alertmanager:9093/api/v2;
}
location /api {
proxy_pass http://query-service:8080/api;
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
root /usr/share/nginx/html;
}
}
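Two quick curl checks against the routes defined above, assuming the frontend is published on 3301 as in the compose files (illustrative, not exhaustive):
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:3301/   # SPA served via try_files
curl -s http://localhost:3301/api/v1/version                      # proxied through to query-service:8080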


@@ -1,273 +0,0 @@
version: "2.4"
volumes:
metadata_data: {}
middle_var: {}
historical_var: {}
broker_var: {}
coordinator_var: {}
router_var: {}
# If you can connect to Kafka but cannot write to the otlp_spans topic, see the link below
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
services:
zookeeper:
image: bitnami/zookeeper:3.6.2-debian-10-r100
ports:
- "2181:2181"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
# image: wurstmeister/kafka
image: bitnami/kafka:2.7.0-debian-10-r1
ports:
- "9092:9092"
hostname: kafka
environment:
KAFKA_ADVERTISED_HOST_NAME: kafka
KAFKA_ADVERTISED_PORT: 9092
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
ALLOW_PLAINTEXT_LISTENER: 'yes'
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
healthcheck:
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
interval: 30s
timeout: 10s
retries: 10
depends_on:
- zookeeper
postgres:
container_name: postgres
image: postgres:latest
volumes:
- metadata_data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=FoolishPassword
- POSTGRES_USER=druid
- POSTGRES_DB=druid
coordinator:
image: apache/druid:0.20.0
container_name: coordinator
volumes:
- ./storage:/opt/data
- coordinator_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
ports:
- "8081:8081"
command:
- coordinator
env_file:
- environment_tiny/coordinator
- environment_tiny/common
broker:
image: apache/druid:0.20.0
container_name: broker
volumes:
- broker_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8082:8082"
command:
- broker
env_file:
- environment_tiny/broker
- environment_tiny/common
historical:
image: apache/druid:0.20.0
container_name: historical
volumes:
- ./storage:/opt/data
- historical_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8083:8083"
command:
- historical
env_file:
- environment_tiny/historical
- environment_tiny/common
middlemanager:
image: apache/druid:0.20.0
container_name: middlemanager
volumes:
- ./storage:/opt/data
- middle_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8091:8091"
command:
- middleManager
env_file:
- environment_tiny/middlemanager
- environment_tiny/common
router:
image: apache/druid:0.20.0
container_name: router
volumes:
- router_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8888:8888"
command:
- router
env_file:
- environment_tiny/router
- environment_tiny/common
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
interval: 30s
timeout: 5s
retries: 5
flatten-processor:
image: signoz/flattener-processor:0.4.0
container_name: flattener-processor
depends_on:
- kafka
- otel-collector
ports:
- "8000:8000"
environment:
- KAFKA_BROKER=kafka:9092
- KAFKA_INPUT_TOPIC=otlp_spans
- KAFKA_OUTPUT_TOPIC=flattened_spans
query-service:
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
container_name: query-service
depends_on:
router:
condition: service_healthy
ports:
- "8080:8080"
volumes:
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- DruidClientUrl=http://router:8888
- DruidDatasource=flattened_spans
- STORAGE=druid
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
create-supervisor:
image: theithollow/hollowapp-blog:curl
container_name: create-supervisor
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
set-retention:
image: theithollow/hollowapp-blog:curl
container_name: set-retention
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
otel-collector:
image: otel/opentelemetry-collector:0.18.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 legacy port
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
depends_on:
kafka:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust


@@ -1,269 +0,0 @@
version: "2.4"
volumes:
metadata_data: {}
middle_var: {}
historical_var: {}
broker_var: {}
coordinator_var: {}
router_var: {}
# If you can connect to Kafka but cannot write to the otlp_spans topic, see the link below
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
services:
zookeeper:
image: bitnami/zookeeper:3.6.2-debian-10-r100
ports:
- "2181:2181"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
# image: wurstmeister/kafka
image: bitnami/kafka:2.7.0-debian-10-r1
ports:
- "9092:9092"
hostname: kafka
environment:
KAFKA_ADVERTISED_HOST_NAME: kafka
KAFKA_ADVERTISED_PORT: 9092
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
ALLOW_PLAINTEXT_LISTENER: 'yes'
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
healthcheck:
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
interval: 30s
timeout: 10s
retries: 10
depends_on:
- zookeeper
postgres:
container_name: postgres
image: postgres:latest
volumes:
- metadata_data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=FoolishPassword
- POSTGRES_USER=druid
- POSTGRES_DB=druid
coordinator:
image: apache/druid:0.20.0
container_name: coordinator
volumes:
- ./storage:/opt/druid/deepStorage
- coordinator_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
ports:
- "8081:8081"
command:
- coordinator
env_file:
- environment_small/coordinator
broker:
image: apache/druid:0.20.0
container_name: broker
volumes:
- broker_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8082:8082"
command:
- broker
env_file:
- environment_small/broker
historical:
image: apache/druid:0.20.0
container_name: historical
volumes:
- ./storage:/opt/druid/deepStorage
- historical_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8083:8083"
command:
- historical
env_file:
- environment_small/historical
middlemanager:
image: apache/druid:0.20.0
container_name: middlemanager
volumes:
- ./storage:/opt/druid/deepStorage
- middle_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8091:8091"
command:
- middleManager
env_file:
- environment_small/middlemanager
router:
image: apache/druid:0.20.0
container_name: router
volumes:
- router_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8888:8888"
command:
- router
env_file:
- environment_small/router
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
interval: 30s
timeout: 5s
retries: 5
flatten-processor:
image: signoz/flattener-processor:0.4.0
container_name: flattener-processor
depends_on:
- kafka
- otel-collector
ports:
- "8000:8000"
environment:
- KAFKA_BROKER=kafka:9092
- KAFKA_INPUT_TOPIC=otlp_spans
- KAFKA_OUTPUT_TOPIC=flattened_spans
query-service:
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
container_name: query-service
depends_on:
router:
condition: service_healthy
ports:
- "8080:8080"
volumes:
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- DruidClientUrl=http://router:8888
- DruidDatasource=flattened_spans
- STORAGE=druid
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3301:3301"
volumes:
- ./nginx-config.conf:/etc/nginx/conf.d/default.conf
create-supervisor:
image: theithollow/hollowapp-blog:curl
container_name: create-supervisor
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
set-retention:
image: theithollow/hollowapp-blog:curl
container_name: set-retention
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
otel-collector:
image: otel/opentelemetry-collector:0.18.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 leagcy grpc receiver
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
depends_on:
kafka:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ./locust-scripts:/locust


@@ -1 +0,0 @@
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]


@@ -1,69 +0,0 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "flattened_spans",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "StartTimeUnixNano",
"format": "nano"
},
"dimensionsSpec": {
"dimensions": [
"TraceId",
"SpanId",
"ParentSpanId",
"Name",
"ServiceName",
"References",
"Tags",
"ExternalHttpMethod",
"ExternalHttpUrl",
"Component",
"DBSystem",
"DBName",
"DBOperation",
"PeerService",
{
"type": "string",
"name": "TagsKeys",
"multiValueHandling": "ARRAY"
},
{
"type": "string",
"name": "TagsValues",
"multiValueHandling": "ARRAY"
},
{ "name": "DurationNano", "type": "Long" },
{ "name": "Kind", "type": "int" },
{ "name": "StatusCode", "type": "int" }
]
}
}
},
"metricsSpec" : [
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": "NONE",
"rollup": false
}
},
"tuningConfig": {
"type": "kafka",
"reportParseExceptions": true
},
"ioConfig": {
"topic": "flattened_spans",
"replicas": 1,
"taskDuration": "PT20M",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "kafka:9092"
}
}
}


@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=768m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=100MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=1280m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=2
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,53 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=1g
DRUID_XMS=1g
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=2g
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=2
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=128m
DRUID_XMS=128m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,52 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,26 +0,0 @@
# For S3 storage
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
# druid_storage_type=s3
# druid_storage_bucket=<s3-bucket-name>
# druid_storage_baseKey=druid/segments
# AWS_ACCESS_KEY_ID=<s3-access-id>
# AWS_SECRET_ACCESS_KEY=<s3-access-key>
# AWS_REGION=<s3-aws-region>
# druid_indexer_logs_type=s3
# druid_indexer_logs_s3Bucket=<s3-bucket-name>
# druid_indexer_logs_s3Prefix=druid/indexing-logs
# -----------------------------------------------------------
# For local storage
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_storage_type=local
druid_storage_storageDirectory=/opt/data/segments
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/data/indexing-logs


@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,50 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,49 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>


@@ -1,51 +0,0 @@
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
queued_retry:
num_workers: 4
queue_size: 100
retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
kafka/traces:
brokers:
- kafka:9092
topic: 'otlp_spans'
protocol_version: 2.0.0
kafka/metrics:
brokers:
- kafka:9092
topic: 'otlp_metrics'
protocol_version: 2.0.0
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [memory_limiter, batch, queued_retry]
exporters: [kafka/traces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [kafka/metrics]


@@ -36,9 +36,9 @@ is_mac() {
[[ $OSTYPE == darwin* ]]
}
is_arm64(){
[[ `uname -m` == 'arm64' ]]
}
# is_arm64(){
# [[ `uname -m` == 'arm64' ]]
# }
check_os() {
if is_mac; then
@@ -102,7 +102,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="80|3301|8080"
local ports_pattern="3301|4317"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -119,7 +119,7 @@ check_ports_occupied() {
send_event "port_not_available"
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports."
echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "++++++++++++++++++++++++++++++++++++++++"
echo ""
@@ -133,58 +133,44 @@ install_docker() {
if [[ $package_manager == apt-get ]]; then
apt_cmd="sudo apt-get --yes --quiet"
apt_cmd="$sudo_cmd apt-get --yes --quiet"
$apt_cmd update
$apt_cmd install software-properties-common gnupg-agent
curl -fsSL "https://download.docker.com/linux/$os/gpg" | sudo apt-key add -
sudo add-apt-repository \
curl -fsSL "https://download.docker.com/linux/$os/gpg" | $sudo_cmd apt-key add -
$sudo_cmd add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
$apt_cmd update
echo "Installing docker"
$apt_cmd install docker-ce docker-ce-cli containerd.io
elif [[ $package_manager == zypper ]]; then
zypper_cmd="sudo zypper --quiet --no-gpg-checks --non-interactive"
zypper_cmd="$sudo_cmd zypper --quiet --no-gpg-checks --non-interactive"
echo "Installing docker"
if [[ $os == sles ]]; then
os_sp="$(cat /etc/*-release | awk -F= '$1 == "VERSION_ID" { gsub(/"/, ""); print $2; exit }')"
os_arch="$(uname -i)"
sudo SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
fi
$zypper_cmd install docker docker-runc containerd
sudo systemctl enable docker.service
$sudo_cmd systemctl enable docker.service
elif [[ $package_manager == yum && $os == 'amazon linux' ]]; then
echo
echo "Amazon Linux detected ... "
echo
# sudo yum install docker
# sudo service docker start
sudo amazon-linux-extras install docker
# yum install docker
# service docker start
$sudo_cmd yum install -y amazon-linux-extras
$sudo_cmd amazon-linux-extras enable docker
$sudo_cmd yum install -y docker
else
yum_cmd="sudo yum --assumeyes --quiet"
yum_cmd="$sudo_cmd yum --assumeyes --quiet"
$yum_cmd install yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
$sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
echo "Installing docker"
$yum_cmd install docker-ce docker-ce-cli containerd.io
fi
}
install_docker_machine() {
echo "\nInstalling docker machine ..."
if [[ $os == "Mac" ]];then
curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine
chmod +x /usr/local/bin/docker-machine
else
curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine
chmod +x /tmp/docker-machine
sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
fi
}
install_docker_compose() {
@@ -192,9 +178,9 @@ install_docker_compose() {
if [[ ! -f /usr/bin/docker-compose ]];then
echo "++++++++++++++++++++++++"
echo "Installing docker-compose"
sudo curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
$sudo_cmd curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
$sudo_cmd chmod +x /usr/local/bin/docker-compose
$sudo_cmd ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
echo "docker-compose installed!"
echo ""
fi
@@ -210,16 +196,28 @@ install_docker_compose() {
}
start_docker() {
echo "Starting Docker ..."
if [ $os = "Mac" ]; then
echo -e "🐳 Starting Docker ...\n"
if [[ $os == "Mac" ]]; then
open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
else
if ! sudo systemctl is-active docker.service > /dev/null; then
if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
echo "Starting docker service"
sudo systemctl start docker.service
$sudo_cmd systemctl start docker.service
fi
# if [[ -z $sudo_cmd ]]; then
# docker ps > /dev/null && true
# if [[ $? -ne 0 ]]; then
# request_sudo
# fi
# fi
if [[ -z $sudo_cmd ]]; then
if ! docker ps > /dev/null && true; then
request_sudo
fi
fi
fi
}
wait_for_containers_start() {
local timeout=$1
@@ -229,16 +227,6 @@ wait_for_containers_start() {
if [[ status_code -eq 200 ]]; then
break
else
if [ $setup_type == 'druid' ]; then
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
LEN_SUPERVISORS="${#SUPERVISORS}"
if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
echo -e "\n🟠 Supervisors taking time to start ⏳ ... let's wait for some more time ⏱️\n\n"
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d
fi
fi
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
fi
((timeout--))
@@ -249,31 +237,26 @@ wait_for_containers_start() {
}
bye() { # Prints a friendly good bye message and exits the script.
if [ "$?" -ne 0 ]; then
if [[ "$?" -ne 0 ]]; then
set +o errexit
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
if [ $setup_type == 'clickhouse' ]; then
if is_arm64; then
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a"
else
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
fi
else
echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"
echo -e "\n📨 Please share your email to receive support with the installation"
read -rp 'Email: ' email
while [[ $email == "" ]]
do
if [[ $email == "" ]]; then
echo -e "\n📨 Please share your email to receive support with the installation"
read -rp 'Email: ' email
done
while [[ $email == "" ]]
do
read -rp 'Email: ' email
done
fi
send_event "installation_support"
@@ -284,33 +267,82 @@ bye() { # Prints a friendly good bye message and exits the script.
fi
}
request_sudo() {
if hash sudo 2>/dev/null; then
echo -e "\n\n🙇 We will need sudo access to complete the installation."
if (( $EUID != 0 )); then
sudo_cmd="sudo"
echo -e "Please enter your sudo password, if prompt."
# $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
# if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
# echo "Need sudo privileges to proceed with the installation."
# exit 1;
# fi
if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
echo "Need sudo privileges to proceed with the installation."
exit 1;
fi
echo -e "Got it! Thanks!! 🙏\n"
echo -e "Okay! We will bring up the SigNoz cluster from here 🚀\n"
fi
fi
}
echo ""
echo -e "👋 Thank you for trying out SigNoz! "
echo ""
sudo_cmd=""
# Check sudo permissions
if (( $EUID != 0 )); then
echo "🟡 Running installer with non-sudo permissions."
echo " In case of any failure or prompt, please consider running the script with sudo privileges."
echo ""
else
sudo_cmd="sudo"
fi
# Checking OS and assigning package manager
desired_os=0
os=""
email=""
echo -e "Detecting your OS ..."
echo -e "🌏 Detecting your OS ...\n"
check_os
# Obtain unique installation id
sysinfo="$(uname -a)"
if [ $? -ne 0 ]; then
# sysinfo="$(uname -a)"
# if [[ $? -ne 0 ]]; then
# uuid="$(uuidgen)"
# uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
# sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
# fi
if ! sysinfo="$(uname -a)"; then
uuid="$(uuidgen)"
uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
SIGNOZ_INSTALLATION_ID="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
sysinfo="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
fi
digest_cmd=""
if hash shasum 2>/dev/null; then
digest_cmd="shasum -a 256"
elif hash sha256sum 2>/dev/null; then
digest_cmd="sha256sum"
elif hash openssl 2>/dev/null; then
digest_cmd="openssl dgst -sha256"
fi
if [[ -z $digest_cmd ]]; then
SIGNOZ_INSTALLATION_ID="$sysinfo"
else
SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | shasum | cut -d ' ' -f1)
SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
fi
# echo ""
# echo -e "👉 ${RED}Two ways to go forward\n"
# echo -e "${RED}1) ClickHouse as database (default)\n"
# echo -e "${RED}2) Kafka + Druid as datastore \n"
# read -p "⚙️ Enter your preference (1/2):" choice_setup
# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
@@ -323,8 +355,6 @@ fi
# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
# setup_type='clickhouse'
# else
# setup_type='druid'
# fi
setup_type='clickhouse'
@@ -364,13 +394,7 @@ send_event() {
'installation_error_checks')
event="Installation Error - Checks"
error="Containers not started"
if [ $setup_type == 'clickhouse' ]; then
others='"data": "some_checks",'
else
supervisors="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
datasources="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
others='"supervisors": "'"$supervisors"'", "datasources": "'"$datasources"'",'
fi
others='"data": "some_checks",'
;;
'installation_support')
event="Installation Support"
@@ -389,7 +413,7 @@ send_event() {
;;
esac
if [ "$error" != "" ]; then
if [[ "$error" != "" ]]; then
error='"error": "'"$error"'", '
fi
@@ -412,15 +436,28 @@ fi
# Check if the Docker daemon is installed and available. If not, install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
if ! is_command_present docker; then
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
request_sudo
install_docker
else
# enable docker without sudo from next reboot
sudo usermod -aG docker "${USER}"
elif is_mac; then
echo ""
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
echo "https://docs.docker.com/docker-for-mac/install/"
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
send_event "docker_not_installed"
exit 1
else
echo ""
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
echo "Docker must be installed manually on your machine to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
echo "https://docs.docker.com/get-docker/"
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
send_event "docker_not_installed"
exit 1
fi
@@ -428,43 +465,25 @@ fi
# Install docker-compose
if ! is_command_present docker-compose; then
request_sudo
install_docker_compose
fi
start_docker
# sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
if is_arm64; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull
else
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
fi
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
fi
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
if [ $setup_type == 'clickhouse' ]; then
if is_arm64; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true
else
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
fi
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
fi
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
wait_for_containers_start 60
echo ""
@@ -473,11 +492,9 @@ if [[ $status_code -ne 200 ]]; then
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
else
echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
echo "or reach us on SigNoz for support https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"
@@ -495,15 +512,7 @@ else
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
if [ $setup_type == 'clickhouse' ]; then
if is_arm64; then
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v"
else
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
fi
else
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
fi
echo " To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"

ee/LICENSE Normal file

@@ -0,0 +1,37 @@
The SigNoz Enterprise license (the "Enterprise License")
Copyright (c) 2020 - present SigNoz Inc.
With regard to the SigNoz Software:
This software and associated documentation files (the "Software") may only be
used in production, if you (and any entity that you represent) have agreed to,
and are in compliance with, the SigNoz Subscription Terms of Service, available
via email (hello@signoz.io) (the "Enterprise Terms"), or other
agreement governing the use of the Software, as agreed by you and SigNoz,
and otherwise have a valid SigNoz Enterprise license for the
correct number of user seats. Subject to the foregoing sentence, you are free to
modify this Software and publish patches to the Software. You agree that SigNoz
and/or its licensors (as applicable) retain all right, title and interest in and
to all such modifications and/or patches, and all such modifications and/or
patches may only be used, copied, modified, displayed, distributed, or otherwise
exploited with a valid SigNoz Enterprise license for the correct
number of user seats. Notwithstanding the foregoing, you may copy and modify
the Software for development and testing purposes, without requiring a
subscription. You agree that SigNoz and/or its licensors (as applicable) retain
all right, title and interest in and to all such modifications. You are not
granted any other rights beyond what is expressly stated herein. Subject to the
foregoing, it is forbidden to copy, merge, publish, distribute, sublicense,
and/or sell the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
For all third party components incorporated into the SigNoz Software, those
components are licensed under the original license provided by the owner of the
applicable component.


@@ -0,0 +1,4 @@
.vscode
README.md
signoz.db
bin


@@ -0,0 +1,48 @@
FROM golang:1.17-buster AS builder
# LD_FLAGS is passed as an argument from the Makefile. It will be empty if no argument is passed
ARG LD_FLAGS
ARG TARGETPLATFORM
ENV CGO_ENABLED=1
ENV GOPATH=/go
RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2)
# Prepare and enter src directory
WORKDIR /go/src/github.com/signoz/signoz
# Add the sources and proceed with build
ADD . .
RUN cd ee/query-service \
&& go build -tags timetzdata -a -o ./bin/query-service \
-ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" \
&& chmod +x ./bin/query-service
# use a minimal alpine image
FROM alpine:3.7
# Add Maintainer Info
LABEL maintainer="signoz"
# add ca-certificates in case you need them
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
# set working directory
WORKDIR /root
# copy the binary from builder
COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-service .
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
# run the binary
ENTRYPOINT ["./query-service"]
CMD ["-config", "../config/prometheus.yml"]
# CMD ["./query-service -config /root/config/prometheus.yml"]
EXPOSE 8080


@@ -0,0 +1,131 @@
package api
import (
"net/http"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/license"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/version"
)
type APIHandlerOptions struct {
DataConnector interfaces.DataConnector
AppDao dao.ModelDao
RulesManager *rules.Manager
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
}
type APIHandler struct {
opts APIHandlerOptions
baseapp.APIHandler
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags})
if err != nil {
return nil, err
}
ah := &APIHandler{
opts: opts,
APIHandler: *baseHandler,
}
return ah, nil
}
func (ah *APIHandler) FF() baseint.FeatureLookup {
return ah.opts.FeatureFlags
}
func (ah *APIHandler) RM() *rules.Manager {
return ah.opts.RulesManager
}
func (ah *APIHandler) LM() *license.Manager {
return ah.opts.LicenseManager
}
func (ah *APIHandler) AppDao() dao.ModelDao {
return ah.opts.AppDao
}
func (ah *APIHandler) CheckFeature(f string) bool {
err := ah.FF().CheckFeature(f)
return err == nil
}
// RegisterRoutes registers routes for this handler on the given router
func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
// note: add ee override methods first
// routes available only in ee version
router.HandleFunc("/api/v1/licenses",
baseapp.AdminAccess(ah.listLicenses)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/licenses",
baseapp.AdminAccess(ah.applyLicense)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/featureFlags",
baseapp.OpenAccess(ah.getFeatureFlags)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/loginPrecheck",
baseapp.OpenAccess(ah.precheckLogin)).
Methods(http.MethodGet)
// paid plans specific routes
router.HandleFunc("/api/v1/complete/saml",
baseapp.OpenAccess(ah.receiveSAML)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/complete/google",
baseapp.OpenAccess(ah.receiveGoogleAuth)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/orgs/{orgId}/domains",
baseapp.AdminAccess(ah.listDomainsByOrg)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/domains",
baseapp.AdminAccess(ah.postDomain)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/domains/{id}",
baseapp.AdminAccess(ah.putDomain)).
Methods(http.MethodPut)
router.HandleFunc("/api/v1/domains/{id}",
baseapp.AdminAccess(ah.deleteDomain)).
Methods(http.MethodDelete)
// base overrides
router.HandleFunc("/api/v1/version", baseapp.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/invite/{token}", baseapp.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/register", baseapp.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/login", baseapp.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/traces/{traceId}", baseapp.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
router.HandleFunc("/api/v2/metrics/query_range", baseapp.ViewAccess(ah.queryRangeMetricsV2)).Methods(http.MethodPost)
ah.APIHandler.RegisterRoutes(router)
}
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
version := version.GetVersion()
ah.WriteJSON(w, r, map[string]string{"version": version, "ee": "Y"})
}
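To make the route wiring above concrete, here is a minimal sketch of how the EE handler could be constructed and mounted (illustrative only: the empty options, the import path, and the listen address are assumptions, not taken from this changeset):

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
	eeapi "go.signoz.io/signoz/ee/query-service/app/api" // assumed package path
)

func main() {
	// All APIHandlerOptions fields (DataConnector, AppDao, RulesManager,
	// FeatureFlags, LicenseManager) must be populated by the real bootstrap;
	// they are omitted here, so this sketch compiles but would fail at runtime.
	handler, err := eeapi.NewAPIHandler(eeapi.APIHandlerOptions{})
	if err != nil {
		log.Fatal(err)
	}
	router := mux.NewRouter()
	handler.RegisterRoutes(router) // EE overrides are registered first, then base routes
	log.Fatal(http.ListenAndServe(":8080", router))
}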


@@ -0,0 +1,332 @@
package api
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func parseRequest(r *http.Request, req interface{}) error {
defer r.Body.Close()
requestBody, err := ioutil.ReadAll(r.Body)
if err != nil {
return err
}
err = json.Unmarshal(requestBody, &req)
return err
}
// loginUser overrides base handler and considers SSO case.
func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
req := basemodel.LoginRequest{}
err := parseRequest(r, &req)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
ctx := context.Background()
if req.Email != "" && ah.CheckFeature(model.SSO) {
var apierr basemodel.BaseApiError
_, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
if apierr != nil && !apierr.IsNil() {
RespondError(w, apierr, nil)
return
}
}
// if all looks good, call auth
resp, err := auth.Login(ctx, &req)
if ah.HandleError(w, err, http.StatusUnauthorized) {
return
}
ah.WriteJSON(w, r, resp)
}
// registerUser registers a user and responds with a precheck
// so the front-end can decide the login method
func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(model.SSO) {
ah.APIHandler.Register(w, r)
return
}
ctx := context.Background()
var req *baseauth.RegisterRequest
defer r.Body.Close()
requestBody, err := ioutil.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("received no input in api\n", err)
RespondError(w, model.BadRequest(err), nil)
return
}
err = json.Unmarshal(requestBody, &req)
if err != nil {
zap.S().Errorf("received invalid user registration request", zap.Error(err))
RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
return
}
// get invite object
invite, err := baseauth.ValidateInvite(ctx, req)
if err != nil || invite == nil {
zap.S().Errorf("failed to validate invite token", err)
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
}
// get auth domain from email domain
domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
if apierr != nil {
zap.S().Errorf("failed to get domain from email", apierr)
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
}
precheckResp := &model.PrecheckResponse{
SSO: false,
IsUser: false,
}
if domain != nil && domain.SsoEnabled {
// sso is enabled, create user and respond with precheck data
user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
var precheckError basemodel.BaseApiError
precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
if precheckError != nil {
RespondError(w, precheckError, precheckResp)
return
}
} else {
// no-sso, validate password
if err := auth.ValidatePassword(req.Password); err != nil {
RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
return
}
_, registerError := baseauth.Register(ctx, req)
if !registerError.IsNil() {
RespondError(w, registerError, nil)
return
}
precheckResp.IsUser = true
}
ah.Respond(w, precheckResp)
}
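// Illustrative response shape for the precheck returned above (JSON field
// names are assumptions, not taken from this changeset): a payload like
// {"sso": false, "isUser": true} lets the front-end send the invited user to
// password login instead of an SSO redirect.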
// getInvite returns the invite object details for the given invite token. We do not need to
// protect this API because the invite token itself is meant to be private.
func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
token := mux.Vars(r)["token"]
sourceUrl := r.URL.Query().Get("ref")
ctx := context.Background()
inviteObject, err := baseauth.GetInvite(ctx, token)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
resp := model.GettableInvitation{
InvitationResponseObject: inviteObject,
}
precheck, apierr := ah.AppDao().PrecheckLogin(ctx, inviteObject.Email, sourceUrl)
resp.Precheck = precheck
if apierr != nil {
RespondError(w, apierr, resp)
return
}
ah.WriteJSON(w, r, resp)
}
// PrecheckLogin enables browser login page to display appropriate
// login methods
func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
email := r.URL.Query().Get("email")
sourceUrl := r.URL.Query().Get("ref")
resp, apierr := ah.AppDao().PrecheckLogin(ctx, email, sourceUrl)
if apierr != nil {
RespondError(w, apierr, resp)
return
}
ah.Respond(w, resp)
}
func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
ssoError := []byte("Login failed. Please contact your system administrator")
dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
base64.StdEncoding.Encode(dst, ssoError)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
}
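// Illustrative counterpart (not part of this changeset): whoever consumes the
// redirect can recover the message by base64-decoding the ssoerror parameter:
//
//	msg, err := base64.StdEncoding.DecodeString(r.URL.Query().Get("ssoerror"))
//	if err == nil {
//		// string(msg) == "Login failed. Please contact your system administrator"
//	}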
// receiveGoogleAuth completes google OAuth response and forwards a request
// to front-end to sign user in
func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) {
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()
if !ah.CheckFeature(model.SSO) {
zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
q := r.URL.Query()
if errType := q.Get("error"); errType != "" {
zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
return
}
relayState := q.Get("state")
zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}
// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}
// now that we have domain, use domain to fetch sso settings.
// prepare google callback handler using parsedState -
// which contains redirect URL (front-end endpoint)
callbackHandler, err := domain.PrepareGoogleOAuthProvider(parsedState)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to prepare google oauth provider for domain %s: %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
identity, err := callbackHandler.HandleCallback(r)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
http.Redirect(w, r, nextPage, http.StatusSeeOther)
}
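// Worked example (hypothetical values, not from this changeset): if the IdP
// echoes back a relay state of "https://signoz.example.com/login?domainId=abc",
// url.Parse yields Scheme "https" and Host "signoz.example.com", and the
// redirect URI computed above becomes "https://signoz.example.com/login".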
// receiveSAML completes a SAML request and gets user logged in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// this is the source url that initiated the login request
redirectUri := constants.GetDefaultSiteURL()
ctx := context.Background()
if !ah.CheckFeature(model.SSO) {
zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
err := r.ParseForm()
if err != nil {
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}
// the relay state is sent when a login request is submitted to
// Idp.
relayState := r.FormValue("RelayState")
zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
handleSsoError(w, r, redirectUri)
return
}
// upgrade redirect url from the relay state for better accuracy
redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
// fetch domain by parsing relay state.
domain, err := ah.AppDao().GetDomainFromSsoResponse(ctx, parsedState)
if err != nil {
handleSsoError(w, r, redirectUri)
return
}
sp, err := domain.PrepareSamlRequest(parsedState)
if err != nil {
zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
if err != nil {
zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
if assertionInfo.WarningInfo.InvalidTime {
zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
handleSsoError(w, r, redirectUri)
return
}
email := assertionInfo.NameID
if email == "" {
zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
handleSsoError(w, r, redirectUri)
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
if err != nil {
zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
http.Redirect(w, r, nextPage, http.StatusSeeOther)
}


@@ -0,0 +1,90 @@
package api
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
)
func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
orgId := mux.Vars(r)["orgId"]
domains, apierr := ah.AppDao().ListDomains(context.Background(), orgId)
if apierr != nil {
RespondError(w, apierr, domains)
return
}
ah.Respond(w, domains)
}
func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
req := model.OrgDomain{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if err := req.ValidNew(); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if apierr := ah.AppDao().CreateDomain(ctx, &req); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, &req)
}
func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
domainIdStr := mux.Vars(r)["id"]
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
req := model.OrgDomain{Id: domainId}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
req.Id = domainId
if err := req.Valid(nil); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, &req)
}
func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
domainIdStr := mux.Vars(r)["id"]
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
return
}
apierr := ah.AppDao().DeleteDomain(context.Background(), domainId)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, nil)
}


@@ -0,0 +1,10 @@
package api
import (
"net/http"
)
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
featureSet := ah.FF().GetFeatureFlags()
ah.Respond(w, featureSet)
}


@@ -0,0 +1,40 @@
package api
import (
"context"
"encoding/json"
"fmt"
"go.signoz.io/signoz/ee/query-service/model"
"net/http"
)
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
ah.Respond(w, licenses)
}
func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
var l model.License
if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if l.Key == "" {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
license, apiError := ah.LM().Activate(ctx, l.Key)
if apiError != nil {
RespondError(w, apiError, nil)
return
}
ah.Respond(w, license)
}
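As a usage sketch for the activation route above (assuming a locally running query-service on port 8080; the key value is a placeholder):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// POST /api/v1/licenses with a JSON body carrying the license key.
	body := strings.NewReader(`{"key": "YOUR-LICENSE-KEY"}`)
	resp, err := http.Post("http://localhost:8080/api/v1/licenses", "application/json", body)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}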


@@ -0,0 +1,236 @@
package api
import (
"bytes"
"fmt"
"net/http"
"sync"
"text/template"
"time"
"go.signoz.io/signoz/pkg/query-service/app/metrics"
"go.signoz.io/signoz/pkg/query-service/app/parser"
"go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
querytemplate "go.signoz.io/signoz/pkg/query-service/utils/queryTemplate"
"go.uber.org/zap"
)
func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.CustomMetricsFunction) {
zap.S().Info("CustomMetricsFunction feature is not enabled in this plan")
ah.APIHandler.QueryRangeMetricsV2(w, r)
return
}
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
if apiErrorObj != nil {
zap.S().Errorf(apiErrorObj.Err.Error())
RespondError(w, apiErrorObj, nil)
return
}
// prometheus instant query needs same timestamp
if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.PROM {
metricsQueryRangeParams.Start = metricsQueryRangeParams.End
}
// round down the end to the nearest multiple of step (integer division floors)
if metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER {
end := (metricsQueryRangeParams.End) / 1000
step := metricsQueryRangeParams.Step
metricsQueryRangeParams.End = (end / step * step) * 1000
}
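// Worked example (illustrative): with End = 1_700_000_123_456 ms and
// Step = 60 (seconds), end = 1_700_000_123 and end/step*step = 1_700_000_100,
// so End becomes 1_700_000_100_000 ms, i.e. floored to the step boundary.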
type channelResult struct {
Series []*basemodel.Series
TableName string
Err error
Name string
Query string
}
execClickHouseQueries := func(queries map[string]string) ([]*basemodel.Series, []string, error, map[string]string) {
var seriesList []*basemodel.Series
var tableName []string
ch := make(chan channelResult, len(queries))
var wg sync.WaitGroup
for name, query := range queries {
wg.Add(1)
go func(name, query string) {
defer wg.Done()
seriesList, tableName, err := ah.opts.DataConnector.GetMetricResultEE(r.Context(), query)
for _, series := range seriesList {
series.QueryName = name
}
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
return
}
ch <- channelResult{Series: seriesList, TableName: tableName}
}(name, query)
}
wg.Wait()
close(ch)
var errs []error
errQuriesByName := make(map[string]string)
// read values from the channel
for r := range ch {
if r.Err != nil {
errs = append(errs, r.Err)
errQuriesByName[r.Name] = r.Query
continue
}
seriesList = append(seriesList, r.Series...)
tableName = append(tableName, r.TableName)
}
if len(errs) != 0 {
return nil, nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
}
return seriesList, tableName, nil, nil
}
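// Note on the fan-out above (descriptive, not a behaviour change): ch is
// buffered with capacity len(queries) and every goroutine sends exactly one
// result, so all sends complete without blocking and close(ch) after
// wg.Wait() is safe.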
execPromQueries := func(metricsQueryRangeParams *basemodel.QueryRangeParamsV2) ([]*basemodel.Series, error, map[string]string) {
var seriesList []*basemodel.Series
ch := make(chan channelResult, len(metricsQueryRangeParams.CompositeMetricQuery.PromQueries))
var wg sync.WaitGroup
for name, query := range metricsQueryRangeParams.CompositeMetricQuery.PromQueries {
if query.Disabled {
continue
}
wg.Add(1)
go func(name string, query *basemodel.PromQuery) {
var seriesList []*basemodel.Series
defer wg.Done()
tmpl := template.New("promql-query")
tmpl, tmplErr := tmpl.Parse(query.Query)
if tmplErr != nil {
ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
return
}
var queryBuf bytes.Buffer
tmplErr = tmpl.Execute(&queryBuf, metricsQueryRangeParams.Variables)
if tmplErr != nil {
ch <- channelResult{Err: fmt.Errorf("error in parsing query-%s: %v", name, tmplErr), Name: name, Query: query.Query}
return
}
query.Query = queryBuf.String()
queryModel := basemodel.QueryRangeParams{
Start: time.UnixMilli(metricsQueryRangeParams.Start),
End: time.UnixMilli(metricsQueryRangeParams.End),
Step: time.Duration(metricsQueryRangeParams.Step * int64(time.Second)),
Query: query.Query,
}
promResult, _, err := ah.opts.DataConnector.GetQueryRangeResult(r.Context(), &queryModel)
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query.Query}
return
}
matrix, _ := promResult.Matrix()
for _, v := range matrix {
var s basemodel.Series
s.QueryName = name
s.Labels = v.Metric.Copy().Map()
for _, p := range v.Points {
s.Points = append(s.Points, basemodel.MetricPoint{Timestamp: p.T, Value: p.V})
}
seriesList = append(seriesList, &s)
}
ch <- channelResult{Series: seriesList}
}(name, query)
}
wg.Wait()
close(ch)
var errs []error
errQuriesByName := make(map[string]string)
// read values from the channel
for r := range ch {
if r.Err != nil {
errs = append(errs, r.Err)
errQuriesByName[r.Name] = r.Query
continue
}
seriesList = append(seriesList, r.Series...)
}
if len(errs) != 0 {
return nil, fmt.Errorf("encountered multiple errors: %s", metrics.FormatErrs(errs, "\n")), errQuriesByName
}
return seriesList, nil, nil
}
var seriesList []*basemodel.Series
var tableName []string
var err error
var errQuriesByName map[string]string
switch metricsQueryRangeParams.CompositeMetricQuery.QueryType {
case basemodel.QUERY_BUILDER:
runQueries := metrics.PrepareBuilderMetricQueries(metricsQueryRangeParams, constants.SIGNOZ_TIMESERIES_TABLENAME)
if runQueries.Err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: runQueries.Err}, nil)
return
}
seriesList, tableName, err, errQuriesByName = execClickHouseQueries(runQueries.Queries)
case basemodel.CLICKHOUSE:
queries := make(map[string]string)
for name, chQuery := range metricsQueryRangeParams.CompositeMetricQuery.ClickHouseQueries {
if chQuery.Disabled {
continue
}
tmpl := template.New("clickhouse-query")
tmpl, err := tmpl.Parse(chQuery.Query)
if err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
return
}
var query bytes.Buffer
// replace go template variables
querytemplate.AssignReservedVars(metricsQueryRangeParams)
err = tmpl.Execute(&query, metricsQueryRangeParams.Variables)
if err != nil {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, nil)
return
}
queries[name] = query.String()
}
seriesList, tableName, err, errQuriesByName = execClickHouseQueries(queries)
case basemodel.PROM:
seriesList, err, errQuriesByName = execPromQueries(metricsQueryRangeParams)
default:
err = fmt.Errorf("invalid query type")
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}, errQuriesByName)
return
}
if err != nil {
apiErrObj := &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
return
}
if metricsQueryRangeParams.CompositeMetricQuery.PanelType == basemodel.QUERY_VALUE &&
len(seriesList) > 1 &&
(metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.QUERY_BUILDER ||
metricsQueryRangeParams.CompositeMetricQuery.QueryType == basemodel.CLICKHOUSE) {
RespondError(w, &basemodel.ApiError{Typ: basemodel.ErrorBadData, Err: fmt.Errorf("invalid: query resulted in more than one series for value type")}, nil)
return
}
type ResponseFormat struct {
ResultType string `json:"resultType"`
Result []*basemodel.Series `json:"result"`
TableName []string `json:"tableName"`
}
resp := ResponseFormat{ResultType: "matrix", Result: seriesList, TableName: tableName}
ah.Respond(w, resp)
}


@@ -0,0 +1,12 @@
package api
import (
"net/http"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {
baseapp.RespondError(w, apiErr, data)
}


@@ -0,0 +1,39 @@
package api
import (
"net/http"
"strconv"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.SmartTraceDetail) {
zap.S().Info("SmartTraceDetail feature is not enabled in this plan")
ah.APIHandler.SearchTraces(w, r)
return
}
traceId, spanId, levelUpInt, levelDownInt, err := baseapp.ParseSearchTracesParams(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
return
}
spanLimit, err := strconv.Atoi(constants.SpanLimitStr)
if err != nil {
zap.S().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable: ", err)
return
}
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm)
if ah.HandleError(w, err, http.StatusBadRequest) {
return
}
ah.WriteJSON(w, r, result)
}


@@ -0,0 +1,401 @@
package db
import (
"context"
"crypto/md5"
"encoding/json"
"fmt"
"reflect"
"regexp"
"sort"
"strings"
"time"
"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
// GetMetricResultEE runs the query and returns list of time series
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
defer utils.Elapsed("GetMetricResult")()
zap.S().Infof("Executing metric result query: %s", query)
var hash string
// If getSubTreeSpans function is used in the clickhouse query
if strings.Contains(query, "getSubTreeSpans(") {
var err error
query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
if err != nil && err.Error() == "No spans found for the given query" {
return nil, "", nil
}
if err != nil {
return nil, "", err
}
}
rows, err := r.conn.Query(ctx, query)
zap.S().Debug(query)
if err != nil {
zap.S().Debug("Error in processing query: ", err)
return nil, "", fmt.Errorf("error in processing query")
}
var (
columnTypes = rows.ColumnTypes()
columnNames = rows.Columns()
vars = make([]interface{}, len(columnTypes))
)
for i := range columnTypes {
vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
}
// when group by is applied, each combination of cartesian product
// of attributes is separate series. each item in metricPointsMap
// represent a unique series.
metricPointsMap := make(map[string][]basemodel.MetricPoint)
// attribute key-value pairs for each group selection
attributesMap := make(map[string]map[string]string)
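// Illustrative example (not from this changeset): rows grouped by
// (service, region) such as ("api", "us") and ("worker", "us") yield the
// sorted-and-joined keys "apius" and "usworker", so every distinct attribute
// combination accumulates its own point slice in metricPointsMap.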
defer rows.Close()
for rows.Next() {
if err := rows.Scan(vars...); err != nil {
return nil, "", err
}
var groupBy []string
var metricPoint basemodel.MetricPoint
groupAttributes := make(map[string]string)
// Assuming that the end result row contains a timestamp, value and option labels
// Label key and value are both strings.
for idx, v := range vars {
colName := columnNames[idx]
switch v := v.(type) {
case *string:
// special case for returning all labels
if colName == "fullLabels" {
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
return nil, "", err
}
for key, val := range metric {
groupBy = append(groupBy, val)
groupAttributes[key] = val
}
} else {
groupBy = append(groupBy, *v)
groupAttributes[colName] = *v
}
case *time.Time:
metricPoint.Timestamp = v.UnixMilli()
case *float64:
metricPoint.Value = *v
case **float64:
// ch seems to return this type when column is derived from
// SELECT count(*)/ SELECT count(*)
floatVal := *v
if floatVal != nil {
metricPoint.Value = *floatVal
}
case *float32:
float32Val := float32(*v)
metricPoint.Value = float64(float32Val)
case *uint8, *uint64, *uint16, *uint32:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
}
case *int8, *int16, *int32, *int64:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
zap.S().Errorf("invalid var found in metric builder query result", v, colName)
}
}
sort.Strings(groupBy)
key := strings.Join(groupBy, "")
attributesMap[key] = groupAttributes
metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
}
var seriesList []*basemodel.Series
for key := range metricPointsMap {
points := metricPointsMap[key]
// the first point in each series could be invalid since the
// aggregations are applied with a point from the previous series
if len(points) > 1 {
points = points[1:]
}
attributes := attributesMap[key]
series := basemodel.Series{Labels: attributes, Points: points}
seriesList = append(seriesList, &series)
}
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
// if err != nil {
// zap.S().Error("Error in dropping temporary table: ", err)
// return nil, err
// }
if hash == "" {
return seriesList, hash, nil
} else {
return seriesList, "getSubTreeSpans" + hash, nil
}
}
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
zap.S().Debugf("Executing getSubTreeSpans function")
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
// process the query to fetch subTree query
var subtreeInput string
query, subtreeInput, hash = processQuery(query, hash)
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
if err != nil {
zap.S().Error("Error in dropping temporary table: ", err)
return query, hash, err
}
// Create temporary table to store the getSubTreeSpans() results
zap.S().Debugf("Creating temporary table getSubTreeSpans%s", hash)
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
if err != nil {
zap.S().Error("Error in creating temporary table: ", err)
return query, hash, err
}
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
getSpansSubQuery := subtreeInput
// Execute the subTree query
zap.S().Debugf("Executing subTree query: %s", getSpansSubQuery)
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
// zap.S().Info(getSpansSubQuery)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return query, hash, fmt.Errorf("Error in processing sql query")
}
var searchScanResponses []basemodel.SearchSpanDBResponseItem
// TODO : @ankit: I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this as an improvement later.
// Fetch all the spans of the same TraceID so that we can build the subtree
modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
if len(getSpansSubQueryDBResponses) == 0 {
return query, hash, fmt.Errorf("No spans found for the given query")
}
zap.S().Debugf("Executing query to fetch all the spans from the same TraceID: %s", modelQuery)
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return query, hash, fmt.Errorf("Error in processing sql query")
}
// Process model to fetch the spans
zap.S().Debugf("Processing model to fetch the spans")
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
for _, item := range searchScanResponses {
var jsonItem basemodel.SearchSpanResponseItem
json.Unmarshal([]byte(item.Model), &jsonItem)
jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano())
if jsonItem.Events == nil {
jsonItem.Events = []string{}
}
searchSpanResponses = append(searchSpanResponses, jsonItem)
}
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
// Use map to store pointer to the spans to avoid duplicates and save memory
zap.S().Debugf("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans%s", hash)
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
if err != nil {
zap.S().Error("Error in getSubTreeAlgorithm function: ", err)
return query, hash, err
}
zap.S().Debugf("Preparing batch to store subtree spans in temporary table getSubTreeSpans%s", hash)
statement, err := r.conn.PrepareBatch(context.Background(), "INSERT INTO getSubTreeSpans"+hash)
if err != nil {
zap.S().Error("Error in preparing batch statement: ", err)
return query, hash, err
}
for _, span := range treeSearchResponse {
var parentID string
if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" {
parentID = span.References[0].SpanId
}
err = statement.Append(
time.Unix(0, int64(span.TimeUnixNano)),
span.TraceID,
span.SpanID,
parentID,
span.RootSpanID,
span.ServiceName,
span.Name,
span.RootName,
uint64(span.DurationNano),
int8(span.Kind),
span.TagMap,
span.Events,
)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
return query, hash, err
}
}
zap.S().Debugf("Inserting the subtree spans in temporary table getSubTreeSpans%s", hash)
err = statement.Send()
if err != nil {
zap.S().Error("Error in sending statement: ", err)
return query, hash, err
}
return query, hash, nil
}
func processQuery(query string, hash string) (string, string, string) {
re3 := regexp.MustCompile(`getSubTreeSpans`)
submatchall3 := re3.FindAllStringIndex(query, -1)
getSubtreeSpansMatchIndex := submatchall3[0][1]
query2countParenthesis := query[getSubtreeSpansMatchIndex:]
sqlCompleteIndex := 0
countParenthesisImbalance := 0
for i, char := range query2countParenthesis {
if string(char) == "(" {
countParenthesisImbalance += 1
}
if string(char) == ")" {
countParenthesisImbalance -= 1
}
if countParenthesisImbalance == 0 {
sqlCompleteIndex = i
break
}
}
subtreeInput := query2countParenthesis[1:sqlCompleteIndex]
// hash the subtreeInput
hmd5 := md5.Sum([]byte(subtreeInput))
hash = fmt.Sprintf("%x", hmd5)
// Reformat the query to use the getSubTreeSpans function
query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:]
return query, subtreeInput, hash
}
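// Usage sketch (illustrative; <h> stands for the hex md5 of the inner query):
//
//	q, inner, h := processQuery(
//		"select count() from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where name='FindDriverIDs') group by interval",
//		"",
//	)
//	// inner == "select * from signoz_traces.signoz_index_v2 where name='FindDriverIDs'"
//	// q     == "select count() from getSubTreeSpans<h>  group by interval"
//	// h     == fmt.Sprintf("%x", md5.Sum([]byte(inner)))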
// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans
func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) {
var spans []*model.SpanForTraceDetails
for _, spanItem := range payload {
var parentID string
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
parentID = spanItem.References[0].SpanId
}
span := &model.SpanForTraceDetails{
TimeUnixNano: spanItem.TimeUnixNano,
SpanID: spanItem.SpanID,
TraceID: spanItem.TraceID,
ServiceName: spanItem.ServiceName,
Name: spanItem.Name,
Kind: spanItem.Kind,
DurationNano: spanItem.DurationNano,
TagMap: spanItem.TagMap,
ParentID: parentID,
Events: spanItem.Events,
HasError: spanItem.HasError,
}
spans = append(spans, span)
}
zap.S().Debug("Building Tree")
roots, err := buildSpanTrees(&spans)
if err != nil {
return nil, err
}
searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem)
// Every span fetched by the getSubTreeSpans input SQL query is considered a root
// For each root, get the subtree spans
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
targetSpan := &model.SpanForTraceDetails{}
// zap.S().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
// Search target span object in the tree
for _, root := range roots {
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
if targetSpan != nil {
break
}
if err != nil {
zap.S().Error("Error during BreadthFirstSearch(): ", err)
return nil, err
}
}
if targetSpan == nil {
return nil, nil
}
// Build subtree for the target span
// Mark the target span as root by setting parent ID as empty string
targetSpan.ParentID = ""
preParents := []*model.SpanForTraceDetails{targetSpan}
children := []*model.SpanForTraceDetails{}
// Get the subtree child spans
for len(preParents) != 0 {
parents := []*model.SpanForTraceDetails{}
for _, parent := range preParents {
children = append(children, parent.Children...)
parents = append(parents, parent.Children...)
}
preParents = parents
}
resultSpans := children
// Add the target span to the result spans
resultSpans = append(resultSpans, targetSpan)
for _, item := range resultSpans {
references := []basemodel.OtelSpanRef{
{
TraceId: item.TraceID,
SpanId: item.ParentID,
RefType: "CHILD_OF",
},
}
if item.Events == nil {
item.Events = []string{}
}
searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{
TimeUnixNano: item.TimeUnixNano,
SpanID: item.SpanID,
TraceID: item.TraceID,
ServiceName: item.ServiceName,
Name: item.Name,
Kind: item.Kind,
References: references,
DurationNano: item.DurationNano,
TagMap: item.TagMap,
Events: item.Events,
HasError: item.HasError,
RootSpanID: getSpansSubQueryDBResponse.SpanID,
RootName: targetSpan.Name,
}
}
}
return searchSpansResult, nil
}
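Taken together, the pieces in this file appear to implement the following flow (an editorial sketch, not code from the diff):

// 1. processQuery extracts the inner SELECT from getSubTreeSpans(...) and rewrites
//    the outer query to read FROM getSubTreeSpans<md5-of-inner-select>.
// 2. The inner SELECT runs against ClickHouse to find the subtree root spans
//    (getSpansSubQueryDBResponses), alongside the full trace spans (searchSpanResponses).
// 3. getSubTreeAlgorithm builds span trees and collects every span under each root,
//    deduplicated in a map keyed by span ID.
// 4. The collected spans are batch-inserted into the temporary table
//    getSubTreeSpans<hash>, and the rewritten outer query executes against it.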


@@ -0,0 +1,29 @@
package db
import (
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/jmoiron/sqlx"
basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/interfaces"
)
type ClickhouseReader struct {
conn clickhouse.Conn
appdb *sqlx.DB
*basechr.ClickHouseReader
}
func NewDataConnector(localDB *sqlx.DB, promConfigPath string, lm interfaces.FeatureLookup) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm)
return &ClickhouseReader{
conn: ch.GetConn(),
appdb: localDB,
ClickHouseReader: ch,
}
}
func (r *ClickhouseReader) Start(readerReady chan bool) {
r.ClickHouseReader.Start(readerReady)
}


@@ -0,0 +1,222 @@
package db
import (
"errors"
"strconv"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
// SmartTraceAlgorithm is an algorithm to find the target span and build a tree of spans around it with the given levelUp and levelDown parameters and the given spanLimit
func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
var spans []*model.SpanForTraceDetails
// Build a slice of spans from the payload
for _, spanItem := range payload {
var parentID string
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
parentID = spanItem.References[0].SpanId
}
span := &model.SpanForTraceDetails{
TimeUnixNano: spanItem.TimeUnixNano,
SpanID: spanItem.SpanID,
TraceID: spanItem.TraceID,
ServiceName: spanItem.ServiceName,
Name: spanItem.Name,
Kind: spanItem.Kind,
DurationNano: spanItem.DurationNano,
TagMap: spanItem.TagMap,
ParentID: parentID,
Events: spanItem.Events,
HasError: spanItem.HasError,
}
spans = append(spans, span)
}
// Build span trees from the spans
roots, err := buildSpanTrees(&spans)
if err != nil {
return nil, err
}
targetSpan := &model.SpanForTraceDetails{}
// Find the target span in the span trees
for _, root := range roots {
targetSpan, err = breadthFirstSearch(root, targetSpanId)
if targetSpan != nil {
break
}
if err != nil {
zap.S().Error("Error during BreadthFirstSearch(): ", err)
return nil, err
}
}
// If the target span is not found, return span not found error
if targetSpan == nil {
return nil, errors.New("span not found")
}
// Build the final result
parents := []*model.SpanForTraceDetails{}
// Get the parent spans of the target span up to the given levelUp parameter and spanLimit
preParent := targetSpan
for i := 0; i < levelUp+1; i++ {
if i == levelUp {
preParent.ParentID = ""
}
if spanLimit-len(preParent.Children) <= 0 {
parents = append(parents, preParent)
parents = append(parents, preParent.Children[:spanLimit]...)
spanLimit -= (len(preParent.Children[:spanLimit]) + 1)
preParent.ParentID = ""
break
}
parents = append(parents, preParent)
parents = append(parents, preParent.Children...)
spanLimit -= (len(preParent.Children) + 1)
preParent = preParent.ParentSpan
if preParent == nil {
break
}
}
// Get the child spans of the target span until the given levelDown and spanLimit
preParents := []*model.SpanForTraceDetails{targetSpan}
children := []*model.SpanForTraceDetails{}
for i := 0; i < levelDown && len(preParents) != 0 && spanLimit > 0; i++ {
parents := []*model.SpanForTraceDetails{}
for _, parent := range preParents {
if spanLimit-len(parent.Children) <= 0 {
children = append(children, parent.Children[:spanLimit]...)
spanLimit -= len(parent.Children[:spanLimit])
break
}
children = append(children, parent.Children...)
parents = append(parents, parent.Children...)
}
preParents = parents
}
// Store the final list of spans in the resultSpanSet map to avoid duplicates
resultSpansSet := make(map[*model.SpanForTraceDetails]struct{})
resultSpansSet[targetSpan] = struct{}{}
for _, parent := range parents {
resultSpansSet[parent] = struct{}{}
}
for _, child := range children {
resultSpansSet[child] = struct{}{}
}
searchSpansResult := []basemodel.SearchSpansResult{{
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
Events: make([][]interface{}, len(resultSpansSet)),
},
}
// Convert the resultSpansSet map to searchSpansResult
i := 0 // index for spans
for item := range resultSpansSet {
references := []basemodel.OtelSpanRef{
{
TraceId: item.TraceID,
SpanId: item.ParentID,
RefType: "CHILD_OF",
},
}
referencesStringArray := []string{}
for _, item := range references {
referencesStringArray = append(referencesStringArray, item.ToString())
}
keys := make([]string, 0, len(item.TagMap))
values := make([]string, 0, len(item.TagMap))
for k, v := range item.TagMap {
keys = append(keys, k)
values = append(values, v)
}
if item.Events == nil {
item.Events = []string{}
}
searchSpansResult[0].Events[i] = []interface{}{
item.TimeUnixNano,
item.SpanID,
item.TraceID,
item.ServiceName,
item.Name,
strconv.Itoa(int(item.Kind)),
strconv.FormatInt(item.DurationNano, 10),
keys,
values,
referencesStringArray,
item.Events,
item.HasError,
}
i++ // increment index
}
return searchSpansResult, nil
}
// buildSpanTrees builds trees of spans from a list of spans.
func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTraceDetails, error) {
// Build a map of spanID to span for fast lookup
var roots []*model.SpanForTraceDetails
spans := *spansPtr
mapOfSpans := make(map[string]*model.SpanForTraceDetails, len(spans))
for _, span := range spans {
if span.ParentID == "" {
roots = append(roots, span)
}
mapOfSpans[span.SpanID] = span
}
// Build the span tree by adding children to the parent spans
for _, span := range spans {
if span.ParentID == "" {
continue
}
parent := mapOfSpans[span.ParentID]
// If the parent span is not found, add current span to list of roots
if parent == nil {
// zap.S().Debug("Parent Span not found parent_id: ", span.ParentID)
roots = append(roots, span)
span.ParentID = ""
continue
}
span.ParentSpan = parent
parent.Children = append(parent.Children, span)
}
return roots, nil
}
// breadthFirstSearch performs a breadth-first search on the span tree to find the target span.
func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*model.SpanForTraceDetails, error) {
queue := []*model.SpanForTraceDetails{spansPtr}
visited := make(map[string]bool)
for len(queue) > 0 {
current := queue[0]
visited[current.SpanID] = true
queue = queue[1:]
if current.SpanID == targetId {
return current, nil
}
for _, child := range current.Children {
if !visited[child.SpanID] {
queue = append(queue, child)
}
}
}
return nil, nil
}
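A hedged usage sketch for SmartTraceAlgorithm; payload would come from the trace fetch, and the span id and limits below are invented:

// keep up to 2 ancestor levels and 3 descendant levels around the target span,
// capped at 100 spans overall
spansResult, err := SmartTraceAlgorithm(payload, "6e9f0a1b2c3d4e5f", 2, 3, 100)
if err != nil {
// a "span not found" error means the target span id was not in the payload
zap.S().Error("smart trace failed: ", err)
}
_ = spansResult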


@@ -0,0 +1,442 @@
package app
import (
"context"
"fmt"
"net"
"net/http"
_ "net/http/pprof" // http profiler
"os"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/jmoiron/sqlx"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"go.signoz.io/signoz/ee/query-service/app/api"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/interfaces"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
type ServerOptions struct {
PromConfigPath string
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
}
// Server runs HTTP api service
type Server struct {
serverOptions *ServerOptions
conn net.Listener
ruleManager *rules.Manager
separatePorts bool
// public http router
httpConn net.Listener
httpServer *http.Server
// private http
privateConn net.Listener
privateHTTP *http.Server
// feature flags
featureLookup baseint.FeatureLookup
unavailableChannel chan healthcheck.Status
}
// HealthCheckStatus returns the health check status channel that a client can subscribe to
func (s Server) HealthCheckStatus() chan healthcheck.Status {
return s.unavailableChannel
}
// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {
modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
if err != nil {
return nil, err
}
localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
if err != nil {
return nil, err
}
localDB.SetMaxOpenConns(10)
// initiate license manager
lm, err := licensepkg.StartManager("sqlite", localDB)
if err != nil {
return nil, err
}
// set license manager as feature flag provider in dao
modelDao.SetFlagProvider(lm)
readerReady := make(chan bool)
var reader interfaces.DataConnector
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
zap.S().Info("Using ClickHouse as datastore ...")
qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath, lm)
go qb.Start(readerReady)
reader = qb
} else {
return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
}
<-readerReady
rm, err := makeRulesManager(serverOptions.PromConfigPath,
baseconst.GetAlertManagerApiPrefix(),
serverOptions.RuleRepoURL,
localDB,
reader,
serverOptions.DisableRules)
if err != nil {
return nil, err
}
// start the usagemanager
usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn())
if err != nil {
return nil, err
}
err = usageManager.Start()
if err != nil {
return nil, err
}
telemetry.GetInstance().SetReader(reader)
apiOpts := api.APIHandlerOptions{
DataConnector: reader,
AppDao: modelDao,
RulesManager: rm,
FeatureFlags: lm,
LicenseManager: lm,
}
apiHandler, err := api.NewAPIHandler(apiOpts)
if err != nil {
return nil, err
}
s := &Server{
// logger: logger,
// tracer: tracer,
ruleManager: rm,
serverOptions: serverOptions,
unavailableChannel: make(chan healthcheck.Status),
}
httpServer, err := s.createPublicServer(apiHandler)
if err != nil {
return nil, err
}
s.httpServer = httpServer
privateServer, err := s.createPrivateServer(apiHandler)
if err != nil {
return nil, err
}
s.privateHTTP = privateServer
return s, nil
}
func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
r := mux.NewRouter()
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddlewarePrivate)
apiHandler.RegisterPrivateRoutes(r)
c := cors.New(cors.Options{
//todo(amol): find out a way to add exact domain or
// ip here for alert manager
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
})
handler := c.Handler(r)
handler = handlers.CompressHandler(handler)
return &http.Server{
Handler: handler,
}, nil
}
func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
r := mux.NewRouter()
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)
apiHandler.RegisterRoutes(r)
apiHandler.RegisterMetricsRoutes(r)
apiHandler.RegisterLogsRoutes(r)
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control"},
})
handler := c.Handler(r)
handler = handlers.CompressHandler(handler)
return &http.Server{
Handler: handler,
}, nil
}
// loggingMiddleware is used for logging public api calls
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.S().Info(path, "\ttimeTaken: ", time.Since(startTime))
})
}
// loggingMiddlewarePrivate is used for logging private api calls
// from internal services like alert manager
func loggingMiddlewarePrivate(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.S().Info(path, "\tprivatePort: true", "\ttimeTaken: ", time.Since(startTime))
})
}
type loggingResponseWriter struct {
http.ResponseWriter
statusCode int
}
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
// we default to that status code.
return &loggingResponseWriter{w, http.StatusOK}
}
func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
}
// Flush implements the http.Flusher interface.
func (lrw *loggingResponseWriter) Flush() {
// guard the type assertion so a non-flushing ResponseWriter cannot panic
if f, ok := lrw.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
}
})
}
func setTimeoutMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var cancel context.CancelFunc
// check if route is not excluded
url := r.URL.Path
if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout*time.Second)
defer cancel()
}
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
})
}
// initListeners initialises listeners of the server
func (s *Server) initListeners() error {
// listen on public port
var err error
publicHostPort := s.serverOptions.HTTPHostPort
if publicHostPort == "" {
return fmt.Errorf("baseconst.HTTPHostPort is required")
}
s.httpConn, err = net.Listen("tcp", publicHostPort)
if err != nil {
return err
}
zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
// listen on private port to support internal services
privateHostPort := s.serverOptions.PrivateHostPort
if privateHostPort == "" {
return fmt.Errorf("baseconst.PrivateHostPort is required")
}
s.privateConn, err = net.Listen("tcp", privateHostPort)
if err != nil {
return err
}
zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
return nil
}
// Start listening on http and private http port concurrently
func (s *Server) Start() error {
// initiate rule manager first
if !s.serverOptions.DisableRules {
s.ruleManager.Start()
} else {
zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
}
err := s.initListeners()
if err != nil {
return err
}
var httpPort int
if port, err := utils.GetPort(s.httpConn.Addr()); err == nil {
httpPort = port
}
go func() {
// use the strongly-typed logger here: zap.S() would print the typed fields verbatim
zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
switch err := s.httpServer.Serve(s.httpConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
zap.L().Error("Could not start HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
}()
go func() {
zap.L().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
if err != nil {
zap.L().Error("Could not start pprof server", zap.Error(err))
}
}()
var privatePort int
if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
privatePort = port
}
zap.S().Info("starting private http")
go func() {
zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
switch err := s.privateHTTP.Serve(s.privateConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
zap.S().Info("private http server closed")
default:
zap.L().Error("Could not start private HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
}()
return nil
}
func makeRulesManager(
promConfigPath,
alertManagerURL string,
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
disableRules bool) (*rules.Manager, error) {
// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
if err != nil {
return nil, fmt.Errorf("failed to create pql engine : %v", err)
}
// notifier opts
notifierOpts := basealm.NotifierOptions{
QueueCapacity: 10000,
Timeout: 1 * time.Second,
AlertManagerURLs: []string{alertManagerURL},
}
// create manager opts
managerOpts := &rules.ManagerOptions{
NotifierOpts: notifierOpts,
Queriers: &rules.Queriers{
PqlEngine: pqle,
Ch: ch.GetConn(),
},
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: nil,
DisableRules: disableRules,
}
// create Manager
manager, err := rules.NewManager(managerOpts)
if err != nil {
return nil, fmt.Errorf("rule manager error: %v", err)
}
zap.S().Info("rules manager is ready")
return manager, nil
}
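A minimal bootstrap sketch for this Server, assuming it is wired from a main package; the option values are invented, and NewServer requires the STORAGE env var to be "clickhouse":

os.Setenv("STORAGE", "clickhouse") // the only datastore NewServer accepts
opts := &ServerOptions{
PromConfigPath:  "config/prometheus.yml", // assumed path
HTTPHostPort:    "0.0.0.0:8080",
PrivateHostPort: "0.0.0.0:8085",
}
server, err := NewServer(opts)
if err != nil {
zap.L().Fatal("failed to create server", zap.Error(err))
}
if err := server.Start(); err != nil {
zap.L().Fatal("failed to start server", zap.Error(err))
}
<-server.HealthCheckStatus() // block until a serve goroutine reports Unavailable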


@@ -0,0 +1,30 @@
package constants
import (
"os"
)
const (
DefaultSiteURL = "https://localhost:3301"
)
var LicenseSignozIo = "https://license.signoz.io/api/v1"
var SpanLimitStr = GetOrDefaultEnv("SPAN_LIMIT", "5000")
func GetOrDefaultEnv(key string, fallback string) string {
v := os.Getenv(key)
if len(v) == 0 {
return fallback
}
return v
}
// constant functions that override env vars
// GetDefaultSiteURL returns the default site url, primarily
// used to send the saml request and allow the backend to
// handle the http redirect
func GetDefaultSiteURL() string {
return GetOrDefaultEnv("SIGNOZ_SITE_URL", DefaultSiteURL)
}
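Since every constant funnels through GetOrDefaultEnv, a deployment can override it per process; a small sketch (strconv import assumed):

// SPAN_LIMIT=10000 ./query-service  -> SpanLimitStr == "10000"
// ./query-service                   -> SpanLimitStr == "5000" (fallback)
limit, err := strconv.Atoi(SpanLimitStr)
if err != nil {
limit = 5000 // fall back to the documented default on a malformed override
}
_ = limit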


@@ -0,0 +1,18 @@
package dao
import (
"fmt"
"go.signoz.io/signoz/ee/query-service/dao/sqlite"
)
func InitDao(engine, path string) (ModelDao, error) {
switch engine {
case "sqlite":
return sqlite.InitDB(path)
default:
return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
}
}
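A hedged usage sketch; the sqlite path below is hypothetical:

modelDao, err := dao.InitDao("sqlite", "/var/lib/signoz/signoz.db") // hypothetical path
if err != nil {
// any engine other than "sqlite" yields an unsupported-engine error
panic(err)
}
_ = modelDao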


@@ -0,0 +1,35 @@
package dao
import (
"context"
"net/url"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/model"
basedao "go.signoz.io/signoz/pkg/query-service/dao"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
type ModelDao interface {
basedao.ModelDao
// SetFlagProvider sets the feature lookup provider
SetFlagProvider(flags baseint.FeatureLookup)
DB() *sqlx.DB
// auth methods
PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
// org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
}


@@ -0,0 +1,136 @@
package sqlite
import (
"context"
"fmt"
"net/url"
"strings"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.uber.org/zap"
)
// PrepareSsoRedirect prepares redirect page link after SSO response
// is successfully parsed (i.e. valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error())
return "", model.BadRequestStr("invalid user email received from the auth provider")
}
tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
if err != nil {
zap.S().Errorf("failed to generate token for SSO login user", err)
return "", model.InternalErrorStr("failed to generate token for the user")
}
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
userPayload.User.Id,
tokenStore.RefreshJwt), nil
}
func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
return false, apierr
}
if domain != nil && domain.SsoEnabled {
// sso is enabled, check if the user has admin role
userPayload, baseapierr := m.GetUserByEmail(ctx, email)
if baseapierr != nil || userPayload == nil {
return false, baseapierr
}
if userPayload.Role != baseconst.AdminGroup {
return false, model.BadRequest(fmt.Errorf("auth method not supported"))
}
}
return true, nil
}
// PrecheckLogin is called when the login or signup page is loaded
// to check whether sso login should be prompted
func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError) {
// assume user is valid unless proven otherwise
resp := &model.PrecheckResponse{IsUser: true, CanSelfRegister: false}
// check if email is a valid user
userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
if baseApiErr != nil {
return resp, baseApiErr
}
if userPayload == nil {
resp.IsUser = false
}
ssoAvailable := true
err := m.checkFeature(model.SSO)
if err != nil {
switch err.(type) {
case basemodel.ErrFeatureUnavailable:
// do nothing, just skip sso
ssoAvailable = false
default:
zap.S().Errorf("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
return resp, model.BadRequest(err)
}
}
if ssoAvailable {
// find domain from email
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
var emailDomain string
emailComponents := strings.Split(email, "@")
// require two components so an address without "@" cannot cause an index panic
if len(emailComponents) > 1 {
emailDomain = emailComponents[1]
}
zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
return resp, apierr
}
if orgDomain != nil && orgDomain.SsoEnabled {
// saml is enabled for this domain, lets prepare sso url
if sourceUrl == "" {
sourceUrl = constants.GetDefaultSiteURL()
}
// parse source url that generated the login request
escapedUrl, err := url.QueryUnescape(sourceUrl)
if err != nil {
zap.S().Errorf("failed to unescape source url: %v", err)
return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
}
siteUrl, err := url.Parse(escapedUrl)
if err != nil {
zap.S().Errorf("failed to parse referer: %v", err)
return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
}
// build Idp URL that will authenticate the user
// the front-end will redirect user to this url
resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
if err != nil {
zap.S().Errorf("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), err)
return resp, model.InternalError(err)
}
// set SSO to true, as the url is generated correctly
resp.SSO = true
}
}
return resp, nil
}
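A sketch of how a login handler might consume PrecheckLogin, assuming a modelDao m and a context ctx in scope; the email and source url are invented:

resp, apierr := m.PrecheckLogin(ctx, "jane@corp.example.com", "https://signoz.corp.example.com/login")
if apierr == nil {
// resp.IsUser reports whether the email maps to an existing user;
// when resp.SSO is true the frontend should redirect to resp.SsoUrl
_ = resp
}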


@@ -0,0 +1,212 @@
package sqlite
import (
"context"
"database/sql"
"encoding/json"
"net/url"
"fmt"
"strings"
"time"
"github.com/google/uuid"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
// StoredDomain represents stored database record for org domain
type StoredDomain struct {
Id uuid.UUID `db:"id"`
Name string `db:"name"`
OrgId string `db:"org_id"`
Data string `db:"data"`
CreatedAt int64 `db:"created_at"`
UpdatedAt int64 `db:"updated_at"`
}
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
// user domain. The domain is further used to process validity of the response.
// When sending the login request to the IdP we send the relay state as a URL (site url)
// with domainId as query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
domainIdStr = strings.Replace(v[0], ":", "-", -1)
}
}
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
zap.S().Errorf("failed to parse domain id from relay state", err)
return nil, fmt.Errorf("failed to parse response from IdP response")
}
domain, err := m.GetDomain(ctx, domainId)
if err != nil {
zap.S().Errorf("failed to find domain received in IdP response: %v", err)
return nil, fmt.Errorf("invalid credentials")
}
// check domain separately so a nil domain cannot cause a nil error dereference
if domain == nil {
zap.S().Errorf("no domain found for the id received in IdP response")
return nil, fmt.Errorf("invalid credentials")
}
return domain, nil
}
// GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)
if err != nil {
if err == sql.ErrNoRows {
return nil, model.BadRequest(fmt.Errorf("invalid domain id"))
}
return nil, model.InternalError(err)
}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return domain, model.InternalError(err)
}
return domain, nil
}
// ListDomains gets the list of auth domains by org id
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
domains := []model.OrgDomain{}
stored := []StoredDomain{}
err := m.DB().SelectContext(ctx, &stored, `SELECT * FROM org_domains WHERE org_id=$1`, orgId)
if err != nil {
if err == sql.ErrNoRows {
return []model.OrgDomain{}, nil
}
return nil, model.InternalError(err)
}
for _, s := range stored {
domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
if err := domain.LoadConfig(s.Data); err != nil {
zap.S().Errorf("ListDomains() failed", zap.Error(err))
}
domains = append(domains, domain)
}
return domains, nil
}
// CreateDomain creates a new auth domain
func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
if domain.Id == uuid.Nil {
domain.Id = uuid.New()
}
if domain.OrgId == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
}
configJson, err := json.Marshal(domain)
if err != nil {
zap.S().Errorf("failed to unmarshal domain config", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed"))
}
_, err = m.DB().ExecContext(ctx,
"INSERT INTO org_domains (id, name, org_id, data, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6)",
domain.Id,
domain.Name,
domain.OrgId,
configJson,
time.Now().Unix(),
time.Now().Unix())
if err != nil {
zap.S().Errorf("failed to insert domain in db", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed"))
}
return nil
}
// UpdateDomain updates stored config params for a domain
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
if domain.Id == uuid.Nil {
zap.S().Errorf("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain update failed"))
}
configJson, err := json.Marshal(domain)
if err != nil {
zap.S().Errorf("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed"))
}
_, err = m.DB().ExecContext(ctx,
"UPDATE org_domains SET data = $1, updated_at = $2 WHERE id = $3",
configJson,
time.Now().Unix(),
domain.Id)
if err != nil {
zap.S().Errorf("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed"))
}
return nil
}
// DeleteDomain deletes an org domain
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
if id == uuid.Nil {
zap.S().Errorf("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain delete failed"))
}
_, err := m.DB().ExecContext(ctx,
"DELETE FROM org_domains WHERE id = $1",
id)
if err != nil {
zap.S().Errorf("domain delete failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain delete failed"))
}
return nil
}
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {
if email == "" {
return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
}
components := strings.Split(email, "@")
if len(components) < 2 {
return nil, model.BadRequest(fmt.Errorf("invalid email address"))
}
parsedDomain := components[1]
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, parsedDomain)
if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, model.InternalError(err)
}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return domain, model.InternalError(err)
}
return domain, nil
}
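For illustration, the relay state echoed back by an IdP might look like this (values invented, a modelDao m assumed in scope); note the ':' to '-' normalization above, which restores a UUID some IdPs mangle:

relayState, _ := url.Parse("https://signoz.example.com/api/v1/complete/saml?domainId=9f3b1c2a:4d5e:4f60:8a71:0b2c3d4e5f60")
domain, err := m.GetDomainFromSsoResponse(context.Background(), relayState)
// the domainId is normalized to 9f3b1c2a-4d5e-4f60-8a71-0b2c3d4e5f60 before the lookup
_, _ = domain, err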


@@ -0,0 +1,63 @@
package sqlite
import (
"fmt"
"github.com/jmoiron/sqlx"
basedao "go.signoz.io/signoz/pkg/query-service/dao"
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
)
type modelDao struct {
*basedsql.ModelDaoSqlite
flags baseint.FeatureLookup
}
// SetFlagProvider sets the feature lookup provider
func (m *modelDao) SetFlagProvider(flags baseint.FeatureLookup) {
m.flags = flags
}
// CheckFeature confirms if a feature is available
func (m *modelDao) checkFeature(key string) error {
if m.flags == nil {
return fmt.Errorf("flag provider not set")
}
return m.flags.CheckFeature(key)
}
// InitDB creates and extends base model DB repository
func InitDB(dataSourceName string) (*modelDao, error) {
dao, err := basedsql.InitDB(dataSourceName)
if err != nil {
return nil, err
}
// set package variable so dependent base methods (e.g. AuthCache) will work
basedao.SetDB(dao)
m := &modelDao{ModelDaoSqlite: dao}
tableSchema := `
PRAGMA foreign_keys = ON;
CREATE TABLE IF NOT EXISTS org_domains(
id TEXT PRIMARY KEY,
org_id TEXT NOT NULL,
name VARCHAR(50) NOT NULL UNIQUE,
created_at INTEGER NOT NULL,
updated_at INTEGER,
data TEXT NOT NULL,
FOREIGN KEY(org_id) REFERENCES organizations(id)
);`
_, err = m.DB().Exec(tableSchema)
if err != nil {
return nil, fmt.Errorf("error in creating tables: %v", err.Error())
}
return m, nil
}
func (m *modelDao) DB() *sqlx.DB {
return m.ModelDaoSqlite.DB()
}


@@ -0,0 +1,20 @@
package signozio
type status string
const (
statusSuccess status = "success"
statusError status = "error"
)
type ActivationResult struct {
Status status `json:"status"`
Data *ActivationResponse `json:"data,omitempty"`
ErrorType string `json:"errorType,omitempty"`
Error string `json:"error,omitempty"`
}
type ActivationResponse struct {
ActivationId string `json:"ActivationId"`
PlanDetails string `json:"PlanDetails"`
}
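For reference, a hypothetical license-server payload these types would decode (field values invented, encoding/json import assumed):

body := []byte(`{"status":"success","data":{"ActivationId":"act-123","PlanDetails":"pro"}}`)
var result ActivationResult
if err := json.Unmarshal(body, &result); err == nil && result.Status == statusSuccess {
_ = result.Data.ActivationId // "act-123"
}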


@@ -0,0 +1,159 @@
package signozio
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.uber.org/zap"
)
var C *Client
const (
POST = "POST"
APPLICATION_JSON = "application/json"
)
type Client struct {
Prefix string
}
func New() *Client {
return &Client{
Prefix: constants.LicenseSignozIo,
}
}
func init() {
C = New()
}
// ActivateLicense sends key to license.signoz.io and gets activation data
func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
licenseReq := map[string]string{
"key": key,
"siteId": siteId,
}
reqString, _ := json.Marshal(licenseReq)
httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {
zap.S().Errorf("failed to connect to license.signoz.io: %v", err)
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
}
// close the body on every return path, including read errors
defer httpResponse.Body.Close()
httpBody, err := io.ReadAll(httpResponse.Body)
if err != nil {
zap.S().Errorf("failed to read activation response from license.signoz.io: %v", err)
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
}
// read api request result
result := ActivationResult{}
err = json.Unmarshal(httpBody, &result)
if err != nil {
zap.S().Errorf("failed to marshal activation response from license.signoz.io", err)
return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
}
switch httpResponse.StatusCode {
case 200, 201:
return result.Data, nil
case 400, 401:
return nil, model.BadRequest(fmt.Errorf("failed to activate: %s", result.Error))
default:
return nil, model.InternalError(fmt.Errorf("failed to activate: %s", result.Error))
}
}
// ValidateLicense validates the license key
func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
validReq := map[string]string{
"activationId": activationId,
}
reqString, _ := json.Marshal(validReq)
response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
}
// close the body on every return path, including read errors
defer response.Body.Close()
body, err := io.ReadAll(response.Body)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
}
switch response.StatusCode {
case 200, 201:
a := ActivationResult{}
err = json.Unmarshal(body, &a)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to unmarshal license validation response"))
}
return a.Data, nil
case 400, 401:
return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io"))
default:
return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io"))
}
}
func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, POST, url, body)
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", contentType)
return req, err
}
// SendUsage reports the usage of signoz to license server
func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
reqString, _ := json.Marshal(usage)
req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {
return model.BadRequest(errors.Wrap(err, "unable to create http request"))
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
}
// close the body on every return path, including read errors
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
}
switch res.StatusCode {
case 200, 201:
return nil
case 400, 401:
return model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io"))
default:
return model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io"))
}
}
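A hedged sketch of how the activation/validation pair might be exercised; the key and site id are invented:

activation, apiErr := ActivateLicense("my-license-key", "site-uuid")
if apiErr != nil {
zap.S().Errorf("license activation failed: %v", apiErr)
} else {
// re-validate periodically using the returned activation id
refreshed, apiErr := ValidateLicense(activation.ActivationId)
_, _ = refreshed, apiErr
}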

Some files were not shown because too many files have changed in this diff.