Compare commits

...

151 Commits
v0.5.0 ... v0.6

Author SHA1 Message Date
Ankit Nayan
5510c67dbf release: v0.6.1 2022-02-11 16:22:51 +05:30
Ankit Nayan
347d73022a Merge branch 'main' into develop 2022-02-11 16:17:00 +05:30
Ankit Nayan
79e43f594d Merge pull request #702 from palash-signoz/signup-unmount-state
bug: signup state is now not toggled when component is not toggled
2022-02-11 16:14:41 +05:30
Ankit Nayan
0df3052e13 Merge pull request #677 from SoniaisMad/fix/dashboard-api-call-apply-button
fix: Dashboard page does not call API on clicking on apply button
2022-02-11 16:09:19 +05:30
Ankit Nayan
43983bc643 Merge pull request #701 from palash-signoz/filename-hashed
feat: now webpack filename are hashed
2022-02-11 16:05:18 +05:30
Ankit Nayan
4b9ef95f7a Merge pull request #706 from palash-signoz/700-widget-error
bug(FE): error state in the bar panel is added
2022-02-11 16:03:44 +05:30
Palash gupta
3db790c3c7 bug: merge conflit is resolved 2022-02-11 15:52:04 +05:30
Palash gupta
7d68e9cebc Merge branch 'develop' into 700-widget-error 2022-02-11 15:49:04 +05:30
Ankit Nayan
1e6df307a0 Merge pull request #712 from palash-signoz/full-view-graph-legend-fix
bug: full view legend is now fixed
2022-02-11 15:42:38 +05:30
Ankit Nayan
e6a53d6c06 Merge pull request #711 from palash-signoz/dashboard-graphs
bug: dashboard graph is now fixed
2022-02-11 15:38:35 +05:30
Palash gupta
db9052ea6e bug: full view legend is now fixed 2022-02-11 15:00:00 +05:30
Palash gupta
45eb201efd bug: dashboard graph is now fixed 2022-02-11 14:09:05 +05:30
Palash gupta
744dfd010a chore: modal is updated in the error state 2022-02-11 12:00:46 +05:30
Palash gupta
dc737f385a bug: in the error state bar panel is added 2022-02-10 22:20:31 +05:30
Palash gupta
0ae5b824d9 bug: signup state is now not toggled when component is not toggled 2022-02-10 16:44:38 +05:30
Palash gupta
828bd3bac6 feat: now webpack filename are hashed 2022-02-10 16:37:14 +05:30
Ankit Nayan
10f4fb53ac Merge pull request #699 from Tazer/fix-constants-alertmanager
fix: added support for custom alertmanager url
2022-02-10 11:46:12 +05:30
Patrik
fdc8670fab fix: added support for custom alertmanager url 2022-02-09 22:05:27 +01:00
Pranay Prateek
d7fa503f04 Update README.md 2022-02-10 00:28:00 +05:30
Ankit Nayan
b37bc0620d Merge pull request #696 from SigNoz/release/v0.6.0
Release/v0.6.0
2022-02-09 23:32:52 +05:30
Ankit Nayan
9bf37b391e release: v0.6.0 2022-02-09 21:52:59 +05:30
Prashant Shahi
a5bf4c1a61 ci(push): 👷 push workflow update (#695)
* ci(push): 👷 remove prefix v for docker images

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* ci(push): 👷 remove path trigger and update tags regex

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-02-09 21:46:07 +05:30
Prashant Shahi
420f601a68 ci(push): 👷 add develop branch and remove second tag (#694)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-02-09 20:35:25 +05:30
palash-signoz
03eac8963f chore: Env fix (#693)
* bug(UI): frontend build is fixed

* chore; build is fixed

* chore: build is fixed
2022-02-09 17:22:21 +05:30
palash-signoz
ffd2c9b466 bug(UI): frontend build is fixed (#692) 2022-02-09 16:47:34 +05:30
Prashant Shahi
51b11d0119 fix(makefile): 🩹 buildx fix for pushing images 2022-02-09 16:00:53 +05:30
Ankit Nayan
c5ee8cd586 chore: merging main to develop 2022-02-09 14:59:40 +05:30
palash-signoz
acbe7f91cb bug(UI): optimisation config is updated (#650) 2022-02-09 11:50:29 +05:30
Pranshu Chittora
07f4fcb216 fix(FE): Sidebar navigation when collapsed (#686)
* Update README.md

* ci(k3s): 💚 fix correct raw github URL for hotrod (#661)

Signed-off-by: Prashant Shahi <prashant@signoz.io>
(cherry picked from commit d92a3e64f5)

* chore: 🚚 rename config .yaml to yml for behaviorbot (#673)

Signed-off-by: Prashant Shahi <prashant@signoz.io>
(cherry picked from commit cd04a39d3d)

* fix(FE): sidebar navigation when collapsed

Co-authored-by: Pranay Prateek <pranay@signoz.io>
Co-authored-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-02-09 11:48:54 +05:30
palash-signoz
1ee2e302e2 Feature(FE): signup page (#642)
* chore: icon is updated

* feat: signup page design is updated

* chore: set get user pref is added

* chore: svg is added

* feat: signup page is updated

* feat: signup page is updated
2022-02-09 11:44:08 +05:30
palash-signoz
be8ec756c6 Feat (UI) :Trace Filter page is updated (#684)
* dayjs and less loader is added

* webpack config is added

* moment is removed

* useDebounceFunction hook is made

* old components and reducer is removed

* search is updated

* changes are upadted for the trace page as skeleton is ready

* chore: method is change from dayjs

* convertObject into params is updated

* initial filters are updated

* initial and final filter issue is fixed

* selection of the filter is updated

* filters are now able to selected

* checkbox disable when loading is in progress

* chore: getFilter filename is updated

* feat: clear all and exapanded filter is updated

* chore: clearAll and expand panel is updated

* feat: useClickOutSide hook is added

* chore: get filter url becomes encoded

* chore: get tag filters is added

* feat: search tags is wip

* bug: global max,min on change bug is resolved

* chore: getInitial filter is updated

* chore: expand panel is updated

* chore: get filter is updated

* chore: code smells is updated

* feat: loader is added in the panel header to show the loading

* chore: search tags in wip

* chore: button style is updated

* chore: search in wip

* chore: search ui is updated from the global state

* chore: search in wip

* chore: search is updated

* chore: getSpansAggregate section is updated

* useOutside click is updated

* useclickoutside hook is updated

* useclickoutside hook is updated

* parsing is updated

* initial filter is updated

* feat: trace table is updated

* chore: trace table is updated

* chore: useClickout side is updated for the search panel

* feat: unneccesary re-render and code is removed

* chore: trace table is updated

* custom component is removed and used antd search component

* error state is updated over search component

* chore: search bar is updated

* chore: left panel search and table component connection is updated

* chore: trace filter config is updated

* chore: for graph reducer is updated

* chore: graph is updated

* chore: table is updated

* chore: spans is updated

* chore: reducer is updated

* chore: graph component is updated

* chore: number of graph condition is updated

* chore: input and range slider is now sync

* chore: duration is updated

* chore: clearAllFilter is updated

* chore: duration slider is updated

* chore: duration is updated and panel body loading is updated

* chore: slider container is added to add padding from left to right

* chore: Select filter is updated

* chore: duration filter is updated

* chore: Divider is added

* chore: none option is added in both the dropdown

* chore: icon are updated

* chore: added padding in the pages component

* chore: none is updated

* chore: antd notification is added in the redux action

* chore: some of the changes are updated

* chore: display value is updated for the filter panel heading

* chore: calulation is memorised

* chore: utils function are updated in trace reducer

* chore: getFilters are updated

* tracetable is updated

* chore: actions is updated

* chore: metrics application is updated

* chore: search on clear action is updated

* chore: serviceName panel position is updated

* chore: added the label in the duration

* bug: edge case is fixed

* chore: some more changes are updated

* chore: some more changes are updated

* chore: clear all is fixed

* chore: panel heading caret is updated

* chore: checkbox is updated

* chore: isError handler is updated over initial render

* chore: traces is updated

* fix: tag search is updated

* chore: loading is added in the trace table and soring is introduced in the trace table

* bug: multiple render for the key is fixed

* Bug(UI): new suggestion is updated

* feat: isTraceFilterEnum function is made

* bug: new changes are updated

* chore: get Filter is updated

* chore: application metrics params is updated

* chore: error is added in the application metrics

* chore: filters is updated

* chore: expand panel edge case is updated

* chore: expand panel is updated and utls: updateUrl function is updated

* chore: reset trace state when unmounted

* chore: getFilter action is updated

* chore: api duration is updated

* chore: useEffect dependency is updated

* chore: filter is updated with the new arch

* bug: trace table issue is resolved

* chore: application rps url is updated for trace

* chore: duration filter is updated

* chore: search key is updated

* chore: filter is added in the search url

* bug: filter is fixed

* bug: filter is fixed

* bug: filter is fixed

* chore: reset trace data when unmounted

* chore: TopEnd point is added

* chore: getInitialSpanAggregate action is updated

* chore: application url is updated

* chore: no tags placeholder is updated

* chore: flow from customer is now fixed

* chore: search is updated

* chore: select all button is removed

* chore: prev filter is removed to show the result

* chore: config is updated

* chore: checkbox component is updated

* chore: span filter is updated

* chore: graph issue is resolved

* chore: selected is updated

* chore: all filter are selected

* feat: new trace page is updated

* chore: utils is updated

* feat: trace filter page is updated

* chore: duration is now fixed

* chore: duration clear filter is added

* chore: onClickCheck is updated

* chore: trace filter page is updated

* bug: some of bugs are resolved

* chore: duration body is updated

* chore: topEndPoint and application query is updated

* chore: user selection is updated in the duration filter

* chore: panel duration is updated

* chore: panel duration is updated

* chore: duration bug is solved

* chore: function display value is updated
2022-02-09 11:31:13 +05:30
Prashant Shahi
2de6574835 chore(k3s): 🩹 set up hotrod at start 2022-02-08 23:29:26 +05:30
Prashant Shahi
1acf009e62 docs: 📝 use 3301 for frontend port in README 2022-02-08 23:14:36 +05:30
Prashant Shahi
d22d1d1c3b refactor(ports): 💥 avoid exposing unnecessary ports and update frontend port to 3301 (#679)
* refactor(compose-yaml): ♻️ avoid unused and unnecessary ports mapping from compose files

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* refactor(frontend): 💥 change frontend port to 3301
BREAKING CHANGE:

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-02-08 22:47:06 +05:30
Prashant Shahi
6342e1cebc docs(deploy): 📝 Add README docs for deploy (#669)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-02-08 17:50:02 +05:30
Vishal Sharma
f74467e33c fix: exclude operation in trace APIs (#682) 2022-02-08 17:45:40 +05:30
palash-signoz
821b80acde chore: external address query is updated (#685) 2022-02-08 16:37:06 +05:30
Prashant Shahi
d41502df98 chore(helm-charts): 🚚 migrate helm charts to SigNoz/charts repository (#667)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-02-08 15:35:40 +05:30
Ankit Nayan
c1d4dc2ad6 fix: exclude added for status field (#681)
* fix: exclude added for status field

* chore: extracted status filtering to a function
2022-02-08 13:28:56 +05:30
Siddhant Khare
07183d5189 Gitpodify the Signoz (#634)
* chore(docs): updated lines of frontend & query sec

* fix: update baseURL for local & gitpod

* chore: allow all for dev to run on https

* chore(docs): add maintainer note at docker-compose

* chore: update gitignore to ignore .db & logs

* chore: upd lines of fe & query-service & notes

* feat: gitpodify the signoz with all envs. & ports

* fix: relative path of .scripts dir

* chore(ci): distribute tasks in gitpod.yml

* fix: run docker image while init

* fix: add empty url option for `baseURL`
2022-02-08 10:27:52 +05:30
Prashant Shahi
57992134bc chore: 🚚 rename config .yaml to yml for behaviorbot (#673)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
(cherry picked from commit cd04a39d3d)
2022-02-07 12:52:10 +05:30
Sonia Manoubi
e0a7002a29 fix: api call on apply button 2022-02-04 15:55:36 +01:00
Prashant Shahi
cd04a39d3d chore: 🚚 rename config .yaml to yml for behaviorbot
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-02-02 16:41:13 +05:30
Prashant Shahi
c372eac3e3 docs(contributing): 📝 Add Helm Chart contribute instructions (#668)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-02-02 15:22:35 +05:30
Vishal Sharma
fdd9287847 Fix 414 errors trace filter API (#660)
* fix: change trace filter APIs method from GET to POST

* fix: error filter param

* fix: json of aggregate params
2022-02-02 11:40:30 +05:30
Prashant Shahi
24f1404741 fix(compose-yaml): 🩹 infer max-file logging option as string
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-02-02 03:41:56 +05:30
Prashant Shahi
48ac20885f refactor(query-service): ♻️ Update ldflags and Makefile for dynamic versioning (#655)
* refactor(query-service): ♻️  Update ldflags and Makefile for dynamic versioning

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore: 🎨 Use blacnk spaces indentation in build details

* chore(query-service): 🎨  small build details format changes

* refactor(query-service): ♻️ refactor ldflags for go build
2022-02-01 23:03:16 +05:30
Mustaque Ahmed
ebb1c2ac79 fix: remove table default.signoz_spans from the codebase (#656)
* fix: remove table `default.signoz_spans` from the codebase

* fix: remove spanTable and archiveSpanTable from clickHouseReader
2022-02-01 10:26:31 +05:30
Prashant Shahi
e3c4bfce52 ci(k3s): 💚 fix correct raw github URL for hotrod (#661)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
(cherry picked from commit d92a3e64f5)
2022-01-31 19:07:45 +05:30
Prashant Shahi
d92a3e64f5 ci(k3s): 💚 fix correct raw github URL for hotrod
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-01-31 18:24:05 +05:30
Prashant Shahi
24162f8f96 chore(log-option): 🔧 set hotrod log options for hotrod app (#659)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-01-30 23:39:06 +05:30
Pranay Prateek
c7ffac46f5 Update README.md 2022-01-30 22:46:01 +05:30
Prashant Shahi
0d1526f6af docs: 📝 reverting minor docs changes
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-01-29 01:33:34 +05:30
Prashant Shahi
b0d68ac00f chore: install script improvements (#652)
* chore(install): 🔨  install script improvement

- remove ipify
- migrate from PostHog to Segment
- single function for sending event

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore: ⚰️ remove commented code

* chore(install): 🛂 update the auth token

* chore(install): 🔧 set context.default config true

* Revert "chore(install): 🔧 set context.default config true"

This reverts commit 0704013ac7.

* chore(install): 🔨 use uname sha for installation id

* refactor(slack): 🚚 use signoz.io/slack URL

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-01-29 01:20:25 +05:30
Prashant Shahi
8f0df5e1e3 ci(k3s): 🩹 simple fix as per the helm chart changes (#651) 2022-01-28 22:59:07 +05:30
Vishal Sharma
16fbbf8a0e exclude filter support and fix for not sending null string in groupby for aggregates API (#654)
* feat: add support to exclude filter params

* fix: null string in group by
2022-01-28 22:56:54 +05:30
Prashant Shahi
e823987eb0 build(docker): Two compose files for arm and amd (#638)
* build(docker): 🔨 Two compose files for arm and amd

* refactor(docker): ⚰️ remove env file from install script

* refactor: ⚰️ remove .gitkeep files from data folder

* chore(build): ⚰️ remove env files and update contributing docs

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* build: ♻️ use two compose files in Makefile

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* chore(docker): 🚚 revert back to using same dir and pin image tag

* Revert "chore: Add migration file path in otel collector config (#628)"

This reverts commit 8467d6a00c.

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-01-27 22:34:26 +05:30
Devesh Kumar
f5abab6766 Fixed svg color mismatch in light mode and dark mode (#504)
* fixed svg color mismatch in light mode and dark mode

Added props in parent file

fixed and added fillColor as props to the highest order of parent

* set React.CSSProperties

props renamed and code reused
2022-01-26 21:55:48 +05:30
Devesh Kumar
b55c362bbb Fixed toggle Button contrast in Light Theme (#505)
* fixed toggle Button contrast in Light Theme

refactored to styled props and fixed theme

set defaultChecked to isDarkMode value

* Refactored boolean logic
2022-01-26 21:55:11 +05:30
Anik Das
6e6fd9b44b closes #569: critical css using critters (#570)
* feat(ui): critical css inline using critters

Signed-off-by: Anik Das <anikdas0811@gmail.com>

* fix: remove duplicate preload key

Signed-off-by: Anik Das <anikdas0811@gmail.com>
2022-01-26 21:53:03 +05:30
palash-signoz
50a88a8726 BUG: refresh button is now fixed (#590)
* chore: issue is fixed

* chore: unused import is removed
2022-01-26 21:46:59 +05:30
Vishal Sharma
0f4e5c9ef0 change migration file path (#630)
* chore: Add migration file path in otel collector config

* Update otel-collector-config.yaml
2022-01-26 21:43:15 +05:30
Ankit Nayan
be5d1f0090 feat: adding disable and anonymous functionality to telemetry collected (#637)
* chore: changed lib

* chore: changed lib

* chore: changed lib

* chore: changed lib

* chore: changes in params

* chore: changes in params

* chore: moving telemetry to a separate package

* feat: enabling telemetry via env var

* chore: removing posthog api_key

* feat: send heartbeat every 6hr

* feat: enabled version in application

* feat: added getter and setter apis and struct for user preferences

* feat: added version to properties to event

* feat: added apis to set and get user preferences and get version

* chore: refactored get and set userPreferences apis to dao pattern

* chore: added checks for telemetry enabled and anonymous during initialization

* chore: changed anonymous user functionality

* chore: sanitization

* chore: added uuid for userPreferences to send when user is anonymous
2022-01-26 21:40:44 +05:30
Vishal Sharma
0ab91707e9 New Trace Filter Page API changes (Backend) (#646)
* build: integrate sql migrations for clickhouse

* feat: support error/exception attributes for trace

* chore: fixing dependencies for docker go client libs

* feat: get trace filter api checkpoint

* chore: fixing dependencies for go-migrate

* feat: add new columns

* feat: move mirgate run from docker to code

* fix: migration file 404 issue

* feat: getSpanFilter API

* fix: migrate version naming bug

* chore: change url param format to array

* feat: add getTagFilter API

* feat: add getFilteredSpans API

* fix: using OFFSET in sqlx driver

* feat: aggregates API on getFilteredSpan, use IN and NOT IN for tag filtering

* feat: add more function support to span aggregate API

* fix: null component edge case

* feat: groupBy support for filteredSpanAggregate

* feat: add function param to span aggregate API

* feat: add support to return totalSpans in getFilteredSpans API

* fix: don't return null string as keys in span filters

* chore: remove SQL migrations(moved to otel collector)

* fix: null string issue in aggregate API

* Merge main

* fix: trace API db query param

* fix: signoz sql db path

* fix: case when both error and ok status are selected

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-01-26 20:41:59 +05:30
Prashant Shahi
dcb17fb33a ci(k3s): k3s CI workflow enhancements (#643)
* ci(k3s): k3s CI workflow enhancements

* ci(k3s): 💚 Fix the names of deployment and statefulset

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-01-26 12:48:29 +05:30
Kartik Verma
9c07ac376d fix: update Contributing and Makefile for dev-setup (#599) 2022-01-25 16:31:19 +05:30
Prashant Shahi
40f9a4a5aa chore: ♻️ single manifest file for the hotrod (#639)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-01-25 12:29:55 +05:30
Prashant Shahi
8059fe14da chore: 🔧 Add behaviorbot config YAML (#640)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-01-25 11:33:08 +05:30
Ankit Nayan
2f665fcc63 chore: added version in clickhouse tag 2022-01-23 14:57:18 +05:30
Ankit Nayan
0e6a1082dc fix: init-db.sql restored 2022-01-23 14:53:44 +05:30
Vishal Sharma
1568075769 chore: update clean signoz command in install script (#631) 2022-01-22 23:16:09 +05:30
Prashant Shahi
cbbd3ce6ad chore: add codeowners for automatic review request (#633)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2022-01-22 16:08:19 +05:30
Vishal Sharma
af2399e627 Feat/support error tab page (#626)
* build: integrate sql migrations for clickhouse

* feat: support error/exception attributes for trace

* chore: fixing dependencies for docker go client libs

* chore: fixing dependencies for go-migrate

* feat: move mirgate run from docker to code

* fix: migration file 404 issue

* feat: error tab APIs

* chore: move migrations file

* chore: remove SQL migration (shifted to otel collector)

* chore: remove sql migration configs from dockerfile

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2022-01-21 00:31:58 +05:30
Ankit Nayan
50e8f32291 Revert "chore: Add migration file path in otel collector config (#628)" (#629)
This reverts commit 8467d6a00c.
2022-01-20 00:22:21 +05:30
Vishal Sharma
8467d6a00c chore: Add migration file path in otel collector config (#628)
* chore: Add migration file path in otel collector config

* Update otel-collector-config.yaml
2022-01-20 00:16:46 +05:30
Prashant Shahi
cac31072a9 ci: use pull_request_target for remove label permission (#618) 2022-01-17 21:35:44 +05:30
Yoni Bettan
8b47f4af21 ci: adding a dummy push to check if the image push workflow works (#609)
Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-17 16:12:18 +05:30
Yoni Bettan
b0b235cbc5 ci: making some improvements to e2e-k3s workflow (#615)
Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-17 15:59:27 +05:30
Yoni Bettan
e0e4c7afe6 ci: filtering 'push' workflow to main and release branches (#614)
Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-17 15:17:31 +05:30
Yoni Bettan
1eb0013352 ci: removing file filtering from some workflows (#610)
There are other files that can affect the correctness of the code rather
than the src files like the deployment yamls, Makefile etc.

Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-17 11:40:17 +05:30
Prashant Shahi
68d68c2b57 fix(frontend): 📌 pin mini-css-extract-plugin version to 2.4.5 to fix breaking builds (#612) 2022-01-16 14:55:14 +05:30
Yoni Bettan
274f1fe07f Merge pull request #605 from ybettan/local-builds
ci: removing the timeout from the rollout command
2022-01-12 12:23:50 +02:00
Yoni Bettan
51dc54bcb9 ci: removing the timeout from the rollout command
It makes the flow fail for some reason.

Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-12 11:45:09 +02:00
Yoni Bettan
0bc82237fc ci: making sure the sample-application is up before running the job (#603)
ci: making sure the sample-application is up before running the job

* tmp - timeout

Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-12 14:57:14 +05:30
Yoni Bettan
53045fc58e ci: inject local images to k3d instead of publishing them (#600) 2022-01-11 14:42:56 +05:30
Yoni Bettan
9808c03d6d Merge pull request #593 from ybettan/push-workflow-pr
ci: adding 'push' workflow
2022-01-10 12:38:50 +02:00
Yoni Bettan
cf5036bc31 Merge pull request #597 from ybettan/helm-wait
ci: using --wait helm install flag instead of waiting manually
2022-01-10 12:38:43 +02:00
Yoni Bettan
e08bf85edf ci: using --wait helm install flag instead of waiting manually
Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-10 12:28:05 +02:00
Yoni Bettan
e555e05f58 ci: adding 'push' workflow
This workflow will push up to 2 images with 4 tags, depending on
if they changed since the last image.

* query-service:<git sha>
* query-service:master

* frontend:<git sha>
* frontend:master

Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-09 13:30:33 +02:00
Yoni Bettan
88c3f50cb1 Merge pull request #596 from ybettan/remove-label
ci: requiring the 'ok-to-test' label for running some workflows
2022-01-09 13:27:54 +02:00
Yoni Bettan
e4ef059d19 ci: requiring the 'ok-to-test' label for running some workflows
As of now, the 'e2e-k3s' workflow will require the 'ok-to-test' label in
order to get triggered.

In addition to that, on each change to the PR on the relevant files,
Github will remove the label from it and it will be required again.

Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-08 21:19:04 +02:00
Yoni Bettan
b433d4ad4a Revert "ci: requiring the 'ok-to-test' label for running some workflows (#592)" (#595)
This reverts commit b3d5d6c281.
2022-01-08 16:37:38 +05:30
Yoni Bettan
b3d5d6c281 ci: requiring the 'ok-to-test' label for running some workflows (#592)
* ci: adding 'e2e' GH workflows

The flow contains of multiple steps:

    * build 'query-service' and 'frontend' images and push them to the image registry
    * deploy a disposable k3s cluster
    * deploy the app on the cluster
    * set a tunnel to allow accessing the UI from the web browser

Signed-off-by: Yoni Bettan <ybettan@redhat.com>

* ci: requiring the 'ok-to-test' label for running some workflows

As of now, the 'e2e' workflow will require the 'ok-to-test' label in
order to get triggered.

In addition to that, on each change to the PR, Github will remove the
label from it and it will be required again.

Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-08 12:51:46 +05:30
Yoni Bettan
9a2aa7bcbd ci: adding 'e2e' GH workflows (#579)
The flow contains of multiple steps:

    * build 'query-service' and 'frontend' images and push them to the image registry
    * deploy a disposable k3s cluster
    * deploy the app on the cluster
    * set a tunnel to allow accessing the UI from the web browser

Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-08 12:44:14 +05:30
Pranay Prateek
63c2e67cfc Update CONTRIBUTING.md 2022-01-06 21:35:31 +05:30
Yoni Bettan
1b398039e3 Swapping images on the "build" GH workflow. (#578)
query-service job is currently building flattener and flattener job is
currently building query-service.

This PR should fix that mix.

Signed-off-by: Yoni Bettan <ybettan@redhat.com>
2022-01-04 16:17:16 +05:30
Ankit Nayan
f9c214bd53 release: v0.5.4 2021-12-24 13:25:14 +05:30
pal-sig
7d50895464 chore(query): query is updated for application and external graphs (#562) 2021-12-24 13:16:05 +05:30
Anurag Gupta
2031377dcb Feat(UI): Added webpack bundle analyser for dev and prod (#503)
* Added webpack bundle analyser for dev and prod

* Changes updated
2021-12-24 13:14:50 +05:30
pal-sig
d7eb9f7d0d fix(UI): cross env now will work fine (#485) 2021-12-24 13:14:41 +05:30
pal-sig
7f800c94ae fix(UI): latest timestamp bug is resolved (#494)
fix(UI): latest timestamp bug is resolved (#494)
2021-12-24 12:00:26 +05:30
Aryan Shridhar
0f39643a56 fix(UI): Restore theme preference after reloading (#469) (#473)
* Added helper functions for theme configs under lib/theme.
* Modified the id of theme stylsheet element to 'appMode'.
2021-12-24 11:57:29 +05:30
Hendy Irawan
3b687201a6 build(kubernetes): Support hcloud (Hetzner) (#537)
feat: using `hcloud` because it's Hetzner's own preferred short name.
2021-12-24 11:55:55 +05:30
Aryan Shridhar
f449775cd6 fix(BUG): Allow users to enter application if no sample data is provided (#478). (#538)
fix(BUG): Allow users to enter the application if no sample data is provided (#478). (#538)
2021-12-24 11:53:53 +05:30
pal-sig
ff2e9ae084 feat: tooltip is added (#501)
* tooltip is added

* fix: tooltip component is updated

* Tooltip component is updated

* settings tooltip is updated

* tooltip size is updated
2021-12-24 11:51:19 +05:30
pal-sig
7f5b0c15c7 Bug(UI): Signup onclick loading (#541)
* bug(UI): on click loader is added

* bug(UI): on click loader is added
2021-12-24 11:42:25 +05:30
pal-sig
d825fc2f30 fix: Antd tab issue (#507)
* fix(CSS): antd css tab issue is resolved

* removing redudant css
2021-12-24 11:41:45 +05:30
Vishal Sharma
55feec34ea fix: db/logger security bugs (#558) 2021-12-24 11:40:39 +05:30
Ankit Nayan
6f8b78bd97 chore: small change in api 2021-12-16 13:31:56 +05:30
Ankit Nayan
c4fa86bc95 feat: added memory limit to otel-collecor service (#514) 2021-12-15 00:04:55 +05:30
Ankit Nayan
14ea7bd86a release: v0.5.3 2021-12-11 11:42:12 +05:30
Ankit Nayan
8bf0123370 chore: changed default sample alert to High RPS (#497) 2021-12-11 11:26:02 +05:30
Ankit Nayan
dc9ffcdd45 feat: helm chart for clickhouse setup (#479)
* minor change for volume permission spec

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* added basic files of clickhouse chart

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* added a simple deployment yaml

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* added clickhouse support in signoz

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* clickhouse working

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* chore: helm charts wip

* chore: fixing path of otel-path in hotrod

* chore: wip running clickhouse in templates

* chore: clickhouse working in templates

* chore: clickhouse helm chart upgraded to latest query-service and frontend images

* chore: cleanup and upgrading signoz chart version

* chore: adding alertmanager and minor fixes

* chore: persistence enabled for query-service and clickhouse

* chore: scrape interval reduced to 30s

* chore: changed crd api version from v1beta1 to v1

* chore: removed druid parts in values.yaml

* chore: log container removed from clickhouse

* chore: removed *.tgz from gitignore to add charts

* chore: added dependency charts

* chore: added clickhouse-operator templates

Co-authored-by: Yash Sharma <yashrsharma44@gmail.com>
2021-12-11 11:08:25 +05:30
pal-sig
c79223742f fix(UI): graph legends is fixed (#461)
* fix(UI): graph legends is fixed

* chore(UI): some changes regarding the color of the chart is updated

* full view css is fixed

* usage explorer graph is fixed

* default query is removed

* fix: scroll is removed
2021-12-10 14:28:31 +05:30
Aryan Shridhar
d9a99827c0 fix(BUG): Allow webpack to link CSS with bundled HTML file. (#468)
While porting the webpack config file from typescript to
javascript, commit 3e0f5a866d accidentally removed
style-loader from config setting. Due to this, the CSS file
wasn't being attached with the bundled html file.

Fixed this by adding the style-loader back to config settings
in webpack.
2021-12-10 13:30:57 +05:30
Anurag Gupta
1bf6faff8b feedback btn fixed (#458) 2021-12-10 13:28:58 +05:30
Ankit Nayan
95fb068bb0 Revert "Feat(UI): Storybook is added (#456)" (#482)
This reverts commit b27e30db58.
2021-12-10 13:28:01 +05:30
Rishit Pandey
0907ed280b Fix crlf line break (#455)
* change line-break rule according to OS

* strict comparison
2021-12-10 13:27:14 +05:30
Aryan Shridhar
fc7a0a8354 fix(UI): Allow empty input values in settings retention page. (#459) 2021-12-10 13:27:07 +05:30
pal-sig
b27e30db58 Feat(UI): Storybook is added (#456)
* feat(storybook): storybook is added

* spinner story is added

* package.json is updated
2021-12-10 11:29:14 +05:30
pal-sig
1ab291f3e8 fix: height for the top nav bar is fixed (#462) 2021-12-10 11:28:11 +05:30
pal-sig
552d193cef fix: cluttering issue is fixed (#471)
* fix: cluttering issue is fixed

* fix: cluttering issue is fixed
2021-12-10 11:27:45 +05:30
pal-sig
ba0f06f381 fix(BUG): localstorage permission is updated (#477) 2021-12-10 11:26:20 +05:30
Vishal Sharma
bbbb1c1d60 docs(contributing.md): update contributing doc for latest SigNoz release (#466) 2021-12-07 12:17:39 +05:30
Vishal Sharma
32a09d4ca2 chore(contributing.md): update contributing doc for latest SigNoz release (#465) 2021-12-07 11:59:45 +05:30
pal-sig
7ae43cf511 fix(UI): portFinder sync is added (#439) 2021-12-05 17:08:27 +05:30
Ankit Nayan
d1887fdbfe release: v0.5.2 2021-12-03 18:58:53 +05:30
pal-sig
5414a73b40 bug(fix): refresh is fixed in the application page (#445) 2021-12-03 18:42:45 +05:30
pal-sig
7f116d1597 bug(fix): refresh is fixed in the metrics application page (#452) 2021-12-03 18:42:31 +05:30
Ankit Nayan
2c1b530aa0 chore: changed otel scraping interval of prometheus targets to 30s from 60s 2021-12-03 18:03:46 +05:30
Ankit Nayan
231b8467fd release: v0.5.1 2021-12-02 20:50:02 +05:30
pal-sig
24910f6a39 chore(UI): unused packages are removed (#419) 2021-12-02 20:45:02 +05:30
pal-sig
03bf9afe03 feat(UI): Eslint fixes (#418)
* chore(UI): port finder is removed

* chore(UI): webpack config is updated

* fix(UI): eslint error and fixes are updated
2021-12-02 20:32:08 +05:30
pal-sig
fbf047a477 fix(UI): icon is updated (#438) 2021-12-02 20:21:26 +05:30
pal-sig
afc0559456 feat(UI): sendfeedback is updated (#416)
* feat(UI): sendfeedback is updated

* chore(UI): config slack hook url is updated

* fix(chore): button size is updated

* fix(bug): user feedback is updated

* chore(bug): z-index is fixed

* fix(bug): applayout is updated

* fix(bug): applayout is updated
2021-12-02 20:12:38 +05:30
Vishal Sharma
34e9247562 use clickhouse arm64 docker build by altinity for arm64 devices(M1 Macs) (#429)
* use clickhouse arm64 docker build by altinity for arm64 devices(M1 Macs)

* update command to bring down SigNoz docker containers
2021-12-02 19:50:27 +05:30
pal-sig
bbd90bff0c feat(UI): bundlesize is added (#420) 2021-12-02 19:42:35 +05:30
pal-sig
3e0f5a866d fix(BUG): experimental changes are removed (#436)
* fix(BUG): experimental changes are removed

* fix(BUG): experimental changes are removed
2021-12-02 19:31:02 +05:30
pal-sig
fb634303e8 Remove time filter alert page (#412)
* fix(FE): removed time filter from settings page #374

* declared an array consisting of routes,in which we won't have to render time filter component

* fix(UI): global down is removed from the alerts page

Co-authored-by: Mohmin2 <mohmin@expansionjs.com>
Co-authored-by: Mohmn <naqashmohmin1@gmail.com>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-12-02 19:05:37 +05:30
Mohmn
5e828bf174 fix(FE): removed time filter from settings page #374 (#385)
* fix(FE): removed time filter from settings page #374

* declared an array consisting of routes,in which we won't have to render time filter component

Co-authored-by: Mohmin2 <mohmin@expansionjs.com>
2021-12-02 19:04:13 +05:30
pal-sig
8f2ed0e46f bug(UI): default tab over setting is resolved (#415) 2021-12-02 19:03:31 +05:30
pal-sig
19b25219f4 feat(UI): Auto refresh (#411)
* feat(hook): useInterval hook is made

* feat(UI): alert rules and grouped alerts are fetched automatically within in 30sec
2021-12-02 18:59:03 +05:30
pal-sig
447700326a fix(BUG): not found is fixed (#405)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-12-02 18:53:05 +05:30
pal-sig
3ed4fb2b75 FE(UI):Channels test (#417)
* bug(UI): default tab over setting is resolved

* feat(UI): channels test is updated
2021-12-02 18:47:40 +05:30
pal-sig
32750fa2af FEAT(UI): Test case for alerts (#414)
* chore(UI): test case is fixed

* chore(UI): test case for the alerts is updated
2021-12-02 18:38:49 +05:30
Ankit Nayan
47b0671b27 feat: product feedback (#431)
* feat: submit product feedback API

* chore: added empty message check in submit feedback API
2021-12-02 18:37:33 +05:30
pal-sig
9bc62d83d3 feat(UI): web-vitals is added (#422) 2021-12-02 18:36:30 +05:30
Rishit Pandey
5e4cff7ae2 Improve ESLint Rules #409 (#426)
* add no-array-index-key rule

* fix array indexing errors

* convert string concat to template strings

* make component key simpler

* remove unused var
2021-12-02 18:34:31 +05:30
pal-sig
271ffbd1a1 fix(BUG): error text is show to the user (#427)
* fix(BUG): error text is show to the user

* fix(bug): retention condition is updated
2021-12-02 18:31:55 +05:30
Ankit Nayan
5b691d26e4 chore: changes in user api (#430) 2021-12-02 18:31:05 +05:30
pal-sig
6b6070fd45 fix(BUG): signup is updated (#432) 2021-12-02 18:30:51 +05:30
317 changed files with 12119 additions and 11176 deletions

.github/CODEOWNERS (6 lines changed)

@@ -0,0 +1,6 @@
# CODEOWNERS info: https://help.github.com/en/articles/about-code-owners
# Owners are automatically requested for review for PRs that changes code
# that they own.
* @ankitnayan
/frontend/ @palash-signoz
/deploy/ @prashant-shahi

.github/config.yml (31 lines changed)

@@ -0,0 +1,31 @@
# Configuration for welcome - https://github.com/behaviorbot/welcome
# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
# Comment to be posted to on first time issues
newIssueWelcomeComment: >
Thanks for opening this issue. A team member should give feedback soon.
In the meantime, feel free to check out the [contributing guidelines](https://github.com/signoz/signoz/blob/main/CONTRIBUTING.md).
# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
# Comment to be posted to on PRs from first time contributors in your repository
newPRWelcomeComment: >
Welcome to the SigNoz community! Thank you for your first pull request and making this project better. 🤗
# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge
# Comment to be posted to on pull requests merged by a first time user
firstPRMergeComment: >
Congrats on merging your first pull request!
![minion-party](https://i.imgur.com/Xlg59lP.gif)
We here at SigNoz are proud of you! 🥳
# Configuration for request-info - https://github.com/behaviorbot/request-info
# Comment to be posted in issues or pull requests, when no description is provided.
requestInfoReplyComment: >
We would appreciate it if you could provide us with more info about this issue/pr!
requestInfoLabelToAdd: request-more-info


@@ -1,6 +1,18 @@
To run GitHub workflow, a few environment variables needs to add in GitHub secrets
# Github actions
#### Environment Variables
## Testing the UI manually on each PR
First we need to make sure the UI is ready
* Check the `Start tunnel` step in `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull-<number>-signoz.loca.lt`
* This job will run until the PR is merged or closed to keep the local tunneling alive
- github will cancel this job if the PR wasn't merged after 6h
- if the job was cancel, go to the action and press `Re-run all jobs`
Now you can open your browser at https://pull-<number>-signoz.loca.lt and check the UI.
## Environment Variables
To run GitHub workflow, a few environment variables needs to add in GitHub secrets
<table>
<tr>


@@ -61,7 +61,7 @@ jobs:
- name: Build query-service image
shell: bash
run: |
make build-flattener-amd64
make build-query-service-amd64
build-flattener:
runs-on: ubuntu-latest
@@ -74,4 +74,4 @@ jobs:
- name: Build flattener docker image
shell: bash
run: |
make build-query-service-amd64
make build-flattener-amd64

.github/workflows/e2e-k3s.yaml (87 lines changed)

@@ -0,0 +1,87 @@
name: e2e-k3s
on:
pull_request:
types: [labeled]
jobs:
e2e-k3s:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'ok-to-test' }}
env:
DOCKER_TAG: pull-${{ github.event.number }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build query-service image
run: make build-query-service-amd64
- name: Build frontend image
run: make build-frontend-amd64
- name: Create a k3s cluster
uses: AbsaOSS/k3d-action@v2
with:
cluster-name: "signoz"
- name: Inject the images to the cluster
run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz
- name: Set up HotROD sample-app
run: |
# create sample-application namespace
kubectl create ns sample-application
# apply hotrod k8s manifest file
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
# wait for all deployments in sample-application namespace to be READY
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
- name: Deploy the app
run: |
# add signoz helm repository
helm repo add signoz https://charts.signoz.io
# create platform namespace
kubectl create ns platform
# installing signoz using helm
helm install my-release signoz/signoz -n platform \
--wait \
--timeout 10m0s \
--set cloud=null \
--set frontend.service.type=LoadBalancer \
--set query-service.image.tag=$DOCKER_TAG \
--set frontend.image.tag=$DOCKER_TAG
# get pods, services and the container images
kubectl describe deploy/my-release-frontend -n platform | grep Image
kubectl describe statefulset/my-release-query-service -n platform | grep Image
kubectl get pods -n platform
kubectl get svc -n platform
- name: Kick off a sample-app workload
run: |
# start the locust swarm
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
- name: Get short commit SHA and display tunnel URL
id: get-subdomain
run: |
subdomain="pr-$(git rev-parse --short HEAD)"
echo "URL for tunnelling: https://$subdomain.loca.lt"
echo "::set-output name=subdomain::$subdomain"
- name: Start tunnel
env:
SUBDOMAIN: ${{ steps.get-subdomain.outputs.subdomain }}
run: |
npm install -g localtunnel
host=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f4)
port=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1)
lt -p $port -l $host -s $SUBDOMAIN


@@ -1,172 +1,90 @@
name: push-pipeline
name: push
on:
push:
branches:
- main
- ^v[0-9]*.[0-9]*.x$
- develop
tags:
- "*"
# pull_request:
# branches:
# - main
# - v*
# paths:
# - 'pkg/**'
# - 'frontend/**'
- v*
jobs:
get-envs:
image-build-and-push-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
- name: Checkout code
uses: actions/checkout@v2
- shell: bash
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v1.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
img_tag=""
array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
if [ ${array[1]} == "tags" ]
then
echo "tag build"
img_tag=${GITHUB_REF#refs/*/v}
elif [ ${array[1]} == "pull" ]
then
img_tag="pull-${{ github.event.number }}"
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "non tag build"
img_tag="latest"
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
# This is a condition where image tag looks like "pull/<pullrequest-name>" during pull request build
NEW_IMG_TAG=`echo $img_tag | sed "s/\//-/g"`
echo $NEW_IMG_TAG
echo export IMG_TAG=$NEW_IMG_TAG >> env-vars
echo export FRONTEND_IMAGE="frontend" >> env-vars
echo export QUERY_SERVICE="query-service" >> env-vars
echo export FLATTENER_PROCESSOR="flattener-processor" >> env-vars
- name: Build and push docker image
run: make build-push-query-service
- name: Uploading envs
uses: actions/upload-artifact@v2
with:
name: env_artifact
path: env-vars
build-and-push-frontend:
image-build-and-push-frontend:
runs-on: ubuntu-latest
needs:
- get-envs
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Downloading image artifact
uses: actions/download-artifact@v2
with:
name: env_artifact
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Push Frontend Docker Image
shell: bash
env:
FRONTEND_DIRECTORY: "frontend"
REPONAME: ${{ secrets.REPONAME }}
FRONTEND_DOCKER_IMAGE: ${FRONTEND_IMAGE}
DOCKER_TAG: ${IMG_TAG}
- uses: benjlevesque/short-sha@v1.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
branch=${GITHUB_REF#refs/*/}
array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
then
source env-vars
make build-push-frontend
fi
build-and-push-query-service:
runs-on: ubuntu-latest
needs:
- get-envs
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Downloading image artifact
uses: actions/download-artifact@v2
with:
name: env_artifact
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Push Query Service Docker Image
shell: bash
env:
QUERY_SERVICE_DIRECTORY: "pkg/query-service"
REPONAME: ${{ secrets.REPONAME }}
QUERY_SERVICE_DOCKER_IMAGE: ${QUERY_SERVICE}
DOCKER_TAG: ${IMG_TAG}
run: |
branch=${GITHUB_REF#refs/*/}
array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] ||[[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
then
source env-vars
make build-push-query-service
fi
build-and-push-flattener:
runs-on: ubuntu-latest
needs:
- get-envs
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Downloading image artifact
uses: actions/download-artifact@v2
with:
name: env_artifact
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Push Flattener Processor Docker Image
shell: bash
env:
FLATTENER_DIRECTORY: "pkg/processors/flattener"
REPONAME: ${{ secrets.REPONAME }}
FLATTERNER_DOCKER_IMAGE: ${FLATTENER_PROCESSOR}
DOCKER_TAG: ${IMG_TAG}
run: |
branch=${GITHUB_REF#refs/*/}
array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
then
source env-vars
make build-push-flattener
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-frontend

.github/workflows/remove-label.yaml (16 lines changed)

@@ -0,0 +1,16 @@
name: remove-label
on:
pull_request_target:
types: [synchronize]
jobs:
remove:
runs-on: ubuntu-latest
steps:
- name: Remove label
uses: buildsville/add-remove-label@v1
with:
label: ok-to-test
type: remove
token: ${{ secrets.GITHUB_TOKEN }}

.gitignore (4 lines changed)

@@ -14,6 +14,8 @@ frontend/coverage
frontend/build
frontend/.vscode
frontend/.yarnclean
frontend/.temp_cache
# misc
.DS_Store
.env.local
@@ -33,7 +35,6 @@ frontend/cypress.env.json
.idea
**/.vscode
*.tgz
**/build
**/storage
**/locust-scripts/__pycache__/
@@ -41,3 +42,4 @@ frontend/cypress.env.json
frontend/*.env
pkg/query-service/signoz.db
/deploy/docker/clickhouse-setup/data/

.gitpod.yml (36 lines changed)

@@ -0,0 +1,36 @@
# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file)
# and commit this file to your remote git repository to share the goodness with others.
tasks:
- name: Run Script to Comment ut required lines
init: |
cd ./.scripts
sh commentLinesForSetup.sh
- name: Run Docker Images
init: |
cd ./deploy
sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d
# command:
- name: Run Frontend
init: |
cd ./frontend
yarn install
command:
yarn dev
ports:
- port: 3000
onOpen: open-browser
- port: 8080
onOpen: ignore
- port: 9000
onOpen: ignore
- port: 8123
onOpen: ignore
- port: 8089
onOpen: ignore
- port: 9093
onOpen: ignore


@@ -0,0 +1,7 @@
#!/bin/sh
# It Comments out the Line Query-Service & Frontend Section of deploy/docker/clickhouse-setup/docker-compose.yaml
# Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml chnages.
# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz
sed -i 38,70's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml


@@ -1,13 +1,16 @@
# How to Contribute
There are primarily 3 areas in which you can contribute in SigNoz
There are primarily 2 areas in which you can contribute in SigNoz
- Frontend ( written in Typescript, React)
- Query Service (written in Go)
- Flattener Processor (written in Go)
- Backend - ( Query Service - written in Go)
Depending upon your area of expertise & interest, you can chose one or more to contribute. Below are detailed instructions to contribute in each area
> Please note: If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻
> If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted.
# Develop Frontend
Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)
@@ -15,22 +18,27 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git
### Contribute to Frontend with Docker installation of SigNoz
- `git clone https://github.com/SigNoz/signoz.git && cd signoz`
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L38`
- run `cd deploy && docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this will install signoz locally without the frontend service)
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L59`
- run `cd deploy` to move to deploy directory
- Install signoz locally without the frontend
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d`
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d`
- `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
- `yarn install`
- `yarn dev`
> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
### Contribute to Frontend without installing SigNoz backend
If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in #contributing channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) and we will DM you with `<test environment URL>`
If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in #contributing channel in our [slack community](https://signoz.io/slack) and we will DM you with `<test environment URL>`
- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
- Create a file `.env` with `FRONTEND_API_ENDPOINT=<test environment URL>`
- `yarn install`
- `yarn dev`
**_Frontend should now be accessible at `http://localhost:3000/application`_**
**_Frontend should now be accessible at `http://localhost:3301/application`_**
# Contribute to Query-Service
@@ -38,25 +46,68 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht
### To run ClickHouse setup (recommended for local development)
- `git clone https://github.com/SigNoz/signoz.git && cd signoz/deploy`
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L22`
- Run `docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this will install signoz locally without the frontend and query-service)
- `STORAGE=clickhouse ClickHouseUrl=tcp://localhost:9001 go run main.go`
- git clone https://github.com/SigNoz/signoz.git
- run `sudo make dev-setup` to configure local setup to run query-service
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L59`
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
- Install signoz locally without the frontend and query-service
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo make run-x86`
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo make run-arm`
> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh`
**_Query Service should now be available at `http://localhost:8080`_**
> If you want to see how, frontend plays with query service, you can run frontend also in you local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts` as the query-service is now running at port `8080`
# Contribute to Flattener Processor
---
Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
Not needed to run for the ClickHouse setup
Click the button below. A workspace with all required environments will be created.
more info at [https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener](https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener)
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/SigNoz/signoz)
> To use it on your forked repo, edit the 'Open in Gitpod' button url to `https://gitpod.io/#https://github.com/<your-github-username>/signoz`
# Contribute to SigNoz Helm Chart
Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).
### To run helm chart for local development
- run `git clone https://github.com/SigNoz/charts.git` followed by `cd charts`
- it is recommended to use lightweight kubernetes (k8s) cluster for local development:
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [k3d](https://k3d.io/#installation)
- [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
- run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart
- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
**To load data with HotROD sample app:**
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```
**To stop the load generation:**
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
---
## General Instructions
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).
- If you find any bugs, please create an issue
- If you find anything missing in documentation, you can create an issue with label **documentation**

View File

@@ -1,7 +1,14 @@
#
# Reference Guide - https://www.gnu.org/software/make/manual/make.html
#
# Build variables
BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
# Internal variables or constants.
#
FRONTEND_DIRECTORY ?= frontend
FLATTENER_DIRECTORY ?= pkg/processors/flattener
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
@@ -10,8 +17,17 @@ REPONAME ?= signoz
DOCKER_TAG ?= latest
FRONTEND_DOCKER_IMAGE ?= frontend
FLATTERNER_DOCKER_IMAGE ?= query-service
QUERY_SERVICE_DOCKER_IMAGE ?= flattener-processor
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
FLATTERNER_DOCKER_IMAGE ?= flattener-processor
# Build-time Go variables
PACKAGE?=go.signoz.io/query-service
buildVersion=${PACKAGE}/version.buildVersion
buildHash=${PACKAGE}/version.buildHash
buildTime=${PACKAGE}/version.buildTime
gitBranch=${PACKAGE}/version.gitBranch
LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"
all: build-push-frontend build-push-query-service build-push-flattener
# Steps to build and push docker image of frontend
@@ -22,7 +38,7 @@ build-frontend-amd64:
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) --build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend(used in push pipeline)
build-push-frontend:
@@ -40,7 +56,7 @@ build-query-service-amd64:
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@cd $(QUERY_SERVICE_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS)
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service:
@@ -48,7 +64,7 @@ build-push-query-service:
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@cd $(QUERY_SERVICE_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of flattener
.PHONY: build-flattener-amd64 build-push-flattener
@@ -67,3 +83,17 @@ build-push-flattener:
@echo "------------------"
@cd $(FLATTENER_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .
dev-setup:
mkdir -p /var/lib/signoz
sqlite3 /var/lib/signoz/signoz.db "VACUUM";
mkdir -p pkg/query-service/config/dashboards
@echo "------------------"
@echo "--> Local Setup completed"
@echo "------------------"
run-x86:
@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d
run-arm:
@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.arm.yaml up -d
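Taken together, the new `dev-setup`, `run-x86`, and `run-arm` targets reduce a local bring-up to two commands from the repository root; a sketch, assuming `sqlite3` and `docker-compose` are installed:
```sh
# Create /var/lib/signoz, an empty SQLite DB, and the dashboards config directory
sudo make dev-setup

# Start the ClickHouse-based stack (swap in run-arm on Apple silicon)
sudo make run-x86
```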

View File

@@ -17,7 +17,7 @@
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
<a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
@@ -39,7 +39,7 @@ SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren berei
## Werde Teil unserer Slack Community
Sag Hi zu uns auf [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋
<br /><br />
@@ -130,7 +130,7 @@ Außerdem hat SigNoz noch mehr spezielle Funktionen im Vergleich zu Jaeger:
Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md) durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://signoz.io/slack).
<br /><br />
@@ -146,7 +146,7 @@ Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unver
## Community
Werde Teil der [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
Werde Teil der [Slack Community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
Falls du irgendwelche Ideen, Fragen oder Feedback hast, kannst du sie gerne über unsere [Github Discussions](https://github.com/SigNoz/signoz/discussions) mit uns teilen.

View File

@@ -18,7 +18,7 @@
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
<a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
@@ -33,7 +33,9 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
👉 Run aggregates on trace data to get business relevant metrics
![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
![screenzy-1644432902955](https://user-images.githubusercontent.com/504541/153270713-1b2156e6-ec03-42de-975b-3c02b8ec1836.png)
![screenzy-1644432986784](https://user-images.githubusercontent.com/504541/153270725-0efb73b3-06ed-4207-bf13-9b7e2e17c4b8.png)
<br /><br />
@@ -41,7 +43,7 @@ SigNoz helps developers monitor applications and troubleshoot problems in their
## Join our Slack community
Come say Hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
Come say Hi to us on [Slack](https://signoz.io/slack) 👋
<br /><br />
@@ -132,7 +134,7 @@ Moreover, SigNoz has few more advanced features wrt Jaeger:
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)
<br /><br />
@@ -148,7 +150,7 @@ You can find docs at https://signoz.io/docs/. If you need any clarification or f
## Community
Join the [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.
Join the [slack community](https://signoz.io/slack) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.
If you have any ideas, questions, or any feedback, please share on our [Github Discussions](https://github.com/SigNoz/signoz/discussions)

View File

@@ -15,7 +15,7 @@
<h3 align="center">
<a href="https://signoz.io/docs"><b>Documentação</b></a> &bull;
<a href="https://bit.ly/signoz-slack"><b>Comunidade no Slack</b></a> &bull;
<a href="https://signoz.io/slack"><b>Comunidade no Slack</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
@@ -38,7 +38,7 @@ SigNoz auxilia os desenvolvedores a monitorarem aplicativos e solucionar problem
## Junte-se à nossa comunidade no Slack
Venha dizer oi para nós no [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
Venha dizer oi para nós no [Slack](https://signoz.io/slack) 👋
<br /><br />
@@ -129,7 +129,7 @@ Além disso, SigNoz tem alguns recursos mais avançados do que Jaeger:
Nós ❤️ contribuições grandes ou pequenas. Leia [CONTRIBUTING.md](CONTRIBUTING.md) para começar a fazer contribuições para o SigNoz.
Não sabe como começar? Basta enviar um sinal para nós no canal `#contributing` em nossa [comunidade no Slack.](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)
Não sabe como começar? Basta enviar um sinal para nós no canal `#contributing` em nossa [comunidade no Slack.](https://signoz.io/slack)
<br /><br />
@@ -145,7 +145,7 @@ Você pode encontrar a documentação em https://signoz.io/docs/. Se você tiver
## Comunidade
Junte-se a [comunidade no Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) para saber mais sobre rastreamento distribuído, observabilidade ou SigNoz e para se conectar com outros usuários e colaboradores.
Junte-se a [comunidade no Slack](https://signoz.io/slack) para saber mais sobre rastreamento distribuído, observabilidade ou SigNoz e para se conectar com outros usuários e colaboradores.
Se você tiver alguma ideia, pergunta ou feedback, compartilhe em nosso [Github Discussões](https://github.com/SigNoz/signoz/discussions)

View File

@@ -29,7 +29,7 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo
## 加入我们的Slack社区
来[Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 跟我们打声招呼👋
来[Slack](https://signoz.io/slack) 跟我们打声招呼👋
<br /><br />
@@ -120,7 +120,7 @@ Jaeger只做分布式跟踪SigNoz则是做了矩阵和跟踪两块我们
我们 ❤️ 任何贡献无论大小。 请阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 然后开始给Signoz做贡献。
还不清楚怎么开始? 只需在[slack社区](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)的`#contributing`频道里ping我们。
还不清楚怎么开始? 只需在[slack社区](https://signoz.io/slack)的`#contributing`频道里ping我们。
<br /><br />
@@ -136,7 +136,7 @@ Jaeger只做分布式跟踪SigNoz则是做了矩阵和跟踪两块我们
## 社区
加入[slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA),了解更多关于分布式跟踪、可观察性(observability)以及SigNoz。同时与其他用户和贡献者一起交流。
加入[slack community](https://signoz.io/slack),了解更多关于分布式跟踪、可观察性(observability)以及SigNoz。同时与其他用户和贡献者一起交流。
如果你有任何想法、问题或者反馈,请在[Github Discussions](https://github.com/SigNoz/signoz/discussions)分享给我们。

deploy/README.md (new file, 88 lines)
View File

@@ -0,0 +1,88 @@
# Deploy
Check that you have cloned [signoz/signoz](https://github.com/signoz/signoz)
and are currently in the `signoz/deploy` folder.
## Docker
If you don't have docker set up, please follow [this guide](https://docs.docker.com/engine/install/)
to set up docker before proceeding with the next steps.
### Using Install Script
Now run the following command to install:
```sh
./install.sh
```
### Using Docker Compose
If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
to set up docker compose before proceeding with the next steps.
For x86 chips (Intel/AMD):
```sh
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```
For Macs with an Apple chip (arm):
```sh
docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
```
Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see
the data generated from HotROD in the SigNoz UI.
## Kubernetes
### Using Helm
#### Bring up SigNoz cluster
```sh
helm repo add signoz https://charts.signoz.io
kubectl create ns platform
helm -n platform install my-release signoz/signoz
```
To access the UI, you can `port-forward` the frontend service:
```sh
kubectl -n platform port-forward svc/my-release-frontend 3301:3301
```
Open http://localhost:3301 in your favourite browser. A few minutes after you generate load
from the HotROD application, you should see the data generated from HotROD in the SigNoz UI.
#### Test HotROD application with SigNoz
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
```
To generate load:
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```
To stop load:
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
## Uninstall/Troubleshoot?
Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more.
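For quick reference, tearing down the setups above usually amounts to the following sketch; the release and namespace names match the examples in this README:
```sh
# Docker: stop the stack and remove its volumes (run from the signoz/deploy folder)
docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v

# Kubernetes: remove the Helm release and the namespaces created above
helm -n platform uninstall my-release
kubectl delete ns platform sample-application
```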

View File

@@ -50,7 +50,7 @@ services:
links:
- "query-service"
ports:
- "3000:3000"
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@@ -1,5 +1,5 @@
server {
listen 3000;
listen 3301;
server_name _;
gzip on;

View File

@@ -0,0 +1,98 @@
version: "2.4"
services:
clickhouse:
image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
alertmanager:
image: signoz/alertmanager:0.5.0
volumes:
- ./alertmanager.yml:/prometheus/alertmanager.yml
- ./data/alertmanager:/data
command:
- '--config.file=/prometheus/alertmanager.yml'
- '--storage.path=/data'
query-service:
image: signoz/query-service:0.6.1
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.6.1
container_name: frontend
depends_on:
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.5.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "4317:4317" # OTLP GRPC receiver
mem_limit: 2000m
restart: always
depends_on:
clickhouse:
condition: service_healthy
otel-collector-metrics:
image: signoz/otelcontribcol:0.5.0
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
depends_on:
clickhouse:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust
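Bringing this new ARM compose file up mirrors the deploy/README.md shown earlier; the status probe at the end is only an illustrative check, not something the repo ships:
```sh
# From the signoz/deploy folder
docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d

# The frontend service maps 3301:3301, so the UI should answer once containers are healthy
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:3301
```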

View File

@@ -2,24 +2,16 @@ version: "2.4"
services:
clickhouse:
image: yandex/clickhouse-server
expose:
- 8123
- 9000
ports:
- 9001:9000
- 8123:8123
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
image: yandex/clickhouse-server:21.12.3.32
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
alertmanager:
image: signoz/alertmanager:0.5.0
@@ -29,88 +21,74 @@ services:
command:
- '--config.file=/prometheus/alertmanager.yml'
- '--storage.path=/data'
ports:
- 9093:9093
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:0.5.0
image: signoz/query-service:0.6.1
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
ports:
- "8080:8080"
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- STORAGE=clickhouse
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.5.0
container_name: frontend
frontend:
image: signoz/frontend:0.6.1
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3000:3000"
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.4.2
image: signoz/otelcontribcol:0.5.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 legacy port
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
- "8889:8889" # prometheus exporter
mem_limit: 2000m
restart: always
depends_on:
clickhouse:
condition: service_healthy
otel-collector-metrics:
image: signoz/otelcontribcol:0.4.2
image: signoz/otelcontribcol:0.5.0
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
depends_on:
clickhouse:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
ports:
- "9000:8080"
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
@@ -120,4 +98,4 @@ services:
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust
- ../common/locust-scripts:/locust
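One practical consequence of the 3000 to 3301 port change in this file: anything that probed the old frontend port needs updating, e.g. the readiness check used by the install script (shown further down in this compare) now targets:
```sh
curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list
```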

View File

@@ -1,31 +0,0 @@
CREATE TABLE IF NOT EXISTS signoz_index (
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
traceID String CODEC(ZSTD(1)),
spanID String CODEC(ZSTD(1)),
parentSpanID String CODEC(ZSTD(1)),
serviceName LowCardinality(String) CODEC(ZSTD(1)),
name LowCardinality(String) CODEC(ZSTD(1)),
kind Int32 CODEC(ZSTD(1)),
durationNano UInt64 CODEC(ZSTD(1)),
tags Array(String) CODEC(ZSTD(1)),
tagsKeys Array(String) CODEC(ZSTD(1)),
tagsValues Array(String) CODEC(ZSTD(1)),
statusCode Int64 CODEC(ZSTD(1)),
references String CODEC(ZSTD(1)),
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
component Nullable(String) CODEC(ZSTD(1)),
dbSystem Nullable(String) CODEC(ZSTD(1)),
dbName Nullable(String) CODEC(ZSTD(1)),
dbOperation Nullable(String) CODEC(ZSTD(1)),
peerService Nullable(String) CODEC(ZSTD(1)),
INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
INDEX idx_kind kind TYPE minmax GRANULARITY 4,
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))

View File

@@ -9,7 +9,7 @@ receivers:
config:
scrape_configs:
- job_name: "otel-collector"
scrape_interval: 60s
scrape_interval: 30s
static_configs:
- targets: ["otel-collector:8889"]
processors:

View File

@@ -1,5 +1,5 @@
server {
listen 3000;
listen 3301;
server_name _;
gzip on;

View File

@@ -167,7 +167,8 @@ services:
container_name: query-service
depends_on:
- router
router:
condition: service_healthy
ports:
- "8080:8080"
volumes:
@@ -180,10 +181,6 @@ services:
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
depends_on:
router:
condition: service_healthy
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
@@ -193,7 +190,7 @@ services:
links:
- "query-service"
ports:
- "3000:3000"
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@@ -162,7 +162,8 @@ services:
container_name: query-service
depends_on:
- router
router:
condition: service_healthy
ports:
- "8080:8080"
@@ -176,10 +177,6 @@ services:
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
depends_on:
router:
condition: service_healthy
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
@@ -189,7 +186,7 @@ services:
links:
- "query-service"
ports:
- "3000:3000"
- "3301:3301"
volumes:
- ./nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@@ -36,6 +36,10 @@ is_mac() {
[[ $OSTYPE == darwin* ]]
}
is_arm64(){
[[ `uname -m` == 'arm64' ]]
}
check_os() {
if is_mac; then
package_manager="brew"
@@ -98,7 +102,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="80|3000|8080"
local ports_pattern="80|3301|8080"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -112,15 +116,7 @@ check_ports_occupied() {
fi
if [[ -n $port_check_output ]]; then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "port not available" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "port_not_available"
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports."
@@ -203,15 +199,7 @@ install_docker_compose() {
echo ""
fi
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "docker_compose_not_found"
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
echo "docker-compose not found! Please install docker-compose first and then continue with this installation."
@@ -237,7 +225,7 @@ wait_for_containers_start() {
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/v1/services/list || true)"
status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
if [[ status_code -eq 200 ]]; then
break
else
@@ -267,12 +255,16 @@ bye() { # Prints a friendly good bye message and exits the script.
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
if is_arm64; then
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a"
else
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
fi
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"
echo -e "\n📨 Please share your email to receive support with the installation"
@@ -283,16 +275,7 @@ bye() { # Prints a friendly good bye message and exits the script.
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "installation_support"
echo ""
@@ -309,10 +292,19 @@ echo ""
# Checking OS and assigning package manager
desired_os=0
os=""
email=""
echo -e "Detecting your OS ..."
check_os
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
# Obtain unique installation id
sysinfo="$(uname -a)"
if [ $? -ne 0 ]; then
uuid="$(uuidgen)"
uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
SIGNOZ_INSTALLATION_ID="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
else
SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | shasum | cut -d ' ' -f1)
fi
# echo ""
@@ -342,29 +334,78 @@ setup_type='clickhouse'
# Run bye if failure happens
trap bye EXIT
URL="https://api.segment.io/v1/track"
HEADER_1="Content-Type: application/json"
HEADER_2="Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
send_event() {
error=""
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
case "$1" in
'install_started')
event="Installation Started"
;;
'os_not_supported')
event="Installation Error"
error="OS Not Supported"
;;
'docker_not_installed')
event="Installation Error"
error="Docker not installed"
;;
'docker_compose_not_found')
event="Installation Error"
event="Docker Compose not found"
;;
'port_not_available')
event="Installation Error"
error="port not available"
;;
'installation_error_checks')
event="Installation Error - Checks"
error="Containers not started"
if [ $setup_type == 'clickhouse' ]; then
others='"data": "some_checks",'
else
supervisors="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
datasources="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
others='"supervisors": "'"$supervisors"'", "datasources": "'"$datasources"'",'
fi
;;
'installation_support')
event="Installation Support"
others='"email": "'"$email"'",'
;;
'installation_success')
event="Installation Success"
;;
'identify_successful_installation')
event="Identify Successful Installation"
others='"email": "'"$email"'",'
;;
*)
print_error "unknown event type: $1"
exit 1
;;
esac
if [[ $desired_os -eq 0 ]];then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
if [ "$error" != "" ]; then
error='"error": "'"$error"'", '
fi
DATA='{ "anonymousId": "'"$SIGNOZ_INSTALLATION_ID"'", "event": "'"$event"'", "properties": { "os": "'"$os"'", '"$error $others"' "setup_type": "'"$setup_type"'" } }'
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1
fi
}
send_event "install_started"
if [[ $desired_os -eq 0 ]]; then
send_event "os_not_supported"
fi
# check_ports_occupied
@@ -379,15 +420,8 @@ if ! is_command_present docker; then
echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
echo "https://docs.docker.com/docker-for-mac/install/"
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "docker_not_installed"
exit 1
fi
fi
@@ -398,7 +432,6 @@ if ! is_command_present docker-compose; then
fi
start_docker
@@ -408,7 +441,11 @@ start_docker
echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
if is_arm64; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull
else
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
fi
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
fi
@@ -420,7 +457,11 @@ echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do it's thing.
if [ $setup_type == 'clickhouse' ]; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
if is_arm64; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true
else
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
fi
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
fi
@@ -433,63 +474,42 @@ if [[ $status_code -ne 200 ]]; then
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "or reach us on SigNoz for support https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"
if [ $setup_type == 'clickhouse' ]; then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "data": "some_checks", "setup_type": "'"$setup_type"'" } }'
else
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"', "setup_type": "'"$setup_type"'" } }'
fi
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "installation_error_checks"
exit 1
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"}, "setup_type": "'"$setup_type"'" }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
send_event "installation_success"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
echo ""
echo "🟢 Your installation is complete!"
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3000"
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
if [ $setup_type == 'clickhouse' ]; then
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v"
if is_arm64; then
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v"
else
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
fi
else
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
fi
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "👉 Need help Getting Started?"
echo -e "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo -e "Join us on Slack https://signoz.io/slack"
echo ""
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
read -rp 'Email: ' email
@@ -499,16 +519,7 @@ else
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "identify_successful_installation"
fi
echo -e "\n🙏 Thank you!\n"
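The net effect of this refactor is that every telemetry call now funnels through `send_event`, which posts a single Segment `track` request; a hand-rolled equivalent, with values copied from the script above and a placeholder anonymousId, would be roughly:
```sh
curl -sfL "https://api.segment.io/v1/track" \
  --header "Content-Type: application/json" \
  --header "Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6" \
  -d '{ "anonymousId": "example-install-id", "event": "Installation Started", "properties": { "os": "Mac", "setup_type": "clickhouse" } }'
```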

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: retention-config
data:
retention-spec.json: |
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]

View File

@@ -1,29 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: set-retention
annotations:
"helm.sh/hook": post-install,post-upgrade
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: set-retention
image: theithollow/hollowapp-blog:curl
volumeMounts:
- name: retention-config-volume
mountPath: /app/retention-spec.json
subPath: retention-spec.json
args:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://signoz-druid-router:8888/druid/coordinator/v1/rules/flattened_spans"
volumes:
- name: retention-config-volume
configMap:
name: retention-config
restartPolicy: Never
backoffLimit: 8

View File

@@ -1,76 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: supervisor-config
data:
supervisor-spec.json: |
{
"type": "kafka",
"dataSchema": {
"dataSource": "flattened_spans",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "StartTimeUnixNano",
"format": "nano"
},
"dimensionsSpec": {
"dimensions": [
"TraceId",
"SpanId",
"ParentSpanId",
"Name",
"ServiceName",
"References",
"Tags",
"ExternalHttpMethod",
"ExternalHttpUrl",
"Component",
"DBSystem",
"DBName",
"DBOperation",
"PeerService",
{
"type": "string",
"name": "TagsKeys",
"multiValueHandling": "ARRAY"
},
{
"type": "string",
"name": "TagsValues",
"multiValueHandling": "ARRAY"
},
{ "name": "DurationNano", "type": "Long" },
{ "name": "Kind", "type": "int" },
{ "name": "StatusCode", "type": "int" }
]
}
}
},
"metricsSpec" : [
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": "NONE",
"rollup": false
}
},
"tuningConfig": {
"type": "kafka",
"reportParseExceptions": true
},
"ioConfig": {
"topic": "flattened_spans",
"replicas": 1,
"taskDuration": "PT20M",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "signoz-kafka:9092"
}
}
}

View File

@@ -1,27 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: create-supervisor
annotations:
"helm.sh/hook": post-install,post-upgrade
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: create-supervisor
image: theithollow/hollowapp-blog:curl
volumeMounts:
- name: supervisor-config-volume
mountPath: /app/supervisor-spec.json
subPath: supervisor-spec.json
args:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://signoz-druid-router:8888/druid/indexer/v1/supervisor"
volumes:
- name: supervisor-config-volume
configMap:
name: supervisor-config
restartPolicy: Never
backoffLimit: 8

View File

@@ -1,60 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: otel-collector-conf
labels:
app: opentelemetry
component: otel-collector-conf
data:
otel-collector-config: |
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
queued_retry:
num_workers: 4
queue_size: 100
retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
kafka/traces:
brokers:
- signoz-kafka:9092
topic: 'otlp_spans'
protocol_version: 2.0.0
kafka/metrics:
brokers:
- signoz-kafka:9092
topic: 'otlp_metrics'
protocol_version: 2.0.0
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [memory_limiter, batch, queued_retry]
exporters: [kafka/traces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [kafka/metrics]

View File

@@ -1,72 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: otel-collector
labels:
app: opentelemetry
component: otel-collector
spec:
selector:
matchLabels:
app: opentelemetry
component: otel-collector
minReadySeconds: 5
progressDeadlineSeconds: 120
replicas: 1 #TODO - adjust this to your own requirements
template:
metadata:
labels:
app: opentelemetry
component: otel-collector
spec:
containers:
- command:
- "/otelcol"
- "--config=/conf/otel-collector-config.yaml"
# Memory Ballast size should be max 1/3 to 1/2 of memory.
- "--mem-ballast-size-mib=683"
image: otel/opentelemetry-collector:0.18.0
name: otel-collector
resources:
limits:
cpu: 1
memory: 2Gi
requests:
cpu: 200m
memory: 400Mi
ports:
- containerPort: 55679 # Default endpoint for ZPages.
- containerPort: 55680 # Default endpoint for OpenTelemetry receiver.
- containerPort: 55681 # Default endpoint for OpenTelemetry HTTP/1.0 receiver.
- containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver.
- containerPort: 14250 # Default endpoint for Jaeger GRPC receiver.
- containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
- containerPort: 9411 # Default endpoint for Zipkin receiver.
- containerPort: 8888 # Default endpoint for querying metrics.
volumeMounts:
- name: otel-collector-config-vol
mountPath: /conf
# - name: otel-collector-secrets
# mountPath: /secrets
livenessProbe:
httpGet:
path: /
port: 13133 # Health Check extension default port.
readinessProbe:
httpGet:
path: /
port: 13133 # Health Check extension default port.
volumes:
- configMap:
name: otel-collector-conf
items:
- key: otel-collector-config
path: otel-collector-config.yaml
name: otel-collector-config-vol
# - secret:
# name: otel-collector-secrets
# items:
# - key: cert.pem
# path: cert.pem
# - key: key.pem
# path: key.pem

View File

@@ -1,31 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: otel-collector
labels:
app: opentelemetry
component: otel-collector
spec:
ports:
- name: otlp # Default endpoint for OpenTelemetry receiver.
port: 55680
protocol: TCP
targetPort: 55680
- name: otlp-http-legacy # Default endpoint for OpenTelemetry receiver.
port: 55681
protocol: TCP
targetPort: 55681
- name: otlp-grpc # Default endpoint for OpenTelemetry receiver.
port: 4317
protocol: TCP
targetPort: 4317
- name: jaeger-grpc # Default endpoing for Jaeger gRPC receiver
port: 14250
- name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver.
port: 14268
- name: zipkin # Default endpoint for Zipkin receiver.
port: 9411
- name: metrics # Default endpoint for querying metrics.
port: 8888
selector:
component: otel-collector

View File

@@ -1,21 +0,0 @@
dependencies:
- name: zookeeper
repository: https://charts.bitnami.com/bitnami
version: 6.0.0
- name: kafka
repository: https://charts.bitnami.com/bitnami
version: 12.0.0
- name: druid
repository: https://charts.helm.sh/incubator
version: 0.2.18
- name: flattener-processor
repository: file://./signoz-charts/flattener-processor
version: 0.3.6
- name: query-service
repository: file://./signoz-charts/query-service
version: 0.3.6
- name: frontend
repository: file://./signoz-charts/frontend
version: 0.3.6
digest: sha256:b160e903c630a90644683c512eb8ba018e18d2c08051e255edd3749cb9cc7228
generated: "2021-08-23T12:06:37.231066+05:30"

View File

@@ -1,43 +0,0 @@
apiVersion: v2
name: signoz-platform
description: SigNoz Observability Platform Helm Chart
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.3.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.3.2
dependencies:
- name: zookeeper
repository: "https://charts.bitnami.com/bitnami"
version: 6.0.0
- name: kafka
repository: "https://charts.bitnami.com/bitnami"
version: 12.0.0
- name: druid
repository: "https://charts.helm.sh/incubator"
version: 0.2.18
- name: flattener-processor
repository: "file://./signoz-charts/flattener-processor"
version: 0.3.6
- name: query-service
repository: "file://./signoz-charts/query-service"
version: 0.3.6
- name: frontend
repository: "file://./signoz-charts/frontend"
version: 0.3.6

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,21 +0,0 @@
apiVersion: v2
name: flattener-processor
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.6

View File

@@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "flattener-processor.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "flattener-processor.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "flattener-processor.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "flattener-processor.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}

View File

@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "flattener-processor.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flattener-processor.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flattener-processor.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "flattener-processor.labels" -}}
helm.sh/chart: {{ include "flattener-processor.chart" . }}
{{ include "flattener-processor.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "flattener-processor.selectorLabels" -}}
app.kubernetes.io/name: {{ include "flattener-processor.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "flattener-processor.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "flattener-processor.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -1,65 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "flattener-processor.fullname" . }}
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "flattener-processor.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "flattener-processor.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "flattener-processor.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/root/flattener"
ports:
- name: http
containerPort: 8080
protocol: TCP
env:
- name: KAFKA_BROKER
value: {{ .Values.configVars.KAFKA_BROKER }}
- name: KAFKA_INPUT_TOPIC
value: {{ .Values.configVars.KAFKA_INPUT_TOPIC }}
- name: KAFKA_OUTPUT_TOPIC
value: {{ .Values.configVars.KAFKA_OUTPUT_TOPIC }}
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "flattener-processor.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "flattener-processor.fullname" . }}
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "flattener-processor.selectorLabels" . | nindent 4 }}

View File

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "flattener-processor.serviceAccountName" . }}
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "flattener-processor.fullname" . }}-test-connection"
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "flattener-processor.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View File

@@ -1,74 +0,0 @@
# Default values for flattener-processor.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: signoz/flattener-processor
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
configVars:
KAFKA_BROKER: signoz-kafka:9092
KAFKA_INPUT_TOPIC: otlp_spans
KAFKA_OUTPUT_TOPIC: flattened_spans
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8080
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}


@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -1,21 +0,0 @@
apiVersion: v2
name: frontend
description: A Helm chart for SigNoz Frontend Service
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.6


@@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "frontend.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "frontend.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "frontend.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "frontend.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}


@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "frontend.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "frontend.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "frontend.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "frontend.labels" -}}
helm.sh/chart: {{ include "frontend.chart" . }}
{{ include "frontend.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "frontend.selectorLabels" -}}
app.kubernetes.io/name: {{ include "frontend.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "frontend.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "frontend.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}


@@ -1,37 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.config.name }}
labels:
release: {{ .Release.Name }}
data:
default.conf: |-
server {
listen {{ .Values.service.port }};
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api {
proxy_pass http://{{ .Values.config.queryServiceUrl }}/api;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}


@@ -1,64 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "frontend.fullname" . }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "frontend.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "frontend.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
volumes:
- name: nginx-config
configMap:
name: {{ .Values.config.name }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
env:
- name: REACT_APP_QUERY_SERVICE_URL
value: {{ .Values.configVars.REACT_APP_QUERY_SERVICE_URL }}
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/conf.d
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "frontend.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "frontend.fullname" . }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "frontend.selectorLabels" . | nindent 4 }}


@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "frontend.serviceAccountName" . }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "frontend.fullname" . }}-test-connection"
labels:
{{- include "frontend.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "frontend.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never


@@ -1,75 +0,0 @@
# Default values for frontend.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: signoz/frontend
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
configVars: {}
config:
name: signoz-nginx-config
queryServiceUrl: signoz-query-service:8080
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 3000
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}


@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -1,21 +0,0 @@
apiVersion: v2
name: query-service
description: A Helm chart for running SigNoz Query Service in Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.6


@@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "query-service.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "query-service.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "query-service.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "query-service.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}


@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "query-service.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "query-service.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "query-service.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "query-service.labels" -}}
helm.sh/chart: {{ include "query-service.chart" . }}
{{ include "query-service.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "query-service.selectorLabels" -}}
app.kubernetes.io/name: {{ include "query-service.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "query-service.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "query-service.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}


@@ -1,63 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "query-service.fullname" . }}
labels:
{{- include "query-service.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "query-service.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "query-service.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "query-service.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8080
protocol: TCP
env:
- name: DruidClientUrl
value: {{ .Values.configVars.DruidClientUrl }}
- name: DruidDatasource
value: {{ .Values.configVars.DruidDatasource }}
- name: STORAGE
value: {{ .Values.configVars.STORAGE }}
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "query-service.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "query-service.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "query-service.fullname" . }}
labels:
{{- include "query-service.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "query-service.selectorLabels" . | nindent 4 }}


@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "query-service.serviceAccountName" . }}
labels:
{{- include "query-service.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "query-service.fullname" . }}-test-connection"
labels:
{{- include "query-service.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "query-service.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never


@@ -1,76 +0,0 @@
# Default values for query-service.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: signoz/query-service
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
configVars:
DruidClientUrl: http://signoz-druid-router:8888
DruidDatasource: flattened_spans
STORAGE: druid
POSTHOG_API_KEY: "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w"
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8080
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}


@@ -1,51 +0,0 @@
zookeeper:
autopurge:
purgeInterval: 1
kafka:
zookeeper:
enabled: false
externalZookeeper:
servers: ["signoz-zookeeper:2181"]
zookeeperConnectionTimeoutMs: 6000
druid:
image:
tag: 0.21.1-rc2
configVars:
# To store data on local disks attached
druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]'
druid_storage_type: local
# # To store data in S3
# druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]'
# druid_storage_type: s3
# druid_storage_bucket: signoz-druid
# druid_storage_baseKey: baseKey
# AWS_ACCESS_KEY_ID: <your secret id>
# AWS_SECRET_ACCESS_KEY: <your secret key>
# AWS_REGION: <your region>
historical:
persistence:
size: "20Gi"
zkHosts: "signoz-zookeeper:2181"
zookeeper:
enabled: false
flattener-processor:
configVars:
KAFKA_BROKER: signoz-kafka:9092
KAFKA_INPUT_TOPIC: otlp_spans
KAFKA_OUTPUT_TOPIC: flattened_spans
query-service:
configVars:
DruidClientUrl: http://signoz-druid-router:8888
DruidDatasource: flattened_spans
STORAGE: druid


@@ -41,7 +41,8 @@ module.exports = {
'react/prop-types': 'off',
'@typescript-eslint/explicit-function-return-type': 'error',
'@typescript-eslint/no-var-requires': 0,
'linebreak-style': ['error', 'unix'],
'react/no-array-index-key': 2,
'linebreak-style': ['error', process.platform === 'win32' ? 'windows' : 'unix'],
// simple sort error
'simple-import-sort/imports': 'error',


@@ -32,6 +32,6 @@ RUN rm -rf /usr/share/nginx/html/*
# Copy from the stahg 1
COPY --from=builder /frontend/build /usr/share/nginx/html
EXPOSE 3000
EXPOSE 3301
ENTRYPOINT ["nginx", "-g", "daemon off;"]


@@ -44,7 +44,7 @@ In the project directory, you can run:
### `yarn start`
Runs the app in the development mode.\
Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
Open [http://localhost:3301](http://localhost:3301) to view it in the browser.
The page will reload if you make edits.\
You will also see any lint errors in the console.


@@ -0,0 +1,8 @@
{
"files": [
{
"path": "./build/**.js",
"maxSize": "1.2MB"
}
]
}


@@ -1,5 +1,5 @@
server {
listen 3000;
listen 3301;
server_name _;
gzip on;


@@ -0,0 +1,21 @@
{
"data": [
{
"created_at": 1638083159246,
"data": "{}",
"id": 1,
"name": "First Channels",
"type": "slack",
"updated_at": 1638083159246
},
{
"created_at": 1638083159246,
"data": "{}",
"id": 2,
"name": "Second Channels",
"type": "Slack",
"updated_at": 1638083159246
}
],
"message": "Success"
}


@@ -0,0 +1,28 @@
{
"status": "success",
"data": {
"rules": [
{
"labels": { "severity": "warning" },
"annotations": {},
"state": "firing",
"name": "First Rule",
"id": 1
},
{
"labels": { "severity": "warning" },
"annotations": {},
"state": "firing",
"name": "Second Rule",
"id": 2
},
{
"labels": { "severity": "P0" },
"annotations": {},
"state": "firing",
"name": "Third Rule",
"id": 3
}
]
}
}


@@ -0,0 +1,52 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
import defaultAllChannels from '../../fixtures/defaultAllChannels.json';
describe('Channels', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');
cy.visit(Cypress.env('baseUrl') + ROUTES.ALL_CHANNELS);
});
it('Channels', () => {
cy
.intercept('**channels**', {
statusCode: 200,
fixture: 'defaultAllChannels',
})
.as('All Channels');
cy.wait('@All Channels');
cy
.get('.ant-tabs-tab')
.children()
.then((e) => {
const child = e.get();
const secondChild = child[1];
expect(secondChild.outerText).to.be.equals('Alert Channels');
expect(secondChild.ariaSelected).to.be.equals('true');
});
cy
.get('tbody')
.should('be.visible')
.then((e) => {
const allChildren = e.children().get();
expect(allChildren.length).to.be.equals(defaultAllChannels.data.length);
allChildren.forEach((e, index) => {
expect(e.firstChild?.textContent).not.null;
expect(e.firstChild?.textContent).to.be.equals(
defaultAllChannels.data[index].name,
);
});
});
});
});


@@ -20,7 +20,7 @@ describe('default time', () => {
it('Trace Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.TRACES,
route: ROUTES.TRACE,
});
});


@@ -0,0 +1,128 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
import defaultRules from '../../fixtures/defaultRules.json';
describe('Alerts', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');
cy
.intercept('get', '*rules*', {
fixture: 'defaultRules',
})
.as('defaultRules');
cy.visit(Cypress.env('baseUrl') + `${ROUTES.LIST_ALL_ALERT}`);
cy.wait('@defaultRules');
});
it('Edit Rules Page Failure', async () => {
cy
.intercept('**/rules/**', {
statusCode: 500,
})
.as('Get Rules Error');
cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
const firstDelete = e[0];
firstDelete.click();
cy.waitFor('@Get Rules Error');
cy
.window()
.location()
.then((e) => {
expect(e.pathname).to.be.equals(`/alerts/edit/1`);
});
cy.findByText('Something went wrong').then((e) => {
expect(e.length).to.be.equals(1);
});
});
});
it('Edit Rules Page Success', async () => {
const text = 'this is the sample value';
cy
.intercept('**/rules/**', {
statusCode: 200,
body: {
data: {
data: text,
},
},
})
.as('Get Rules Success');
cy.get('button.ant-btn.ant-btn-link:nth-child(2)').then((e) => {
const firstDelete = e[0];
firstDelete.click();
cy.waitFor('@Get Rules Success');
cy.wait(1000);
cy.findByText('Save').then((e) => {
const [el] = e.get();
el.click();
});
});
});
it('All Rules are rendered correctly', async () => {
cy
.window()
.location()
.then(({ pathname }) => {
expect(pathname).to.be.equals(ROUTES.LIST_ALL_ALERT);
cy.get('tbody').then((e) => {
const tarray = e.children().get();
expect(tarray.length).to.be.equals(3);
tarray.forEach(({ children }, index) => {
const name = children[1]?.textContent;
const label = children[2]?.textContent;
expect(name).to.be.equals(defaultRules.data.rules[index].name);
const defaultLabels = defaultRules.data.rules[index].labels;
expect(label).to.be.equals(defaultLabels['severity']);
});
});
});
});
it('Rules are Deleted', async () => {
cy
.intercept('**/rules/**', {
body: {
data: 'Deleted',
message: 'Success',
},
statusCode: 200,
})
.as('deleteRules');
cy.get('button.ant-btn.ant-btn-link:first-child').then((e) => {
const firstDelete = e[0];
firstDelete.click();
});
cy.wait('@deleteRules');
cy.get('tbody').then((e) => {
const trray = e.children().get();
expect(trray.length).to.be.equals(2);
});
});
});


@@ -4,4 +4,4 @@ services:
build: .
image: signoz/frontend:latest
ports:
- "3000:3000"
- "3301:3301"


@@ -1,28 +0,0 @@
const gulp = require('gulp');
const gulpless = require('gulp-less');
const postcss = require('gulp-postcss');
const debug = require('gulp-debug');
var csso = require('gulp-csso');
const autteoprefixer = require('autoprefixer');
const NpmImportPlugin = require('less-plugin-npm-import');
gulp.task('less', function () {
const plugins = [autteoprefixer()];
return gulp
.src('src/themes/*-theme.less')
.pipe(debug({ title: 'Less files:' }))
.pipe(
gulpless({
javascriptEnabled: true,
plugins: [new NpmImportPlugin({ prefix: '~' })],
}),
)
.pipe(postcss(plugins))
.pipe(
csso({
debug: true,
}),
)
.pipe(gulp.dest('./public'));
});


@@ -4,8 +4,8 @@
"description": "",
"main": "webpack.config.js",
"scripts": {
"dev": "NODE_OPTIONS=\"--loader ts-node/esm\" NODE_ENV=development webpack serve --config=webpack.config.ts --progress",
"build": "NODE_OPTIONS=\"--loader ts-node/esm\" webpack --config=webpack.config.prod.ts --progress",
"dev": "cross-env NODE_ENV=development webpack serve --progress",
"build": "webpack --config=webpack.config.prod.js --progress",
"prettify": "prettier --write .",
"lint": "eslint . --debug",
"lint:fix": "eslint . --fix --debug",
@@ -13,31 +13,19 @@
"cypress:run": "cypress run",
"jest": "jest",
"jest:coverage": "jest --coverage",
"jest:watch": "jest --watch"
"jest:watch": "jest --watch",
"bundle:size": "bundlesize"
},
"engines": {
"node": ">=12.13.0"
},
"type": "module",
"author": "",
"license": "ISC",
"dependencies": {
"@ant-design/icons": "^4.6.2",
"@auth0/auth0-react": "^1.2.0",
"@pmmmwh/react-refresh-webpack-plugin": "0.4.2",
"@svgr/webpack": "5.4.0",
"@testing-library/jest-dom": "^5.11.4",
"@testing-library/react": "^11.1.0",
"@testing-library/user-event": "^12.1.10",
"@types/d3": "^6.2.0",
"@types/jest": "^26.0.15",
"@types/react": "^17.0.0",
"@types/react-dom": "^16.9.9",
"@types/react-redux": "^7.1.11",
"@types/react-router-dom": "^5.1.6",
"@types/redux": "^3.6.0",
"@types/styled-components": "^5.1.4",
"@types/vis": "^4.21.21",
"antd": "^4.16.13",
"axios": "^0.21.0",
"babel-eslint": "^10.1.0",
@@ -46,74 +34,44 @@
"babel-plugin-named-asset-import": "^0.3.7",
"babel-preset-minify": "^0.5.1",
"babel-preset-react-app": "^10.0.0",
"bfj": "^7.0.2",
"camelcase": "^6.1.0",
"case-sensitive-paths-webpack-plugin": "2.3.0",
"chart.js": "^3.4.0",
"chartjs-adapter-date-fns": "^2.0.0",
"cross-env": "^7.0.3",
"css-loader": "4.3.0",
"css-minimizer-webpack-plugin": "^3.2.0",
"d3": "^6.2.0",
"d3-flame-graph": "^3.1.1",
"d3-tip": "^0.9.1",
"dayjs": "^1.10.7",
"dotenv": "8.2.0",
"dotenv-expand": "5.1.0",
"eslint-config-react-app": "^6.0.0",
"eslint-plugin-flowtype": "^5.2.0",
"eslint-plugin-jest": "^24.1.0",
"eslint-plugin-jsx-a11y": "^6.3.1",
"eslint-plugin-react-hooks": "^4.2.0",
"eslint-plugin-testing-library": "^3.9.2",
"eslint-webpack-plugin": "^2.1.0",
"file-loader": "6.1.1",
"fs-extra": "^9.0.1",
"history": "4.10.1",
"html-webpack-plugin": "5.1.0",
"identity-obj-proxy": "3.0.0",
"jest": "26.6.0",
"jest-circus": "26.6.0",
"jest-resolve": "26.6.0",
"jest-watch-typeahead": "0.6.1",
"less": "^4.1.2",
"less-loader": "^10.2.0",
"mini-css-extract-plugin": "2.4.5",
"monaco-editor": "^0.30.0",
"pnp-webpack-plugin": "1.6.4",
"postcss-loader": "3.0.0",
"postcss-normalize": "8.0.1",
"postcss-preset-env": "6.7.0",
"postcss-safe-parser": "5.0.2",
"prop-types": "^15.6.2",
"react": "17.0.0",
"react-app-polyfill": "^2.0.0",
"react-chips": "^0.8.0",
"react-css-theme-switcher": "^0.1.6",
"react-dev-utils": "^11.0.0",
"react-dom": "17.0.0",
"react-force-graph": "^1.41.0",
"react-graph-vis": "^1.0.5",
"react-grid-layout": "^1.2.5",
"react-modal": "^3.12.1",
"react-redux": "^7.2.2",
"react-refresh": "^0.8.3",
"react-router-dom": "^5.2.0",
"react-vis": "^1.11.7",
"redux": "^4.0.5",
"redux-thunk": "^2.3.0",
"resolve": "1.18.1",
"resolve-url-loader": "^3.1.2",
"sass-loader": "8.0.2",
"semver": "7.3.2",
"style-loader": "1.3.0",
"styled-components": "^5.2.1",
"terser-webpack-plugin": "4.2.3",
"terser-webpack-plugin": "^5.2.5",
"ts-node": "^10.2.1",
"ts-pnp": "1.2.0",
"tsconfig-paths-webpack-plugin": "^3.5.1",
"typescript": "^4.0.5",
"url-loader": "4.1.1",
"uuid": "^8.3.2",
"web-vitals": "^0.2.4",
"webpack": "^5.23.0",
"webpack-dev-server": "^4.3.1",
"webpack-manifest-plugin": "2.2.0",
"workbox-webpack-plugin": "5.1.4"
"webpack-dev-server": "^4.3.1"
},
"browserslist": {
"production": [
@@ -137,11 +95,20 @@
"@testing-library/cypress": "^8.0.0",
"@types/compression-webpack-plugin": "^9.0.0",
"@types/copy-webpack-plugin": "^8.0.1",
"@types/d3": "^6.2.0",
"@types/d3-tip": "^3.5.5",
"@types/jest": "^26.0.15",
"@types/lodash-es": "^4.17.4",
"@types/node": "^16.10.3",
"@types/react": "^17.0.0",
"@types/react-dom": "^16.9.9",
"@types/react-grid-layout": "^1.1.2",
"@types/react-redux": "^7.1.11",
"@types/react-router-dom": "^5.1.6",
"@types/redux": "^3.6.0",
"@types/styled-components": "^5.1.4",
"@types/uuid": "^8.3.1",
"@types/vis": "^4.21.21",
"@types/webpack": "^5.28.0",
"@types/webpack-dev-server": "^4.3.0",
"@typescript-eslint/eslint-plugin": "^4.28.2",
@@ -149,8 +116,10 @@
"@welldone-software/why-did-you-render": "^6.2.1",
"autoprefixer": "^9.0.0",
"babel-plugin-styled-components": "^1.12.0",
"bundlesize": "^0.18.1",
"compression-webpack-plugin": "^9.0.0",
"copy-webpack-plugin": "^8.1.0",
"critters-webpack-plugin": "^3.0.1",
"cypress": "^8.3.0",
"eslint": "^7.30.0",
"eslint-config-prettier": "^8.3.0",
@@ -161,11 +130,6 @@
"eslint-plugin-promise": "^5.1.0",
"eslint-plugin-react": "^7.24.0",
"eslint-plugin-simple-import-sort": "^7.0.0",
"gulp": "^4.0.2",
"gulp-csso": "^4.0.1",
"gulp-debug": "^4.0.0",
"gulp-less": "^4.0.1",
"gulp-postcss": "^9.0.0",
"husky": "4.3.8",
"less-plugin-npm-import": "^2.1.0",
"lint-staged": "10.5.3",
@@ -173,8 +137,8 @@
"portfinder-sync": "^0.0.2",
"prettier": "2.2.1",
"react-hot-loader": "^4.13.0",
"react-is": "^17.0.1",
"ts-node": "^10.2.1",
"webpack-bundle-analyzer": "^4.5.0",
"webpack-cli": "^4.5.0"
}
}

File diff suppressed because one or more lines are too long.

[New image file; preview not shown. Size after: 10 KiB]

@@ -2,4 +2,4 @@
<path d="M765.131 338.922L805.631 334.984C808.068 348.578 812.99 358.562 820.396 364.938C827.896 371.312 837.974 374.5 850.631 374.5C864.037 374.5 874.115 371.688 880.865 366.062C887.709 360.344 891.131 353.688 891.131 346.094C891.131 341.219 889.677 337.094 886.771 333.719C883.959 330.25 878.99 327.25 871.865 324.719C866.99 323.031 855.881 320.031 838.537 315.719C816.224 310.188 800.568 303.391 791.568 295.328C778.912 283.984 772.584 270.156 772.584 253.844C772.584 243.344 775.537 233.547 781.443 224.453C787.443 215.266 796.021 208.281 807.177 203.5C818.427 198.719 831.974 196.328 847.818 196.328C873.693 196.328 893.146 202 906.177 213.344C919.302 224.688 926.193 239.828 926.849 258.766L885.224 260.594C883.443 250 879.599 242.406 873.693 237.812C867.881 233.125 859.115 230.781 847.396 230.781C835.302 230.781 825.834 233.266 818.99 238.234C814.584 241.422 812.381 245.688 812.381 251.031C812.381 255.906 814.443 260.078 818.568 263.547C823.818 267.953 836.568 272.547 856.818 277.328C877.068 282.109 892.021 287.078 901.677 292.234C911.427 297.297 919.021 304.281 924.459 313.188C929.99 322 932.756 332.922 932.756 345.953C932.756 357.766 929.474 368.828 922.912 379.141C916.349 389.453 907.068 397.141 895.068 402.203C883.068 407.172 868.115 409.656 850.209 409.656C824.146 409.656 804.131 403.656 790.162 391.656C776.193 379.562 767.849 361.984 765.131 338.922ZM967.49 236.406V199.844H1007.01V236.406H967.49ZM967.49 406V256.656H1007.01V406H967.49ZM1043.99 415.844L1089.13 421.328C1089.88 426.578 1091.61 430.188 1094.33 432.156C1098.08 434.969 1103.99 436.375 1112.05 436.375C1122.36 436.375 1130.1 434.828 1135.26 431.734C1138.72 429.672 1141.35 426.344 1143.13 421.75C1144.35 418.469 1144.96 412.422 1144.96 403.609V381.812C1133.15 397.938 1118.24 406 1100.24 406C1080.18 406 1064.29 397.516 1052.57 380.547C1043.38 367.141 1038.79 350.453 1038.79 330.484C1038.79 305.453 1044.79 286.328 1056.79 273.109C1068.88 259.891 1083.88 253.281 1101.79 253.281C1120.26 253.281 1135.49 261.391 1147.49 277.609V256.656H1184.47V390.672C1184.47 408.297 1183.02 421.469 1180.11 430.188C1177.21 438.906 1173.13 445.75 1167.88 450.719C1162.63 455.688 1155.6 459.578 1146.79 462.391C1138.07 465.203 1127.01 466.609 1113.6 466.609C1088.29 466.609 1070.33 462.25 1059.74 453.531C1049.15 444.906 1043.85 433.938 1043.85 420.625C1043.85 419.312 1043.9 417.719 1043.99 415.844ZM1079.29 328.234C1079.29 344.078 1082.33 355.703 1088.43 363.109C1094.61 370.422 1102.21 374.078 1111.21 374.078C1120.86 374.078 1129.02 370.328 1135.68 362.828C1142.33 355.234 1145.66 344.031 1145.66 329.219C1145.66 313.75 1142.47 302.266 1136.1 294.766C1129.72 287.266 1121.66 283.516 1111.91 283.516C1102.44 283.516 1094.61 287.219 1088.43 294.625C1082.33 301.938 1079.29 313.141 1079.29 328.234ZM1224.41 406V199.844H1264.91L1349.29 337.516V199.844H1387.96V406H1346.19L1263.08 271.562V406H1224.41ZM1422.69 329.219C1422.69 316.094 1425.93 303.391 1432.4 291.109C1438.86 278.828 1448.01 269.453 1459.82 262.984C1471.72 256.516 1484.99 253.281 1499.61 253.281C1522.21 253.281 1540.72 260.641 1555.16 275.359C1569.6 289.984 1576.82 308.5 1576.82 330.906C1576.82 353.5 1569.51 372.25 1554.88 387.156C1540.35 401.969 1522.02 409.375 1499.9 409.375C1486.21 409.375 1473.13 406.281 1460.66 400.094C1448.29 393.906 1438.86 384.859 1432.4 372.953C1425.93 360.953 1422.69 346.375 1422.69 329.219ZM1463.19 331.328C1463.19 346.141 1466.71 357.484 1473.74 365.359C1480.77 373.234 1489.44 377.172 1499.76 377.172C1510.07 377.172 1518.69 373.234 1525.63 365.359C1532.66 357.484 1536.18 346.047 
1536.18 331.047C1536.18 316.422 1532.66 305.172 1525.63 297.297C1518.69 289.422 1510.07 285.484 1499.76 285.484C1489.44 285.484 1480.77 289.422 1473.74 297.297C1466.71 305.172 1463.19 316.516 1463.19 331.328ZM1592.01 406V375.203L1647.97 310.938C1657.16 300.438 1663.96 292.984 1668.36 288.578C1663.77 288.859 1657.72 289.047 1650.22 289.141L1597.49 289.422V256.656H1720.96V284.641L1663.86 350.453L1643.76 372.25C1654.72 371.594 1661.52 371.266 1664.15 371.266H1725.32V406H1592.01Z" fill="white"/>
<path opacity="0.9" d="M296.795 599.499C131.909 599.499 0 468.361 0 304.437C0 142.153 131.909 9.37476 296.795 9.37476H483.116C544.124 9.37476 591.941 58.5518 591.941 117.564V304.437C591.941 468.361 460.032 599.499 296.795 599.499Z" fill="#F25733"/>
<path d="M294.467 176.702C171.309 176.702 101.936 280.076 99.0428 284.476C91.91 295.315 91.91 309.334 99.0481 320.181C101.936 324.574 171.309 427.947 294.467 427.947C417.624 427.947 486.997 324.574 489.89 320.173C497.023 309.334 497.024 295.315 489.885 284.468C486.997 280.076 417.624 176.702 294.467 176.702ZM116.09 308.659C113.557 304.811 113.557 299.839 116.09 295.99C118.416 292.45 167.808 218.911 256.115 201.271C216.099 216.928 187.625 256.307 187.625 302.325C187.625 348.342 216.099 387.721 256.115 403.378C167.808 385.737 118.416 312.198 116.09 308.659ZM245.232 302.324C245.232 308.059 240.646 312.706 234.989 312.706C229.331 312.706 224.746 308.059 224.746 302.324C224.746 263.357 256.022 231.655 294.466 231.655C300.123 231.655 304.709 236.303 304.709 242.037C304.709 247.772 300.123 252.419 294.466 252.419C267.317 252.419 245.232 274.806 245.232 302.324ZM294.467 327.565C280.736 327.565 269.565 316.243 269.565 302.325C269.565 288.407 280.736 277.084 294.467 277.084C308.199 277.084 319.369 288.406 319.369 302.325C319.369 316.243 308.199 327.565 294.467 327.565ZM472.843 308.659C470.516 312.198 421.125 385.737 332.818 403.378C372.836 387.72 401.309 348.342 401.309 302.325C401.309 256.307 372.836 216.929 332.818 201.272C421.125 218.913 470.516 292.451 472.843 295.99C475.376 299.839 475.376 304.811 472.843 308.659Z" fill="#F9F2F9"/>
</svg>
</svg>

[SVG image preview omitted; size: 5.6 KiB before and after]

@@ -4,24 +4,43 @@ import ROUTES from 'constants/routes';
import AppLayout from 'container/AppLayout';
import history from 'lib/history';
import React, { Suspense } from 'react';
import { useSelector } from 'react-redux';
import { Redirect, Route, Router, Switch } from 'react-router-dom';
import { AppState } from 'store/reducers';
import AppReducer from 'types/reducer/app';
import routes from './routes';
const App = (): JSX.Element => (
<Router history={history}>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }, index) => (
<Route key={index} exact={exact} path={path} component={component} />
))}
<Redirect from="/" to={ROUTES.APPLICATION} />
<Route component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
</Router>
);
const App = (): JSX.Element => {
const { isLoggedIn } = useSelector<AppState, AppReducer>((state) => state.app);
return (
<Router history={history}>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }, index) => (
<Route key={index} exact={exact} path={path} component={component} />
))}
<Route
path="/"
exact
render={(): JSX.Element =>
isLoggedIn ? (
<Redirect to={ROUTES.APPLICATION} />
) : (
<Redirect to={ROUTES.SIGN_UP} />
)
}
/>
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</AppLayout>
</Router>
);
};
export default App;


@@ -19,7 +19,7 @@ export const ServiceMapPage = Loadable(
);
export const TraceDetailPages = Loadable(
() => import(/* webpackChunkName: "TraceDetailPage" */ 'pages/TraceDetails'),
() => import(/* webpackChunkName: "TraceDetailPage" */ 'pages/Trace'),
);
export const TraceGraphPage = Loadable(


@@ -1,17 +1,15 @@
import { apiV1, AxiosAlertManagerInstance } from 'api';
import { apiV2 } from 'api/apiV1';
import { AxiosAlertManagerInstance } from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/getGroups';
import convertObjectIntoParams from 'lib/query/convertObjectIntoParams';
const getGroups = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const queryParams = Object.keys(props)
.map((e) => `${e}=${props[e]}`)
.join('&');
const queryParams = convertObjectIntoParams(props);
const response = await AxiosAlertManagerInstance.get(
`/alerts/groups?${queryParams}`,


@@ -1,5 +1,10 @@
const get = (key: string): string | null => {
return localStorage.getItem(key);
try {
const value = localStorage.getItem(key);
return value;
} catch (e) {
return '';
}
};
export default get;


@@ -1,5 +1,10 @@
const remove = (key: string): void => {
window.localStorage.removeItem(key);
const remove = (key: string): boolean => {
try {
window.localStorage.removeItem(key);
return true;
} catch (e) {
return false;
}
};
export default remove;


@@ -1,5 +1,10 @@
const set = (key: string, value: string): void => {
localStorage.setItem(key, value);
const set = (key: string, value: string): boolean => {
try {
localStorage.setItem(key, value);
return true;
} catch (e) {
return false;
}
};
export default set;
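
For context only, a minimal usage sketch of the guarded localStorage helpers changed above; it is not part of this diff. The import paths and the warning message are assumptions for illustration, while the 'isLoggedIn' key mirrors the one used in the Cypress specs in this changeset:

// Illustrative sketch, not repository code. Import paths are assumed.
import getLocalStorage from 'api/browser/localstorage/get';
import setLocalStorage from 'api/browser/localstorage/set';

// The helpers now catch storage exceptions (e.g. blocked storage or private
// browsing), so callers can branch on the returned value instead of wrapping
// every call in try/catch.
const LOGIN_KEY = 'isLoggedIn'; // same key the Cypress specs above rely on

if (!setLocalStorage(LOGIN_KEY, 'yes')) {
	// set() now returns false when the value could not be persisted
	console.warn('localStorage unavailable; login state will not persist');
}

// get() returns '' instead of throwing when storage is unavailable
const isLoggedIn = getLocalStorage(LOGIN_KEY) === 'yes';
console.log('logged in:', isLoggedIn);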


@@ -0,0 +1,48 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getFilters';
import omitBy from 'lodash-es/omitBy';
const getFilters = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const duration =
omitBy(props.other, (_, key) => !key.startsWith('duration')) || [];
const nonDuration = omitBy(props.other, (_, key) =>
key.startsWith('duration'),
);
const exclude: string[] = [];
props.isFilterExclude.forEach((value, key) => {
if (value) {
exclude.push(key);
}
});
const response = await axios.post<PayloadProps>(`/getSpanFilters`, {
start: props.start,
end: props.end,
getFilters: props.getFilters,
...nonDuration,
maxDuration: String((duration['duration'] || [])[0] || ''),
minDuration: String((duration['duration'] || [])[1] || ''),
exclude: exclude,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getFilters;


@@ -1,26 +0,0 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getSpans';
const getSpans = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.get(
`/spans?&start=${props.start}&end=${props.end}&kind=${props.kind}&lookback=${props.lookback}&maxDuration=${props.maxDuration}&minDuration=${props.minDuration}&operation=${props.operation}&service=${props.service}&limit=${props.limit}&tags=${props.tags}`,
);
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getSpans;


@@ -1,26 +0,0 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getSpanAggregate';
const getSpansAggregate = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.get(
`/spans/aggregates?start=${props.start}&end=${props.end}&aggregation_option=${props.aggregation_option}&dimension=${props.dimension}&kind=${props.kind}&maxDuration=${props.maxDuration}&minDuration=${props.minDuration}&operation=${props.operation}&service=${props.service}&step=${props.step}&tags=${props.tags}`,
);
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getSpansAggregate;


@@ -0,0 +1,59 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import omitBy from 'lodash-es/omitBy';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getSpans';
const getSpans = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const updatedSelectedTags = props.selectedTags.map((e) => ({
Key: e.Key[0],
Operator: e.Operator,
Values: e.Values,
}));
const exclude: string[] = [];
props.isFilterExclude.forEach((value, key) => {
if (value) {
exclude.push(key);
}
});
const other = Object.fromEntries(props.selectedFilter);
const duration = omitBy(other, (_, key) => !key.startsWith('duration')) || [];
const nonDuration = omitBy(other, (_, key) => key.startsWith('duration'));
const response = await axios.post<PayloadProps>(
`/getFilteredSpans/aggregates`,
{
start: String(props.start),
end: String(props.end),
function: props.function,
groupBy: props.groupBy,
step: props.step,
tags: updatedSelectedTags,
...nonDuration,
maxDuration: String((duration['duration'] || [])[0] || ''),
minDuration: String((duration['duration'] || [])[1] || ''),
exclude,
},
);
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getSpans;


@@ -0,0 +1,60 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import omitBy from 'lodash-es/omitBy';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getSpanAggregate';
import { TraceFilterEnum } from 'types/reducer/trace';
const getSpanAggregate = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const preProps = {
start: String(props.start),
end: String(props.end),
limit: props.limit,
offset: props.offset,
};
const exclude: TraceFilterEnum[] = [];
props.isFilterExclude.forEach((value, key) => {
if (value) {
exclude.push(key);
}
});
const updatedSelectedTags = props.selectedTags.map((e) => ({
Key: e.Key[0],
Operator: e.Operator,
Values: e.Values,
}));
const other = Object.fromEntries(props.selectedFilter);
const duration = omitBy(other, (_, key) => !key.startsWith('duration')) || [];
const nonDuration = omitBy(other, (_, key) => key.startsWith('duration'));
const response = await axios.post<PayloadProps>(`/getFilteredSpans`, {
...preProps,
tags: updatedSelectedTags,
...nonDuration,
maxDuration: String((duration['duration'] || [])[0] || ''),
minDuration: String((duration['duration'] || [])[1] || ''),
exclude,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getSpanAggregate;


@@ -0,0 +1,38 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { omitBy } from 'lodash-es';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getTagFilters';
const getTagFilters = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const duration =
omitBy(props.other, (_, key) => !key.startsWith('duration')) || [];
const nonDuration = omitBy(props.other, (_, key) =>
key.startsWith('duration'),
);
const response = await axios.post<PayloadProps>(`/getTagFilters`, {
start: String(props.start),
end: String(props.end),
...nonDuration,
maxDuration: String((duration['duration'] || [])[0] || ''),
minDuration: String((duration['duration'] || [])[1] || ''),
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getTagFilters;


@@ -2,18 +2,18 @@ import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getServiceOperation';
import { PayloadProps } from 'types/api/user/getUserPreference';
const getServiceOperation = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
const getPreference = async (): Promise<
SuccessResponse<PayloadProps> | ErrorResponse
> => {
try {
const response = await axios.get(`/service/${props.service}/operations`);
const response = await axios.get(`/userPreferences`);
return {
statusCode: 200,
error: null,
message: 'Success',
message: response.data.status,
payload: response.data,
};
} catch (error) {
@@ -21,4 +21,4 @@ const getServiceOperation = async (
}
};
export default getServiceOperation;
export default getPreference;


@@ -2,18 +2,18 @@ import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps } from 'types/api/trace/getServiceList';
import { PayloadProps } from 'types/api/user/getVersion';
const getServiceList = async (): Promise<
const getVersion = async (): Promise<
SuccessResponse<PayloadProps> | ErrorResponse
> => {
try {
const response = await axios.get('/services/list');
const response = await axios.get(`/version`);
return {
statusCode: 200,
error: null,
message: 'Success',
message: response.data.status,
payload: response.data,
};
} catch (error) {
@@ -21,4 +21,4 @@ const getServiceList = async (): Promise<
}
};
export default getServiceList;
export default getVersion;


@@ -2,18 +2,20 @@ import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getTags';
import { PayloadProps, Props } from 'types/api/user/setUserPreference';
const getTags = async (
const setPreference = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.get(`/tags?service=${props.service}`);
const response = await axios.post(`/userPreferences`, {
...props,
});
return {
statusCode: 200,
error: null,
message: 'Success',
message: response.data.status,
payload: response.data,
};
} catch (error) {
@@ -21,4 +23,4 @@ const getTags = async (
}
};
export default getTags;
export default setPreference;


@@ -8,7 +8,9 @@ const signup = async (
props: Props,
): Promise<SuccessResponse<undefined> | ErrorResponse> => {
try {
const response = await axios.post(`/user?email=${props.email}`);
const response = await axios.post(`/user`, {
...props,
});
return {
statusCode: 200,

Some files were not shown because too many files have changed in this diff Show More