Compare commits

515 Commits

Author SHA1 Message Date
Ankit Nayan
da5bf3aea0 release: v0.4.4 2021-11-16 21:49:26 +05:30
pal-sig
28c8df5e63 Fix(FE):trace page (#356)
* chore: Router provider is removed

* update: localstorage set get is added

* update: AppLayout is updated

* fix: adapter type is fixed

* fix: Metric and metric application is now fixed

* fix: Metrics page application is updated

* fix: Trace page is fixed

* fix: app layout is updated

* fix: global Time reducer is updated

* refactor: getService api is added

* update: metrics reducer is added

* update: service list is fixed

* fix: Metrics page is updated

* fix: apis for the metrics application are done

* fix: metrics reducer is updated

* fix: metrics application is updated

* fix: content layout shift is removed

* fix: Metric application is updated

* fix: metrics application is updated

* fix: Metrics application is updated

* fix: Application tab is updated

* chore: graph is updated

* chore: Metrics application is updated

* fix: chart x-axis label is now fixed

* fix: application tab is updated

* fix: Top endpoints is added and re-rendering is stopped

* fix: fixed the edge case so that when the user changes the global time, updated data is fetched

* fix: Settings page is updated

* chore: AppLayout is updated

* chore: AppLayout is updated

* chore: applayout is updated

* chore: changed default loading to true in the global time reducer

* chore: Global Time option is fixed

* chore: Signup and Applayout is updated

* chore: Button text is updated

* chore: Button in the metrics application is updated

* chore: dashboard menu item position in the side nav is updated

* fix: Logo is now redirecting to the Application page

* fix: Application page is updated

* fix: AppLayout is updated

* fix: starting and ending time is fixed

* fix: Metrics Application is updated to the previous chart data

* update: getDateArrayFromStartAndEnd function is added

* update: Empty graph data is added

* fix: External Call and DB Call tab graphs are updated; when there is no data, empty data is rendered

* fix: onboarding modal condition is fixed and the api is now called every 50000 ms to fetch the data (see the polling sketch after this list)

* fix: onBoarding condition modal is updated

* fix: onBoarding condition modal is updated

* fix: onBoarding condition modal is updated

* fix: Application chart re-rendering issue is fixed

* fix: Application page is changed when we change the global time

* chore: step size is increased from 30 to 60

* chore: build is now fixed

* chore: metrics application page is updated

* fix: empty graph is now fixed

* fix: application metrics graph is now fixed

* update: separate apis for the trace page are made

* fix: /trace page is updated

* chore: Filter of the Trace page is updated

* chore: initial trace page is updated

* fix: changing the filters fetches the updated values from the backend

* chore: Trace page is updated

* update: trace page is updated

* fix: trace page is updated

* Refresh Text is updated

* update: Trace page is updated

* update: header is updated

* update: Trace page is updated

* update: Trace page is updated

* update: Trace page is updated

* update: Trace page is updated

* update: why-did-you-render is added

* update: trace page is updated

* update: trace page is updated

* update: Loading is updated

* update: start and end time is updated

* fix: metrics and metrics page redundant calls are reduced

* fix: Metrics Application page reducer is reset on the unmount

* fix: Trace page reducer is reset when the page is unmounted

* fix: Custom Visualizations is now fetching only one api to get the details

* fix: Trace page is updated

* fix: composeEnhancers is updated

* fix: metrics application is updated

* chore: webpack eslint fixes are updated

* chore: some of the type definition is added

* fix(UI): Trace page bug is resolved

* chore(UI): if the length of the selected tags is zero, update the value on the form

* chore(UI): check for the no spans filter is updated
2021-11-16 21:13:20 +05:30
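One bullet above mentions calling the api every 50000 ms; here is a minimal sketch of that polling pattern as a React hook, assuming the hook name and wiring (illustrative, not the actual SigNoz code):

```ts
import { useEffect } from 'react';

// poll fetchData on a fixed interval; 50000 ms matches the commit above
function usePolling(fetchData: () => void, intervalMs = 50000): void {
	useEffect(() => {
		fetchData(); // initial load on mount
		const id = setInterval(fetchData, intervalMs);
		return (): void => clearInterval(id); // stop polling on unmount
	}, [fetchData, intervalMs]);
}
```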
pal-sig
510815655f Feat(FE): retention UI (#353)
* feat: get set retention api is updated

* feat: Settings retention UI is updated
2021-11-09 17:10:15 +05:30
Estee Tey Siew Wen
53d52254cb Update webpack config to typescript (#334)
* added docker generated files to .gitignore

* update webpack.config.js to webpack.config.ts

* change web dev server to use port from env

* update webpack-dev-server to 4.3.1, update import statement for chartjsAdapter

* Revert "added docker generated files to .gitignore"

This reverts commit 494cfcda0e.

* use portfinder-sync for webpack dev server and remove .env.sample (see the sketch after this entry)

* add webpack config typing to prod config
2021-10-27 13:24:12 +05:30
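A minimal sketch of what such a typed webpack config with a dynamically chosen dev-server port can look like; the entry point, base port, and option set here are assumptions, not the exact SigNoz config:

```ts
// webpack.config.ts
import { Configuration } from 'webpack';
import { Configuration as DevServerConfiguration } from 'webpack-dev-server';
import portFinderSync from 'portfinder-sync';

const devServer: DevServerConfiguration = {
	// take the first free port at or above 3000 instead of failing when busy
	port: portFinderSync.getPort(3000),
	historyApiFallback: true,
};

const config: Configuration & { devServer?: DevServerConfiguration } = {
	mode: 'development',
	entry: './src/index.tsx',
	devServer,
};

export default config;
```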
Ankit Nayan
655061212f release: v0.4.3 2021-10-22 18:37:47 +05:30
pal-sig
ec11abf54e fix: Logo margin is updated (#352) 2021-10-22 18:31:53 +05:30
Ankit Nayan
f3fb325a13 chore: changed ttl get response to number of hours in int (#351) 2021-10-22 17:15:20 +05:30
pal-sig
fa11cd651e fix: logo is updated to new one (#350) 2021-10-22 17:10:57 +05:30
pal-sig
6f57a0c9b2 fix: Sonar Property is updated (#338) 2021-10-22 17:09:11 +05:30
pal-sig
ae4f75e54b test(FE):Cypress global time test case (#348)
* chore: cypress version is updated

* chore: tsconfig is updated

* update: default fixture json for the apis are added

* feat: redux-store is exposed to the Cypress

* test: Login test is updated

* test: global time test for default and metrics application is updated

* chore: removed duplicate test case and commented unused lines
2021-10-22 17:07:57 +05:30
Pranay Kumar
1e33f16943 refactor: dashboard route generation (#340)
* refactor: dashboard route generation

* fix: dashboard link
2021-10-22 17:07:22 +05:30
Exequiel Ceasar Navarrete
b4a9b248cf Remove duplicate @babel/core package from package.json (#341) 2021-10-22 17:06:35 +05:30
Yash Joshi
4ce1297856 chore: update magic comment for DashboardWidget chunk name (#344) 2021-10-22 17:06:05 +05:30
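For context, a webpack magic comment names the chunk emitted for a lazy import; the import path here is an illustrative assumption:

```ts
import { lazy } from 'react';

// the magic comment sets the chunk's file name in the build output
const DashboardWidget = lazy(
	() => import(/* webpackChunkName: "DashboardWidget" */ './pages/DashboardWidget'),
);
```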
Yash Joshi
e46ff48b80 a11y(signup-page): autofocus first input element (email) (#343) 2021-10-22 17:05:44 +05:30
pal-sig
73e3e061e0 feat: signup logic is now fixed (#349) 2021-10-22 17:05:10 +05:30
Yash Sharma
992644dff7 Added GET/SET endpoint for setting ttl for clickhouse (#304)
* feat: add ttl for clickhouse setup in signoz

* feat: added ttl for metrics table

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* feat: changed the api to use type and duration as params

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* added a getter for ttl endpoint

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* added a feature to return ttl for both metrics and traces (see the sketch after this entry)

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>
2021-10-20 13:18:19 +05:30
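A hedged sketch of a client for the GET/SET TTL endpoint described above, with `type` and `duration` as params; the exact route and value formats are assumptions inferred from these commit messages:

```ts
type TTLType = 'traces' | 'metrics';

// set TTL for one signal type; the duration format is assumed here
async function setTTL(type: TTLType, duration: string): Promise<void> {
	const params = new URLSearchParams({ type, duration });
	await fetch(`/api/v1/settings/ttl?${params.toString()}`, { method: 'POST' });
}

// read back the configured TTL (hours as an int, per commit f3fb325a13 above)
async function getTTL(): Promise<unknown> {
	const res = await fetch('/api/v1/settings/ttl');
	return res.json();
}
```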
Ankit Nayan
dea74c5f8a Span metrics generation (#347)
* feat: release for span to metrics

* feat: release for span to metrics
2021-10-20 09:28:47 +05:30
Palash
d2b107ec7f Fix(FE): global time (#332)
* chore: Router provider is removed

* update: localstorage set get is added

* update: AppLayout is updated

* fix: adapter type is fixed

* fix: Metric and metric application is now fixed

* fix: Metrics page application is updated

* fix: Trace page is fixed

* fix: app layout is updated

* fix: global Time reducer is updated

* refactor: getService api is added

* update: metrics reducer is added

* update: service list is fixed

* fix: Metrics page is updated

* fix: apis for the metrics application are done

* fix: metrics reducer is updated

* fix: metrics application is updated

* fix: content layout shift is removed

* fix: Metric application is updated

* fix: metrics application is updated

* fix: Metrics application is updated

* fix: Application tab is updated

* chore: graph is updated

* chore: Metrics application is updated

* fix: chart x-axis label is now fixed

* fix: application tab is updated

* fix: Top endpoints is added and re-rendering is stopped

* fix: fixed the edge case so that when the user changes the global time, updated data is fetched

* fix: Settings page is updated

* chore: AppLayout is updated

* chore: AppLayout is updated

* chore: applayout is updated

* chore: changed default loading to true in the global time reducer

* chore: Global Time option is fixed

* chore: Signup and Applayout is updated

* chore: Button text is updated

* chore: Button in the metrics application is updated

* chore: dashboard menu item position in the side nav is updated

* fix: Logo is now redirecting to the Application page

* fix: Application page is updated

* fix: AppLayout is updated

* fix: starting and ending time is fixed

* fix: Metrics Application is updated to the previous chart data

* update: getDateArrayFromStartAndEnd function is added

* update: Empty graph data is added

* fix: External Call and DB Call tab graphs are updated; when there is no data, empty data is rendered

* fix: onboarding modal condition is fixed and the api is now called every 50000 ms to fetch the data

* fix: onBoarding condition modal is updated

* fix: onBoarding condition modal is updated

* fix: onBoarding condition modal is updated

* fix: Application chart re-rendering issue is fixed

* fix: Application page is changed when we change the global time

* chore: step size is increased from 30 to 60

* chore: build is now fixed

* chore: metrics application page is updated

* fix: empty graph is now fixed

* fix: application metrics graph is now fixed

* fix: Time selection for custom is fixed

* fix: usage graph is fixed

* fix: global time selector is fixed and empty graph on click handler is added

* fix: metrics application is now fixed
2021-10-20 09:24:55 +05:30
Ankit Nayan
1ebf1a3675 fix: CH query to get metrics data in prometheus fork 2021-10-17 22:39:26 +05:30
pal-sig
02446579a6 Fix(FE): remove unused panel settings (#336)
* chore: unused settings are removed

* chore: unused settings are removed

* chore: unused settings are removed
2021-10-11 16:35:20 +05:30
Bhavin Ag
56fcc0c4a7 chore(jest): setup jest for frontend (#331)
* chore: setup jest tests

* refactor: add explicit command for running with coverage

* chore(jest): update js files to ts files

* chore: update jest setup paths
2021-10-11 16:31:38 +05:30
Adam Szatyin
ce78013646 feat: add ESLint and Prettier to CI pipeline (#329) 2021-10-11 16:30:50 +05:30
Ankit Nayan
8e7367cae1 chore: indexing serviceName, name, kind and traceID (#327) 2021-10-11 16:30:05 +05:30
Adam Szatyin
6f4327bfa1 feat: add Sonar analysis to build pipeline (#324) 2021-10-11 16:26:43 +05:30
pal-sig
050da9a2a9 fix: while saving the widget, queryData (the response of the query) is not sent to the backend as it is always calculated on the frontend (#323) 2021-10-11 16:21:44 +05:30
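A sketch of that idea: strip the query's response before persisting the widget, since the chart data is always recomputed on the frontend. The Widget shape is hypothetical:

```ts
interface Widget {
	id: string;
	query: string[];
	queryData?: unknown; // chart values derived from the query at render time
}

// only the widget definition is sent to the backend
function toSavePayload(widget: Widget): Omit<Widget, 'queryData'> {
	const { queryData, ...definition } = widget;
	return definition;
}
```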
pal-sig
2c1c0ceea6 fix: isLogged is now accounted into the reducer and redirection logic is moved to the AppLayout (#322) 2021-10-11 16:21:15 +05:30
pal-sig
d69a637275 fix: signup page is now fixed (#337) 2021-10-11 16:16:20 +05:30
pal-sig
d10b9790dc Fix(FE): Pages are refactored (#321)
* feat: signup api is added

* fix: instrument and signup page is refactored

* fix: Settings page is updated
2021-10-11 16:05:59 +05:30
Ankit Nayan
917ef533a3 fix: added empty folder for docker-compose deployment bind mount issues 2021-10-08 16:41:19 +05:30
Vincent Rohde
76102dfc7e Translate README to German 🇩🇪 (#330)
* #240 add German translation of README.md

* #240 add link to German translation in README.md

* #240 sort translation links by alphabetical order
2021-10-04 08:53:26 +05:30
Ankit Nayan
a576092cd4 feat: adding more hostmetrics (#317) 2021-09-28 23:24:14 +05:30
Ankit Nayan
a5fd338a9d release: v0.4.1 2021-09-28 20:32:09 +05:30
Ankit Nayan
8a781076e1 Revert "fix: frontend/package.json, frontend/yarn.lock & frontend/.snyk to reduce vulnerabilities (#310)" (#315)
This reverts commit e756cefa75.
2021-09-28 19:25:32 +05:30
Palash
18fc697b91 Fix dark theme mode (#314)
* theme address is fixed

* theme address is fixed
2021-09-28 19:03:19 +05:30
Palash
93b347d25e Fix(FE): dark mode (#301)
* fix: fav icon is fixed and bootstrap is removed

* fix: return type is updated for the global time reducer

* fix: theme.css is replaced with .min.css

* update: useThemeSwitcher is removed from the graph component and the value is grabbed from the reducer

* update: instrumentation page is updated

* update: react-css-theme-switcher package is removed

* update: darkMode is updated

* fix: Sider component is updated
2021-09-28 18:50:10 +05:30
Palash
ea5b40c7ea Feat(FE): Delete Query, Save Layout (#306)
* feat: Delete Query functionality is added

* feat: save layout is updated
2021-09-28 18:38:34 +05:30
Palash
cc91242e9a Fix(FE): Fix date dashboard (#311)
* chore: getFormatedDate function is added

* fix: date format in the all dashboard is updated to mm/dd/yyyy HH:MM
2021-09-28 18:32:02 +05:30
Snyk bot
e756cefa75 fix: frontend/package.json, frontend/yarn.lock & frontend/.snyk to reduce vulnerabilities (#310)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-ANSIREGEX-1583908
- https://snyk.io/vuln/SNYK-JS-AXIOS-1579269
- https://snyk.io/vuln/SNYK-JS-D3COLOR-1076592
- https://snyk.io/vuln/SNYK-JS-GLOBPARENT-1016905


The following vulnerabilities are fixed with a Snyk patch:
- https://snyk.io/vuln/npm:debug:20170905
2021-09-28 18:25:41 +05:30
Palash
da653681cf fix: color for the graph is updated (#305) 2021-09-28 18:20:38 +05:30
Palash
93b5a945a4 fix: Chart.js plugins are not registered at each and every render (#303) 2021-09-28 18:20:14 +05:30
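The usual fix for that is registering Chart.js plugins once at module scope instead of inside a component render; a minimal sketch with an illustrative plugin set:

```ts
import {
	Chart,
	LineController,
	LineElement,
	PointElement,
	LinearScale,
	TimeScale,
	Tooltip,
	Legend,
} from 'chart.js';

// runs once when the bundle loads, not on every React render of a graph
Chart.register(LineController, LineElement, PointElement, LinearScale, TimeScale, Tooltip, Legend);
```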
Palash
9ab1093d81 fix: createdBy is renamed to createdAt (#302) 2021-09-28 18:19:30 +05:30
Palash
b4754053aa fix: fav icon is fixed and bootstrap is removed (#299) 2021-09-28 18:18:30 +05:30
Ankit Nayan
8fef964485 feat: persisting database for dashboards 2021-09-28 18:14:12 +05:30
Ankit Nayan
004dda200c feat: signoz can now scale up in docker swarm (#309)
* feat: signoz can now scale up in docker swarm

* chore: adding empty folders for volume mount

* chore: using image 0.4.0

* chore: adding folder to persist signoz.db
2021-09-28 18:10:44 +05:30
Ankit Nayan
6a01ce88cb chore: exit application if clickhouse is not reachable (#308) 2021-09-27 23:20:27 +05:30
Ankit Nayan
ade8cda91c release: 0.4.0 2021-09-24 16:51:13 +05:30
Ankit Nayan
3b7484f423 Merge branch 'main' of https://github.com/SigNoz/signoz 2021-09-23 16:19:42 +05:30
Ankit Nayan
959aad252c release: 0.4.0 2021-09-23 16:18:36 +05:30
Palash
7b70cfb0c4 feat: Metrics (#281)
* refactor: store is updated

* temp

* fix: eslint error is fixed

* fix: eslint linting error is updated

* chore: react-grid-layout is added

* chore: linting changes are updated

* chore: linting changes are updated

* chore: @types/node is moved to devDependencies and @types/react-grid-layout is added

* chore: tsconfig is updated

* chore: updateUrl function is updated

* feat: All Dashboard is updated

* feat: All Dashboard page is updated

* feat: New Dashboard is added

* feat: App Layout is updated

* feat: Add Tags is updated

* chore: uuid package is added

* chore: AppRoutes is updated

* chore: UI components are updated

* chore: baseUrl is added in the apiUrl and removed from other api request

* chore: commonApi Response is updated

* chore: ErrorResponse handler is updated

* chore: useFetch hook is made

* chore: axios instance is updated

* chore: some of the changes are updated

* chore: list of all dashboard types is updated

* chore: logic is updated to the global state

* chore: all dashboard data is fetched from the global state

* chore: unnecessary prop is removed

* chore: changes are updated

* chore: getAll and create is updated

* chore: getDashboard is updated

* chore: isEditMode is moved to the global state

* chore: get,getAll is updated

* chore: update title,tags,description is now fixed

* chore: new widget is updated

* chore: graph is updated

* chore: input component accept input props

* chore: name of the dashboard is updated

* chore: Widgets page in WIP

* chore: types for the error api is updated

* chore: getQuery data is updated

* chore: widget types is updated

* default widget is updated

* chore: getQuery is updated

* chore: Add Query is updated

* fix: creating new widget bug is resolved

* chore: widget type is updated

* chore: Query error is updated

* chore: query error and success state is handled

* chore: label of graph in WIP

* chore: legend input placeholder is updated

* chore: changes are updated

* chore: no data component is updated and error component is rendered along with the data

* chore: data fetching on the initial mount is fixed

* chore: convertDateToAndPm is updated

* chore: x-axis label is now fixed

* chore: label is updated

* chore: labels name is updated

* chore: labels name is updated

* chore: labels color is updated

* chore: values are parsed in float

* chore: tags is updated

* chore: datasets type is updated

* chore: graph is updated

* chore: more eslint rules are updated

* chore: some of the linting changes and data is updated

* chore: chart.js version is updated

* chore: gitignore is updated

* chore: graph component is updated

* chore: apply functionality is updated

* chore: dashboard is now saved

* chore: getChartData is updated

* feat: Dashboard graph is reflected

* chore: some of the bugs is resolved

* fix: aspect ratio is made false

* chore: some small css are fixed

* chore: widgetId and graphType is preAdded if present in the search params

* chore: user is now able to change the time via global time and reflect new graph values

* chore: query is updated

* chore: onBlurHandler is updated

* fix: usage explorer is now fixed

* chore: bar element is updated

* chore: chartjs adapter is added

* chore: old instance for the charts are removed via re-chart

* chore: re-chart is removed

* chore: get chart data is updated

* chore: added the counter in the useEffect

* chore: history is added

* chore: some of the features are updated

* chore: history package is updated

* chore: AppRoutes is updated

* fix: some components were breaking while moving from BrowserRouter to Router

* chore: Dashboard icon is updated

* chore: Full screen component is updated

* stepSize (optional) is added in the widgets type (see the sketch after this list)

* fix: fetching query result is fixed

* update: start and end time function is updated

* fix: Alert color is updated

* update: Query fetching is updated

* fix: start and end time is fixed

* fix: chartjs data is compatible with larger data sets and no ajax call for empty query is fixed

* fix: last 1 week selection is fixed

* fix: legends is added

* update: antd version is updated

* feat: value graph is updated

* feat: Title is added for the value graph

* fix: Full Screen view is updated with refresh functionality and alignment is updated to flex-end

* fix: Graph component is updated

* fix: metric graph are fixed

* feature: Delete widget functionality is updated

* fix: empty value bug is resolved

* fix: delete widget position is fixed

* fix: resize functionality is fixed

* fix: summation of the query is fixed

* update: default legend is removed

* update: resize handlers is removed and service metric component is updated

* fix: legends is updated

* update: querySuccess reducer is updated

* Modal component is updated

* fix: ant-d css of the tabs is updated

* update: stringToHTML is made

* update: graph component is updated

* fix: several component in the metric and traces are updated

* wip: build error is fixed

* fix: metric section is fixed

* update: console.log are commented

* fix: onClick graph re-render is stopped

* fix: trace graph is updated

* fix: updated the min,max time for the value type graph

* getQueryMaxMin Time is updated

* fix: trace chart is updated

* fix: re-render is fixed

* fix: localstorage persistence is in place

* update: if label is not present legend is not displayed

* fix: graph is changed while updated the global time

* fix: default title is updated while creation of the dashboard

* update: external database call tabs are made of same size

* fix: query graph max-min time is updated in the full screen mode

* fix: Request per sec graph is fixed

* fix: ErrorChart is fixed

Co-authored-by: Palash gupta <palashgdev@gmail.com>
2021-09-23 15:43:43 +05:30
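An illustrative shape for the widgets type with the optional stepSize mentioned in the list above; the other fields are assumptions, not the actual SigNoz types:

```ts
interface Widget {
	id: string;
	title: string;
	query: string[];
	stepSize?: number; // optional, per the commit; assumed to be the query step
}
```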
Ankit Nayan
53d5e37b5f feat: enable data persistence in clickhouse docker (#297) 2021-09-20 16:15:02 +05:30
Pranay Prateek
69821cc13c Update README.md 2021-09-18 13:00:18 +05:30
Lucas Barbosa
a555c2cb93 docs: translate readme.md into portuguese-brazil (#238) (#296) 2021-09-18 12:57:18 +05:30
Pranay Prateek
24d51e3c3a Update install.sh 2021-09-16 20:57:32 +05:30
Vamsi Krishna
368e11e17a fix: updated the footer year (#290)
Co-authored-by: Palash <88981777+palash-signoz@users.noreply.github.com>
2021-09-06 22:04:43 +05:30
Ankit Nayan
118ee9dd90 fix: fixed cors error for PUT (#287) 2021-09-02 14:55:07 +05:30
Ankit Nayan
30961da59f Crud APIs for dashboards (#286)
* added signoz.db to gitignore

* model and crud methods for dashboard package

* added signoz.db to dockerignore

* feat: dashboards crud WIP

* chore: moving response format to correct file

* chore: adding dependencies for sqlite3

* feat: CRUD APIs ready for dashboards

* fix: sqlite needs cgo enabled, hence some flags need to be added when building the go code

* feat: provision dashboards using json

* chore: mounting dashboard folder to container
2021-09-02 13:18:47 +05:30
Jacoberson
9692b9985a fix: hot reload issue (#279)
* fixing hot reload issue

* Update webpack.config.js

removed duplicate key-value pairs.

Co-authored-by: Palash <88981777+palash-signoz@users.noreply.github.com>
2021-09-02 11:34:40 +05:30
Ankit Nayan
98ab64cb94 chore: query-service 0.4.0 2021-08-30 17:16:32 +05:30
Raj Babu Das
f883d02ff7 fixing codeql workflow (#283)
* fixing codeql

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* fixing codeql

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>
2021-08-29 13:01:38 +05:30
Raj Babu Das
fff9031bf7 Adding codeql (#253)
Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-29 10:37:34 +05:30
Ankit Nayan
32ad4ef571 Feat: enables metrics ingestion to signoz (#271)
* WIP promql support

* forked prometheus and promhouse integrated

* removing __debug_bin from git

* feat: prometheus config file to load

* feat: read prometheus config from args

* fix: WIP fixing errors in docker build

* feat: added clickhousemetricswrite exporter in metrics

* feat: changing otelcol image tag

* fix: read prometheus.yml from config flag in docker-compose

* fix: WIP clickhouse connection error

* fix: used signoz/prometheus tag v1.9.4

* chore: response format as in prometheus

* chore: query_range works with clickhouse reader and throws not implemented error for druid

* chore: moved ApiError struct to model

* feat: enabled instant query api for metrics

* chore: parser for instant query api params (see the sketch after this list)
2021-08-29 10:28:40 +05:30
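A hedged sketch of calling the Prometheus-style range query API this PR enables; the `/api/v1` prefix and the response shape follow Prometheus conventions and are assumptions here:

```ts
// query metrics over [start, end] at the given step (all in unix seconds)
async function queryRange(
	query: string,
	start: number,
	end: number,
	step: number,
): Promise<unknown> {
	const params = new URLSearchParams({
		query,
		start: String(start),
		end: String(end),
		step: String(step),
	});
	const res = await fetch(`/api/v1/query_range?${params.toString()}`);
	return res.json(); // { status, data: { resultType, result } } in Prometheus format
}
```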
palash-signoz
66b423588e Feature(FE): cypress base test case are updated (#275)
* chore: video config is updated so that it will not generate any video while running cypress

* chore: cypress.env.json is added in the env file

* chore: tsConfig is updated

* feature: Cypress is updated with some of the test cases

* chore: default test case is removed

* chore: convertToNanoSecondsToSecond function is updated

* chore: lock files, node_modules are ignored in git

* test: metric are updated

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-27 12:21:24 +05:30
palash-signoz
e0be48a527 refactor(frontend): Sidebar is updated (#276) 2021-08-27 12:13:32 +05:30
palash-signoz
4143e313da Fix(FE): Eslint Prettier are configured (#269)
* fix(FE): eslint

* chore: run eslint on frontend folder

* chore: run eslint on src

* chore: eslint fixing is updated

* chore: linting errors are updated

Co-authored-by: Nidhi Tandon <nidhitandon08@gmail.com>
2021-08-26 11:50:47 +05:30
Pranay Prateek
e1fbe265d8 Update README.md 2021-08-23 20:47:14 +05:30
Pranay Prateek
84002fa123 Update README.md 2021-08-23 20:46:40 +05:30
Ankit Nayan
7f2546ec97 release: 0.3.6 2021-08-23 12:07:09 +05:30
Yash Joshi
68b1b8d975 chore: remove old scripts (#267)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-23 11:53:05 +05:30
palash-signoz
ac789ffcf0 bug: commitlint.yml is fixed (#266)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-23 11:52:28 +05:30
palash-signoz
1272e18672 Feature(FE): Setup cypress (#263)
* gitignore is updated

* cypress is updated

* json is updated

* default test case is updated

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-23 11:52:02 +05:30
palash-signoz
9008d19a7b fix(FE): AppRoutes is refactored (#260)
* react-app-env.d.ts is moved to the typings

* webpack config for development and production is updated

* extra browser router component is removed

* Loadable component is made

* spinner component is updated

* route are updated

* routes are imported is Loadable fashion with chunkName

* AppRoute is updated

* AppWrapper is changed to AppRouter

* merge conflicts are resolved

* Loadable component is updated

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-23 11:38:25 +05:30
Ankit Nayan
f394f72bfb feat: grpc error calculation added to druid query (#268)
* feat: added statusCode for grpc

* feat: errors will now have grpc errors too

* removing dependency on viper

* grpc error calculation added to druid queries
2021-08-23 10:13:14 +05:30
Ankit Nayan
45cb0353e6 Feat: Enables error from grpc calls (#265)
* feat: added statusCode for grpc

* feat: errors will now have grpc errors too
2021-08-23 09:19:54 +05:30
Ankit Nayan
506c34f385 release: 0.3.5 2021-08-20 12:45:37 +05:30
Ankit Nayan
aca67d4f33 fix: removing action on pr as secret is not shared by forked repos 2021-08-20 12:42:49 +05:30
palash-signoz
c00e9f5236 contribution.md for frontend local instructions is updated (#264)
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-19 23:15:08 +05:30
palash-signoz
8cef9de35c fix: css issue for the tabs is updated (#259)
Co-authored-by: FIPalash Gupta <palash@indiagold.co>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-19 23:13:44 +05:30
palash-signoz
4817a17320 fix(FE): onFocusSelected bug is resolved (#258)
* fix: onFocusSelected bug is resolved

* dependencies array is updated

* updated the dependency array

* fix: TraceGantt chart is updated

Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-19 23:13:16 +05:30
palash-signoz
0055eaf656 fix: css issue for the SelectedSpanDetails is updated (#257)
Co-authored-by: FIPalash Gupta <palash@indiagold.co>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
2021-08-19 23:04:15 +05:30
palash-signoz
4b205e61c8 fix(FE): tsConfig baseUrl is used rather than using the alias for module (#256)
* tsconfig-paths-webpack-plugin package is added

* baseUrl is updated

* webpack config for development and production are updated

* baseUrl is removed in the file

* more .. is being removed

* chore: removed the commented part in the webpack.config

Co-authored-by: FIPalash Gupta <palash@indiagold.co>
2021-08-19 22:53:33 +05:30
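A minimal sketch of that approach: let webpack resolve modules from tsconfig's baseUrl via tsconfig-paths-webpack-plugin instead of maintaining a parallel alias map; the extension list is illustrative:

```ts
import TsconfigPathsPlugin from 'tsconfig-paths-webpack-plugin';
import { Configuration } from 'webpack';

const config: Configuration = {
	resolve: {
		extensions: ['.ts', '.tsx', '.js'],
		// reads baseUrl/paths from tsconfig.json so webpack and tsc stay in sync
		plugins: [new TsconfigPathsPlugin()],
	},
};

export default config;
```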
Ankit Nayan
77c0237ba1 feat: PR will trigger build and push of docker image in pattern pull-NUMBER 2021-08-19 20:23:24 +05:30
palash-signoz
a2acee209c commit lint is fixed (#261) 2021-08-18 18:56:44 +05:30
Ankit Nayan
5381dc7e56 preparing for release 0.3.4 2021-08-10 23:39:27 +05:30
Ankit Nayan
76848b8925 fix: api method allowed 2021-08-10 23:32:28 +05:30
Ankit Nayan
3f32322385 chore: adding commitlint 2021-08-03 02:06:34 +05:30
Ankit Nayan
29c26777b6 fix: fixed branch name in release drafter 2021-08-03 01:31:41 +05:30
Ankit Nayan
f861a5c77d adding release-drafter 2021-08-03 01:04:59 +05:30
Ankit Nayan
d1a4bb10ea Update push.yaml 2021-08-02 23:37:41 +05:30
Ankit Nayan
41e2c6b075 release 0.3.3 2021-08-02 22:15:01 +05:30
Abhishek Sehgal
b5b0725cc4 feat: Allow users to search spans with status code regex (#249) 2021-08-02 15:38:18 +05:30
Rahul Rana
3acef9c86a chore: use a new port if existing port is in use (#199)
* BugFix: Open a new port on local dev server if existing port is being used.

* Update Readme and dev dependencies

* Fix yarn package and add npmrc file
2021-07-31 12:43:00 +05:30
Kishore
48e32878e6 feat(FE: Span): add span kind filter (#219)
* Added span kind filter

* changed state to const

* Removed unnecessary console

* Fixed undefined issue, changed a bit in the spanKind type

* set default value for parameter passed in handleChangeSpanKind func
2021-07-30 17:21:41 +05:30
Palash gupta
f070bdf5b9 feat(FE): add Not Found Route (#217)
* base layout is updated

* app wrapper is updated

* InstrumentationPage spelling is corrected

* not found is updated

* not found component is updated

* changes are updated

* appwrapper is updated

* fix: removed the unwanted file

* fix: React.FC is removed

* fix: styles are imported under single file

* webpack config is updated

* webpack config is updated

* env is updated
2021-07-30 11:47:58 +05:30
Vimalraj
888e3ff79b Added new Spiner Component and Changed the Old Spin indicator (#224) 2021-07-29 18:26:38 +05:30
Palash gupta
3e8c9308b6 Fix/base url (#212)
* env is updated

* webpack config is updated

* webpack config is updated

* dotenv-expanded is removed

* vars is updated

* env is updated

* docker ignore is updated

* webpack config is updated

* dev and build are updated with the progress argument

* config is updated
2021-07-29 17:35:43 +05:30
Pranay Prateek
a3c1080519 Update README.md 2021-07-25 22:35:58 +05:30
Pranay Prateek
350a49060f Update README.md 2021-07-25 22:35:21 +05:30
Tony Qu
35abf2dddf translate readme.md into simplified chinese (#244)
* translate into simplified chinese

* fix missing translation
2021-07-25 22:24:10 +05:30
Pranay Prateek
9a32db608f Update CONTRIBUTING.md 2021-07-19 22:31:15 +05:30
Pranay Prateek
11c94c0fbb fix(README): Updated deployment instructions 2021-07-19 20:42:19 +05:30
Pranay Prateek
5431333b1a Update README.md 2021-07-17 19:18:41 +05:30
Pranay Prateek
dd34bd5990 Updates slack url in README (#232)
* updated slack url in readme
2021-07-17 18:56:53 +05:30
Pranay Prateek
a8adadcfff Added contributors image on README (#231)
* updated contributors image in readme
2021-07-17 18:02:10 +05:30
Pranay Prateek
db3c7a3b9f chore: fixing readme (#230)
* updated readme


* updated readme - docs link
2021-07-17 15:02:38 +05:30
Pranay Prateek
1d479ca158 Updated ReadMe (#229)
* updated readme - docs link
2021-07-17 14:57:43 +05:30
Pranay Prateek
76e7bd5292 Updated ReadMe (#228)
* updated README for clarity
* added comparison with other familiar tools
2021-07-17 14:23:58 +05:30
Pranay Prateek
2f99a661de Update CONTRIBUTING.md 2021-07-15 10:50:01 +05:30
Raj Babu Das
b1169c7315 Adding multi architecture (ARM64 and AMD64) support for signoz components (#201)
* Adding multi arch support for amd64 and arm64

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* test

* reset package-lock.json

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* reset yarn-lock.json

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* reset yarn-lock.json

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>
2021-07-14 23:36:27 +05:30
Pranay Prateek
ec0059dbd8 Update CONTRIBUTING.md 2021-07-14 21:18:32 +05:30
Pranay Prateek
3d4bdec4ba Update CONTRIBUTING.md 2021-07-14 21:16:27 +05:30
Pranay Prateek
437de3682c Update CONTRIBUTING.md 2021-07-14 21:15:23 +05:30
Pranay Prateek
9e76106ee0 Update CONTRIBUTING.md 2021-07-14 21:14:17 +05:30
Pranay Prateek
553967f76b Update CONTRIBUTING.md 2021-07-14 21:13:34 +05:30
NIDHI TANDON
beb15e0a5f feat(FE:TraceGanttChart): scroll to selected row on page load (#213)
* feat(FE:TraceGanttChart): scroll to selected row on page load

* refactor: update handleScroll

* refactor(traceGanttChart): scroll on selected row
2021-07-14 12:35:40 +05:30
Palash gupta
27e2ceffaa gitignore is updated (#211)
Co-authored-by: NIDHI TANDON <nidhi-tandon@users.noreply.github.com>
2021-07-14 12:31:00 +05:30
Palash gupta
7de1737f5f readme.md is updated (#209) 2021-07-05 23:27:11 +05:30
NIDHI TANDON
7917b5f3b4 Merge pull request #195 from SigNoz/refactor-gant-chart
refactor(FE: trace-gantt-chart): add ts support and change folder structure
2021-07-03 14:14:17 +05:30
NIDHI TANDON
470b68aa67 Merge branch 'main' into refactor-gant-chart 2021-07-03 11:38:49 +05:30
Kishore
1255da08dc Instrumentation page light mode fix (#202) 2021-07-02 23:39:35 +05:30
Nidhi Tandon
8c21aeb2a9 Merge branch 'main' into refactor-gant-chart
# Conflicts:
#	frontend/src/modules/Traces/SelectedSpanDetails.tsx
2021-07-02 14:26:42 +05:30
NIDHI TANDON
6f2b66c286 refactor(SelectedSpanDetails): add styled components, minor refactoring (#191) 2021-06-28 21:32:41 +05:30
NIDHI TANDON
d09c63331d fix(tabs): add gutter between tabs (#196) 2021-06-27 23:25:18 +05:30
NIDHI TANDON
af68ca52ba feat(FE): add eslint (#192)
* feat: add eslint

* refactor(package.json): remove extra eslint config
2021-06-27 23:23:55 +05:30
Ankit Nayan
655f8b65a2 Merge branch 'main' of https://github.com/signoz/signoz into main 2021-06-27 12:27:04 +05:30
Ankit Nayan
c07f68333e removing unnecessary files 2021-06-27 12:26:50 +05:30
Nidhi Tandon
421a102291 refactor: add ts support to selectedSpanDetails and TraceGanttChartHelpers, change folder structure 2021-06-27 08:47:46 +05:30
Raj Babu Das
1d5ce423f2 Fixing github workflow push pipeline (#190)
* adding download env step in github workflow

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* adding download env step in github workflow

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>
2021-06-26 18:10:23 +05:30
Raj Babu Das
0c12eaf89b adding download env step in github workflow (#189)
* adding download env step in github workflow

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* adding download env step in github workflow

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>
2021-06-26 00:44:13 +05:30
Raj Babu Das
09586faed2 Adding CI pipeline with github workflow for query service, frontend, flattener (#173)
* Adding github workflow for signoz

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* Adding github workflow for signoz

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* minor fix

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* minor fix

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>

* changing branch name from master to main

Signed-off-by: rajdas98 <mail.rajdas@gmail.com>
2021-06-25 23:54:21 +05:30
Yash Joshi
bf0c1a3dcc chore: fix tsconfig to support webpack alias (#175) 2021-06-25 23:49:53 +05:30
NIDHI TANDON
77abb47b4c fix: show timeline in decimals (#188) 2021-06-25 10:03:21 +05:30
Ankit Nayan
2e71230bbf changed frontend images to 0.3.2 2021-06-24 23:04:01 +05:30
NIDHI TANDON
bf2002d6a2 feat: gantt charts for spans (#184)
* feat: create a base component for trace gantt chart

* fix: max and min calc

* fix: focus on selected paths

* fix: build issue

* fix: convert duration to ms

* fix: gantt chart cells margin left

* feat: sorted data by startTime

* feat: update layout and add select functionality to table

* feat: add UI and functionality

* feat: make row clickable in traces, show tags on gantt chart click and some fixes

* feat: sort flamegraph and show tags on row click on gantt chart

* feat: change table type to radio and disable parent selection

* fix: left padding of gantt chart lines

* fix: line chart duration

* fix: sorting flame graph

* fix: reset zoom on flame graph

* fix: expand children on row click, show tags on page load, default expand on page load

* style(gantt-chart): make gantt chart buttons & tags sticky

* style(gantt-chart): margin bottom in table & padding of gantt

* feat: update content on trace list
2021-06-24 22:28:32 +05:30
Ankit Nayan
59749d0576 removing unnecessary call to links (#180) 2021-06-19 01:16:56 +05:30
Yash Joshi
c9c6ccc687 style: remove an extra dangling comma (#176) 2021-06-15 00:42:59 +05:30
Pranay Prateek
88082c1278 Merge pull request #177 from pranay01/main
Updated contributing.md
2021-06-12 17:49:15 +05:30
Pranay Prateek
84f150bc18 removed target blank from readme 2021-06-12 17:47:33 +05:30
Pranay Prateek
299e80ca49 updated contributing steps 2021-06-12 17:45:06 +05:30
Pranay Prateek
7127dec6f6 Merge branch 'main' of github.com:pranay01/signoz into main 2021-06-12 17:40:40 +05:30
Yash Joshi
6afb91fa84 chore: add nvmrc file and add engines in package.json (#164) 2021-06-12 11:36:12 +05:30
Sai Deepesh
72f5688194 added min-max values check in latency (#158)
* added min-max values check in latency

* error handling antd way

* separated rules validation logic into functions
2021-06-12 10:30:10 +05:30
Ankit Nayan
a118c3c8a1 adding frontend image tags 0.3.1 2021-06-08 22:36:17 +05:30
Ankit Nayan
9baf873521 Merge pull request #170 from SigNoz/fix-serviceMap-zoom
(Fix) - serviceMap zooms correctly
2021-06-07 20:58:53 +05:30
Ankit Nayan
12911db945 (Fix) - serviceMap zooms correctly 2021-06-07 20:58:02 +05:30
Yash Joshi
bd149f4364 fix: trace graph styles (#165)
- Prevent vertical shift on hover due to border
- Show faded traces
2021-06-07 20:51:57 +05:30
Anwesh Nayak
c69b9ae62a feat(docs): Update bug_report.md and add version section (#152)
* feat(docs): Update bug_report.md and add version section

* fix(docs): remove duplicate additional context error in markdown

Co-authored-by: anweshknayak <anweshnayak@Anweshs-MacBook-Air.local>
2021-06-07 20:50:26 +05:30
Ankit Anand
bc3f16d3de changed query-service image tag to 0.3.1 and druid image tag to 0.21.1-rc2 2021-06-07 17:55:26 +05:30
Ankit Nayan
61bbd5551b Merge pull request #168 from SigNoz/druid_permission_fix
Druid permission fix
2021-06-07 17:24:53 +05:30
Ankit Anand
286577d13d added new druid image to fix docker permission issue 2021-06-07 17:23:34 +05:30
Ankit Anand
dbd0701779 delete test folder for interface implementation 2021-06-07 16:59:40 +05:30
Ankit Anand
0c7a5ce3c7 added limit to serviceMapDependencies query to druid 2021-06-07 16:59:13 +05:30
Pranay Prateek
a92381df1b Merge pull request #167 from SigNoz/ankit01-oss-patch-1
Updated description
2021-06-07 12:32:35 +05:30
Ankit Anand
eb1509d385 Updated description 2021-06-07 12:27:22 +05:30
Ankit Nayan
34e33af290 Merge pull request #156 from SigNoz/check-antd-css
refactor: remove antd unused css
2021-06-05 19:08:18 +05:30
Ankit Nayan
c0004cd51c Merge pull request #157 from jyash97/fix/suspense-loader
fix: suspense for lazy loaded pages
2021-06-05 17:29:24 +05:30
Ankit Nayan
10bf545c65 Merge branch 'main' into fix/suspense-loader 2021-06-05 17:29:09 +05:30
Ankit Nayan
7d2bcf11c3 Merge pull request #150 from jyash97/fix/router-prop
fix: react-router prop
2021-06-05 16:59:55 +05:30
Yash Joshi
3ff7ace54e fix: suspense for lazy loaded pages
Add suspense to track the route-split modules; this makes sure that the sidebar and navbar are not unmounted whenever modules start fetching.
2021-06-05 12:38:37 +05:30
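A sketch of the pattern this commit describes: Suspense wraps only the routed content, so the navigation chrome stays mounted while a lazy page chunk is fetched. Component names and paths are illustrative:

```tsx
import React, { lazy, Suspense } from 'react';
import { BrowserRouter, Route, Switch } from 'react-router-dom';

const SideNav = (): JSX.Element => <nav>side nav</nav>;
const Spinner = (): JSX.Element => <div>loading…</div>;
const TracesPage = lazy(() => import('./pages/Traces'));

function App(): JSX.Element {
	return (
		<BrowserRouter>
			<SideNav /> {/* stays mounted across route changes */}
			<Suspense fallback={<Spinner />}>
				<Switch>
					<Route path="/traces" component={TracesPage} />
				</Switch>
			</Suspense>
		</BrowserRouter>
	);
}
```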
Nidhi Tandon
abdfe6ccc5 chore: remove unused files 2021-06-05 11:50:25 +05:30
Ankit Nayan
aa398263fb Merge pull request #155 from SigNoz/installation-default-clickhouse
choose clickhouse on enter press
2021-06-05 11:50:12 +05:30
Ankit Anand
ace02486e0 choose clickhouse on enter press 2021-06-05 11:48:44 +05:30
Yash Joshi
b318ba6b2f fix: router prop name 2021-06-04 11:54:18 +05:30
Nidhi Tandon
de4be411f4 refactor: remove antd unused css 2021-06-03 21:25:29 +05:30
Ankit Nayan
362f264bae Installation changes for docker (#149)
* installation steps WIP

* changing install.sh

* fixes

* fixes

* fixes

* handled enter key press in setup_type

* fixes

* fixes

* fixes

Co-authored-by: Ankit Anand <cruxaki@gmail.com>
2021-06-03 20:54:41 +05:30
Pranay Prateek
e94d984cdb Merge pull request #148 from jyash97/patch-1
docs: update slack invite link
2021-06-03 17:24:02 +05:30
Yash Joshi
bf0267d579 docs: update slack invite link 2021-06-03 15:32:36 +05:30
Ankit Nayan
e4b3ea1f34 Merge pull request #145 from SigNoz/spanAggregatesAPI
added spansAggregate API implementation for clickhouse
2021-06-02 18:34:56 +05:30
Ankit Anand
4ee6d4b546 added spansAggregate API implementation for clickhouse 2021-06-02 18:34:03 +05:30
Ankit Anand
a7836c26d0 kubernetes configs updated 2021-06-02 11:45:00 +05:30
Ankit Anand
15eb5364d5 added healthcheck to druid router service and added it as dependency of query-service 2021-06-02 00:43:30 +05:30
Ankit Anand
47bf512a33 added healthcheck for query service to wait for router service 2021-06-01 17:17:12 +05:30
Ankit Anand
2776bfa311 added nginx config for gzip 2021-06-01 16:45:45 +05:30
Ankit Anand
8c7ac88f84 added STORAGE env variable to deployment template 2021-06-01 16:23:19 +05:30
Ankit Anand
a08ad9e2cf changed image versions to 0.3.0 2021-06-01 16:22:50 +05:30
Ankit Anand
d312398f18 changed values for release 0.3.0 2021-06-01 15:57:21 +05:30
Ankit Nayan
d891c3e118 Merge pull request #144 from SigNoz/query_refactor
Query Service refactor to add interface for APIs
2021-06-01 15:17:19 +05:30
Ankit Anand
1e7b68203f added interface for spanAggregates API 2021-06-01 15:13:48 +05:30
Ankit Anand
3d152e23cd Merge branch 'main' into query_refactor 2021-06-01 11:50:27 +05:30
Ankit Nayan
47cf1eebf7 Merge pull request #143 from SigNoz/test-dropdown-fix
fix: add dark and compact theme css to index.css
2021-06-01 11:37:29 +05:30
Nidhi Tandon
6c84882dca fix: add css back to assets 2021-06-01 11:14:20 +05:30
Ankit Anand
a4424eca0e changed to port 8080 2021-06-01 10:00:48 +05:30
Ankit Anand
77992a59bc GetServices API sorted by p99 desc 2021-05-31 23:48:10 +05:30
Ankit Anand
3cbb071138 Merge branch 'main' into query_refactor 2021-05-31 22:33:47 +05:30
Ankit Nayan
9cd6e5cabe Merge pull request #140 from SigNoz/change-p90
feat: update p90 to p95
2021-05-31 22:01:29 +05:30
Nidhi Tandon
13bec63fca feat: update p90 to p95 2021-05-31 21:59:36 +05:30
Ankit Nayan
f2164a1a86 Merge pull request #138 from SigNoz/change-p90
feat: update response param p90 to p95 for /top_endpoints
2021-05-31 21:58:20 +05:30
Nidhi Tandon
8a4f58e77b Merge branch 'main' into change-p90
# Conflicts:
#	frontend/src/store/actions/metrics.ts
#	frontend/src/store/reducers/metrics.ts
2021-05-31 21:56:25 +05:30
Ankit Nayan
51a24673b9 Merge pull request #139 from SigNoz/update-zoom-px
feat: update zoom pixels based on screen width
2021-05-31 21:55:29 +05:30
Ankit Nayan
c94feb9af2 Merge pull request #136 from SigNoz/refactor-metrics-reducer
refactor(FE: Reducers): metrics reducers & actions
2021-05-31 21:54:06 +05:30
Nidhi Tandon
a8668d19a8 Merge branch 'main' into refactor-metrics-reducer
# Conflicts:
#	frontend/src/store/reducers/index.ts
2021-05-31 21:52:47 +05:30
Ankit Nayan
a8e81c9666 Merge pull request #133 from SigNoz/remove-bundle-analyzer
ci(FE): remove webpack bundle analyzer
2021-05-31 21:49:04 +05:30
Ankit Nayan
2eed75560d Merge pull request #130 from SigNoz/refactor-redux
refactor(FE: traceFilters): remove multiple reducers
2021-05-31 21:48:41 +05:30
Nidhi Tandon
8d6fb7f897 feat: update zoom pixels based on screen width 2021-05-31 21:45:03 +05:30
Nidhi Tandon
4cd0088029 fix: move traces actions to common action types 2021-05-31 21:26:27 +05:30
Nidhi Tandon
872c8adbbb feat: update response param p90 to p95 2021-05-31 21:06:39 +05:30
Ankit Anand
bba7344bae fixes for CH API implementations 2021-05-31 18:05:54 +05:30
Ankit Anand
51fe634566 More methods from interface implemented for ClickHouse 2021-05-31 11:14:11 +05:30
Nidhi Tandon
af58d085a0 feat(FE: Reducers): Combine Metrics reducers and refactor Metrics actions 2021-05-30 19:07:37 +05:30
Nidhi Tandon
5b9b344816 chore(FE): remove webpack bundle analyzer 2021-05-30 12:41:30 +05:30
Ankit Nayan
1caa07e0af Merge pull request #131 from SigNoz/gzip
ci: gzip bundle
2021-05-30 11:58:08 +05:30
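A hedged sketch of gzipping build assets from webpack with compression-webpack-plugin (the related commits nearby add the matching nginx gzip config); the test pattern is illustrative:

```ts
import CompressionPlugin from 'compression-webpack-plugin';
import { Configuration } from 'webpack';

const config: Configuration = {
	plugins: [
		new CompressionPlugin({
			algorithm: 'gzip',
			test: /\.(js|css|html|svg)$/, // emit a .gz file next to each matching asset
		}),
	],
};

export default config;
```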
Ankit Nayan
ae23cec8d6 Merge pull request #132 from SigNoz/enable-gzip-frontend
added gzip config to nginx conf file
2021-05-30 11:57:25 +05:30
Ankit Anand
5afc04f205 added gzip config to nginx conf file 2021-05-30 11:55:47 +05:30
Ankit Anand
6aed23ce66 clickhouse implementation WIP 2021-05-30 11:14:55 +05:30
Nidhi Tandon
007e2e7b78 ci: gzip bundle 2021-05-30 10:39:19 +05:30
Ankit Anand
762a3cdfcd dbOverview API with nullable string 2021-05-29 22:15:49 +05:30
Nidhi Tandon
308f8f8fed refactor(reducers): remove multiple reducers 2021-05-29 16:46:48 +05:30
Ankit Anand
588bf2b93a Merge branch 'main' into query_refactor 2021-05-29 16:37:46 +05:30
Ankit Anand
fff38b58d2 span search api working 2021-05-29 16:32:11 +05:30
Anwesh Nayak
cbd2036613 fix(script): add message to kill docker containers (#128)
* fix(script): add message to kill docker containers

* fix(script): add message to kill docker containers

* fix(script): add message to kill docker containers

Co-authored-by: anweshknayak <anweshnayak@Anweshs-MacBook-Air.local>
2021-05-29 13:13:39 +05:30
Ankit Nayan
7ef72d4147 Merge pull request #125 from anweshknayak/doc-fix
fix(doc): correct doc url for troubleshooting
2021-05-29 12:24:10 +05:30
anweshknayak
07af5c843a fix(doc): correct doc url for troubleshooting 2021-05-28 20:51:11 +05:30
Ankit Anand
e524ce5743 Merge branch 'main' into query_refactor 2021-05-28 11:43:38 +05:30
Ankit Nayan
24e1346521 Merge pull request #122 from SigNoz/reduce-bundle-size
refactor: remove unused lib and code
2021-05-27 13:35:56 +05:30
Ankit Anand
62e77613a6 sample API working in CH 2021-05-27 12:52:34 +05:30
Nidhi Tandon
56c0265660 refactor: remove unused lib and code 2021-05-26 20:38:18 +05:30
Ankit Nayan
91b1d08dff Merge pull request #119 from SigNoz/fix-endpoints-css
fix(css): end points overflow issue
2021-05-25 12:40:52 +05:30
Nidhi Tandon
239c2cb859 feat(css): add tooltip to button hover & fix css 2021-05-24 21:48:01 +05:30
Nidhi Tandon
4173258d0a fix: end points overflow issue 2021-05-23 17:04:47 +05:30
Ankit Nayan
1cbbdd8265 Merge pull request #118 from SigNoz/fix-api-call-twice
fix: call api with update value
2021-05-23 17:02:56 +05:30
Ankit Anand
433f3f3d94 clickhouse implementation WIP 2021-05-23 16:45:00 +05:30
Nidhi Tandon
fed23a6ab9 chore: add comments 2021-05-23 16:06:40 +05:30
Nidhi Tandon
b979c24cb4 refactor: remove unused prop 2021-05-23 15:43:38 +05:30
Nidhi Tandon
e4b41b1a27 feat: load data based on isLoaded flag 2021-05-23 15:40:48 +05:30
Nidhi Tandon
44495b7669 feat(ServiceMap): dispatch isLoaded route via context 2021-05-23 14:15:13 +05:30
Pranay Prateek
cc3133b2d6 Update README.md 2021-05-22 21:49:56 +05:30
Ankit Anand
9c83319143 interface working with druid APIs 2021-05-22 19:51:56 +05:30
Pranay Prateek
571c08c58e Update issue templates 2021-05-22 19:01:39 +05:30
Pranay Prateek
092cfc7804 Update issue templates 2021-05-22 18:38:31 +05:30
Nidhi Tandon
245050aac2 fix(ServiceMap): multiple api calls of date picker 2021-05-22 17:26:16 +05:30
Ankit Anand
606fa6591d added test folder for testing interface 2021-05-22 13:35:30 +05:30
Ankit Anand
55f7f56acf releasing v0.2.2 2021-05-18 16:55:49 +05:30
Ankit Nayan
e6b3a6c9db Merge pull request #107 from SigNoz/issues-106
Display up to 20 characters in name of service in ServiceMap
2021-05-17 18:03:32 +05:30
DIO
d6884cacdb Merge branch 'main' into issues-106 2021-05-17 17:43:04 +05:30
Ankit Nayan
bb155d2356 Merge pull request #109 from SigNoz/issues-93
Add default view in dropdown service-picker in ServiceMap
2021-05-17 17:23:26 +05:30
dhrubesh
c49ffd83a3 remove logs 2021-05-17 14:43:20 +05:30
dhrubesh
8a5178f0dc adds default view option 2021-05-17 14:42:39 +05:30
dhrubesh
057fba112b updates max length 2021-05-17 14:29:35 +05:30
Ankit Nayan
4c0b81b5c7 Merge pull request #108 from SigNoz/remove-zoom-post-stable
Removes abrupt zoom post becoming stable
2021-05-17 11:58:32 +05:30
dhrubesh
1d2f964a63 updates text color 2021-05-17 09:39:20 +05:30
dhrubesh
171fd714de removes zoom post becoming stable 2021-05-17 09:06:05 +05:30
Ankit Anand
789880fa07 changing constants for zoom-in for different screen sizes 2021-05-16 19:55:36 +05:30
Ankit Nayan
f25edf1e29 Merge pull request #102 from SigNoz/issue-92
Change time range in api call of Service Map to 1 min from latest
2021-05-16 19:47:45 +05:30
dhrubesh
c6e2e297d5 resolves conflicts 2021-05-16 18:44:26 +05:30
dhrubesh
2bc01e50bd Merge branch 'issue-92' of github.com-dhrubesh:SigNoz/signoz into issue-92 2021-05-16 18:36:18 +05:30
dhrubesh
38770809e3 handles route specific default value connected to localstorage 2021-05-16 18:35:50 +05:30
Ankit Nayan
9dd9f1133b Merge pull request #104 from SigNoz/issue-103-
Fixes multiple re-renders
2021-05-16 17:23:55 +05:30
Ankit Nayan
8b743f7803 Merge branch 'issue-92' into issue-103- 2021-05-16 17:23:44 +05:30
Ankit Nayan
868b7691b3 Merge pull request #105 from SigNoz/issue-95
Calculate zoom px based on screen size
2021-05-16 17:21:38 +05:30
Ankit Nayan
613e6ba5f9 Merge pull request #106 from SigNoz/issue-97
Adds tooltip on hover
2021-05-16 17:21:25 +05:30
dhrubesh
8fe2fe5aec adds a utility function to transform label 2021-05-16 15:50:32 +05:30
dhrubesh
55a7b5b1b3 adds tooltip on hover 2021-05-16 15:08:31 +05:30
dhrubesh
8b0abbec79 adds default options config by route 2021-05-15 23:24:53 +05:30
dhrubesh
24416ceabd adds width to Select 2021-05-15 19:50:16 +05:30
dhrubesh
2482e91348 calculate zoom px based on screen size 2021-05-15 18:23:29 +05:30
dhrubesh
fcc248ddf6 resets data to avoid multiple re-rendering for parallel apis 2021-05-15 15:18:30 +05:30
dhrubesh
3318ec8c38 removes 1day and adds 5mins 2021-05-15 14:59:47 +05:30
dhrubesh
a416767950 choose config based on routes 2021-05-13 20:07:48 +05:30
dhrubesh
173bd01e70 adds last 1min to store 2021-05-13 20:07:25 +05:30
dhrubesh
de4adeded5 creates 2 diff config for datepicker 2021-05-13 20:06:44 +05:30
Ankit Anand
674fb34115 updated readme 2021-05-11 21:41:42 +05:30
Shweta Bhave
9c74f0bae5 updated readme 2021-05-11 21:34:32 +05:30
Ankit Nayan
2999adc98f added nodes without any dependencies in serviceMap 2021-05-11 13:15:10 +05:30
Ankit Nayan
be7d8c3347 fixed default 4xxErrorRate injected to test 2021-05-10 17:02:58 +05:30
Ankit Nayan
41dd007380 increased speed of particles in serviceMap 2021-05-10 12:07:38 +05:30
Ankit Nayan
83eb73ee03 changing deployment options to 0.2.1 2021-05-10 10:46:16 +05:30
Ankit Nayan
5b2f985710 Merge pull request #91 from SigNoz/disable-options
Disables invalid CTA, updates options based on API payload
2021-05-10 00:10:04 +05:30
dhrubesh
e9c03c4d85 p90->p95 2021-05-10 00:05:49 +05:30
Ankit Nayan
d07e277220 Merge pull request #90 from SigNoz/service-map
Service map view
2021-05-10 00:02:07 +05:30
dhrubesh
9bcdb2ede6 removes set alert 2021-05-10 00:01:50 +05:30
dhrubesh
4bbc4eef1a 399 --> 380 2021-05-09 23:57:50 +05:30
dhrubesh
36ad8987dd cosmetic updates 2021-05-09 23:45:42 +05:30
dhrubesh
45f1c2ec11 removes hardcoding 2021-05-09 23:03:51 +05:30
dhrubesh
705279b6fd fixes zoom issue 2021-05-09 23:02:16 +05:30
dhrubesh
9ac2dece11 UX updates 2021-05-09 22:51:08 +05:30
dhrubesh
325ca434d4 adds height variant 2021-05-09 20:47:56 +05:30
dhrubesh
128d75a144 fixes zoom px and disabledNodeDrag 2021-05-09 19:30:16 +05:30
dhrubesh
45375fbd53 fixes edge case 2021-05-09 19:12:54 +05:30
dhrubesh
2d646c0655 adds hardcoded data 2021-05-09 18:59:49 +05:30
dhrubesh
6f12d06a32 adds selection of service and zoom into node feature 2021-05-09 18:27:37 +05:30
dhrubesh
bc02aa5eef calculate nodes size and color via RPS errorRate 2021-05-09 15:41:57 +05:30
dhrubesh
c7ed2daf4a initial set up with react-force-graph 2021-05-09 14:44:14 +05:30
Pranay Prateek
5e97dfa5fc updated twitter handle for SigNoz 2021-05-09 01:26:25 +05:30
Ankit Nayan
44666a4944 changed p90 to p95 in service overview api 2021-05-06 17:59:54 +05:30
Ankit Nayan
14f6a23f51 Merge branch 'main' of https://github.com/signoz/signoz into main 2021-05-06 17:36:16 +05:30
Ankit Nayan
050b57c72b added ability to query tags with isnotnull operator 2021-05-06 17:35:29 +05:30
Ankit Nayan
0f891ccb26 added kind as field in model for span search 2021-05-06 17:34:55 +05:30
Ankit Nayan
b3755325ba added kind as url param in parser for span search 2021-05-06 17:34:30 +05:30
Pranay Prateek
a1468cf126 updated badges 2021-05-06 17:21:26 +05:30
Pranay Prateek
3014948f26 Merge pull request #85 from pranay01/main
updated badges on README
2021-05-06 17:18:28 +05:30
Pranay Prateek
1e1fc38c96 updated badges 2021-05-06 17:04:15 +05:30
Pranay Prateek
dad678a4c1 updated badges 2021-05-06 16:45:15 +05:30
Pranay Prateek
f91d8685e3 Merge pull request #84 from pranay01/main
updated docker pull badge
2021-05-06 13:18:13 +05:30
Pranay Prateek
50a2f3b6f9 updated docker pull badge 2021-05-06 13:16:41 +05:30
Pranay Prateek
97c7543557 Merge pull request #83 from pranay01/main
updated features section
2021-05-06 12:55:19 +05:30
Pranay Prateek
e4c8dcf3ca updated features section 2021-05-06 12:54:32 +05:30
Pranay Prateek
5a6158a2e5 Merge pull request #82 from pranay01/main
Updated ReadMe
2021-05-06 12:43:35 +05:30
Pranay Prateek
9936b3ab46 updated motivation section 2021-05-06 12:41:52 +05:30
Pranay Prateek
673d65db40 updated intro 2021-05-06 12:39:55 +05:30
Pranay Prateek
5e1592274c updated motivation and intro 2021-05-06 12:31:14 +05:30
Ankit Nayan
a50fd14ef2 fixed bug in External APIs error % mapping 2021-05-05 13:04:09 +05:30
Ankit Nayan
baedfa62d2 added service map api and 4xx rate in /services api 2021-05-05 00:03:57 +05:30
Ankit Nayan
daebe83e32 updated files for docker and helm files v 0.2.0 2021-05-02 23:17:50 +05:30
Ankit Nayan
7bafcdb3da Merge pull request #79 from SigNoz/prod-deployment-issues
fixes prod build issue
2021-05-02 23:06:58 +05:30
dhrubesh
2deb8e5b9d fixes prod build issue 2021-05-02 22:54:02 +05:30
Ankit Nayan
d7f2d9f58b added druid dimensions for external and db calls 2021-05-02 22:05:14 +05:30
Ankit Nayan
42b7a51080 Merge branch 'backend' into main 2021-05-02 21:49:16 +05:30
Ankit Nayan
c4bba43667 fixed externalCallAvgDuration API for sinusoidal pattern 2021-05-02 21:48:55 +05:30
Ankit Nayan
3f176cda8d Merge branch 'main' of https://github.com/signoz/signoz into main 2021-05-02 20:54:05 +05:30
Ankit Nayan
e00b4a503e Merge pull request #75 from SigNoz/fix-bug-link-err-traces
Fixes bugs on Tracing and Metrics
2021-05-02 20:52:24 +05:30
Ankit Nayan
30bdd6792c Merge branch 'backend' into main 2021-05-02 20:50:06 +05:30
Ankit Nayan
9c6e66f315 fixed - now getting latest data for past time rather than 30s-stale data there too 2021-05-02 20:49:54 +05:30
dhrubesh
9010d16319 equal --> equals 2021-05-02 20:31:30 +05:30
Ankit Nayan
3ef7b10f5b Merge pull request #74 from SigNoz/update-err-chart-title
Change title in Error chart in ServiceOverview
2021-05-02 20:13:19 +05:30
Ankit Nayan
3294463599 Merge pull request #73 from SigNoz/update-graph-title
Change Title in DB Calls Tab
2021-05-02 20:12:23 +05:30
dhrubesh
fc2d32e72d removes *100 since the logic has been moved to BE 2021-05-02 20:04:12 +05:30
dhrubesh
62ad8433bf adds service name in the trace when visiting from error 2021-05-02 20:02:44 +05:30
dhrubesh
66ec0a2d8d updates DB graph title and label 2021-05-02 19:51:21 +05:30
dhrubesh
e767886565 updates DB graph title 2021-05-02 19:47:50 +05:30
Ankit Nayan
00c3342b4d added Error tag from error% metrics to traces page 2021-05-02 18:09:01 +05:30
Ankit Nayan
0e86e37235 Merge branch 'pull-66' into main 2021-05-02 18:00:53 +05:30
Ankit Nayan
1ac544ad78 pull-65 2021-05-02 17:55:06 +05:30
Ankit Nayan
c22d6dd1cc Merge branch 'pull-68' into main 2021-05-02 17:44:14 +05:30
Ankit Nayan
4113b1aacc Merge pull request #70 from SigNoz/fixes-side-nav-bug
Fixes sidebar highlight on route change
2021-05-02 17:38:03 +05:30
Ankit Nayan
501d4729d6 Merge pull request #69 from SigNoz/link-error-to-traces
Links Error chart from ServiceOverview to Traces Page
2021-05-02 17:37:47 +05:30
dhrubesh
63f0eadb61 fixes error percentage key 2021-05-02 17:11:02 +05:30
Ankit Nayan
c110a71fff Merge pull request #64 from SigNoz/adds-external-calls
Adds external API monitoring calls
2021-05-02 17:08:09 +05:30
dhrubesh
25803e660c integrated API, populates graph data 2021-05-02 16:58:34 +05:30
Ankit Nayan
7c6a4ed402 Merge branch 'main' of https://github.com/signoz/signoz into main 2021-05-02 16:52:04 +05:30
Ankit Nayan
eb39850f63 added statusCode filtering with error:true tag in searchSpans API 2021-05-02 16:51:06 +05:30
Ankit Nayan
409929841d added statusCode filter for error:true filter in searchSpans 2021-05-02 16:50:54 +05:30
dhrubesh
7aeffacaa5 links error to traces 2021-05-02 16:39:36 +05:30
Ankit Nayan
883982bf36 Merge pull request #67 from pranay01/main
Checking if mapped_array[parentid] is undefined before pushing a tree object to it.
2021-05-02 16:21:25 +05:30
dhrubesh
e78d979dd3 fixes sidebar highlight on route change 2021-05-02 16:06:31 +05:30
Pranay Prateek
613f62e518 Merge branch 'spantree_issue' into main 2021-05-02 15:46:43 +05:30
Pranay Prateek
b3bf9fe670 fixed undefined spanUtil mapped array error 2021-05-02 15:45:56 +05:30
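A minimal sketch of the guard from PR #67: make sure the parent's bucket in the mapped array exists before pushing a tree node into it. Types and names are illustrative:

```ts
interface TreeNode {
	id: string;
	children: TreeNode[];
}

function addToParent(
	mapped: Record<string, TreeNode[]>,
	parentId: string,
	node: TreeNode,
): void {
	if (mapped[parentId] === undefined) {
		mapped[parentId] = []; // initialize instead of pushing into undefined
	}
	mapped[parentId].push(node);
}
```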
dhrubesh
39012d86d7 updates Graph title and adds initial set up for db overview 2021-05-02 14:42:30 +05:30
dhrubesh
eafc2919c7 adds color tokens 2021-05-02 14:41:18 +05:30
Ankit Nayan
ee69c3aed2 Merge branch 'backend' into main 2021-05-02 12:53:12 +05:30
Ankit Nayan
a6b1c271ee changed errors to percent in external calls 2021-05-02 12:52:57 +05:30
Ankit Nayan
cd90ac8e72 Adding DB Overview tab in service 2021-05-02 10:55:22 +05:30
Ankit Nayan
ade18bc11f added dbOverview tab for service 2021-05-02 10:54:59 +05:30
dhrubesh
4935936afc adds plus minus 15mins to timestamp 2021-05-01 20:54:33 +05:30
dhrubesh
a59e33d241 links end points to traces 2021-05-01 20:27:46 +05:30
Ankit Nayan
d5e77d2c57 Merge branch 'pull-52' into main 2021-05-01 20:15:27 +05:30
Ankit Nayan
75c5615d10 Merge branch 'pull-51' into main 2021-05-01 20:15:09 +05:30
Ankit Nayan
b494b380db Merge branch 'pull-50' into main 2021-05-01 20:14:49 +05:30
dhrubesh
f412573972 updates label of graph 2021-05-01 19:18:18 +05:30
dhrubesh
ec52ad7636 adds error and external call duration graph 2021-05-01 19:13:31 +05:30
Ankit Nayan
3b3ca64296 returning spans which are at least 30s stale 2021-05-01 18:25:46 +05:30
dhrubesh
61c26d7727 adds external call RPS and duration via address 2021-05-01 16:30:20 +05:30
Ankit Nayan
ea93b65ab7 fixed error data in APIs 2021-05-01 12:37:53 +05:30
Ankit Nayan
c323068362 fixing TagValues for int value 2021-05-01 12:37:29 +05:30
Ankit Nayan
9008815790 refactoring 2021-04-30 23:00:15 +05:30
Ankit Nayan
99b52e5f19 Merge branch 'main' of https://github.com/signoz/signoz into main 2021-04-30 22:57:58 +05:30
Ankit Nayan
f5abbd2e64 Merge branch 'backend' into main 2021-04-30 22:57:41 +05:30
Ankit Nayan
f32ece7788 Merge pull request #48 from SigNoz/time-fixes-on-trace
Timestamp related fixes on trace
2021-04-30 22:55:08 +05:30
Ankit Nayan
d76b2ba8d2 Kubernetes Deployment: increased storage of Historical pod to 20Gi 2021-04-30 11:32:20 +05:30
Ankit Nayan
93138c91e2 commented druid response log line 2021-04-26 23:33:38 +05:30
Ankit Nayan
f0e7614443 commented druid response log line 2021-04-26 23:33:20 +05:30
Ankit Nayan
d3911faebc fixed NaN response from Druid during span aggregates results 2021-04-26 23:20:22 +05:30
Ankit Nayan
584215b186 fixed NaN response from Druid during span aggregates results 2021-04-26 23:19:34 +05:30
Ankit Nayan
99c073d6da External API Calls commit for query-service and flatten-processor 2021-04-26 21:55:37 +05:30
Ankit Nayan
44304229cb added APIs for external calls 2021-04-26 21:55:11 +05:30
Ankit Nayan
381fcd710e added dimensions for external calls and db calls 2021-04-26 21:54:32 +05:30
dhrubesh-makeen
c8b92ce4d5 removes hardcoding--2 2021-04-25 17:44:07 +05:30
dhrubesh-makeen
d5cb191299 removes hardcoding from routes 2021-04-25 17:37:43 +05:30
Ankit Nayan
3f901c8692 added status code in processor and supervisor 2021-04-24 11:21:32 +05:30
Ankit Nayan
e24577b663 added statusCode dimension in druid supervisor config 2021-04-24 11:20:07 +05:30
Ankit Nayan
e9d403493f added statusCode field in spans 2021-04-24 11:19:34 +05:30
dhrubesh-makeen
fa7e3f3d95 adds spacing 2021-04-24 04:00:05 +05:30
dhrubesh-makeen
05f40224b9 window.location.search --> location.search(react-router) 2021-04-24 03:55:22 +05:30
dhrubesh-makeen
55e86ead02 moves routing to a single place 2021-04-24 03:51:31 +05:30
dhrubesh-makeen
515766452d updates incorrect import 2021-04-24 02:36:19 +05:30
dhrubesh-makeen
dbe3488ad1 components --> modules & refactored TopNav and SideNav 2021-04-24 02:29:12 +05:30
dhrubesh-makeen
88e488bdc7 revamps api layer 2021-04-24 01:51:45 +05:30
dhrubesh-makeen
001f7414db moves store to a separate folder 2021-04-24 01:21:24 +05:30
dhrubesh-makeen
1d65ed38f9 adds /application to open when dev server starts 2021-04-24 00:59:45 +05:30
dhrubesh-makeen
e1ea39e287 updates timeformat 2021-04-24 00:58:59 +05:30
dhrubesh-makeen
079d742ea0 moves old PR changes to this PR 2021-04-24 00:58:38 +05:30
Ankit Nayan
733b137b2a Fixed timezone issue in timestamp conversion to string in query service 2021-04-23 13:37:55 +05:30
Ankit Nayan
1dad2bb791 fixed query service timestamp to string conversion timezone issue 2021-04-23 13:37:32 +05:30
Pranay Prateek
840246bd77 Merge pull request #46 from pranay01/main
Updated ReadMe image & title
2021-04-20 13:25:00 +05:30
Pranay Prateek
4403a5c334 updated README image 2021-04-20 13:23:37 +05:30
Pranay Prateek
3d7b79fecd updated README image 2021-04-20 13:12:03 +05:30
Pranay Prateek
ba95b36e09 updated README image 2021-04-20 13:06:31 +05:30
Pranay Prateek
15471090d5 updated image & title in README 2021-04-20 01:28:06 +05:30
Ankit Nayan
881feef4e9 changed TagKeys and TagValues type for correct ordering 2021-04-19 23:06:20 +05:30
Ankit Nayan
c45f09be08 added separate kafka exporters for traces and metrics in otel-collector config 2021-04-15 21:40:30 +05:30
Ankit Nayan
35648ba195 added s3 config test file to .gitignore 2021-04-02 10:53:55 +05:30
Ankit Nayan
eaa50f83bc docker installation command changed for Linux2 AMI 2021-04-02 10:37:21 +05:30
Ankit Nayan
c4d3f7fd2a adding amazon linux docker installation 2021-04-01 22:12:50 +05:30
Ankit Nayan
f0497bbbd4 adding amazon linux docker installation 2021-04-01 22:04:39 +05:30
Ankit Nayan
47825ef2ad adding amazon linux docker installation 2021-04-01 21:58:58 +05:30
Ankit Nayan
f3ce9e92f4 adding amazon linux docker installation 2021-04-01 21:56:03 +05:30
Ankit Nayan
4e9e4f25cc adding amazon linux docker installation 2021-04-01 21:41:25 +05:30
Ankit Nayan
d820d6462a adding separate s3 configs 2021-04-01 21:12:45 +05:30
Ankit Nayan
87171cc880 adding separate s3 configs 2021-04-01 20:44:29 +05:30
Ankit Nayan
9d53fc6055 adding separate s3 configs 2021-04-01 20:11:33 +05:30
Ankit Nayan
8419d113f4 adding separate s3 configs 2021-04-01 19:48:27 +05:30
Ankit Nayan
2173ab6775 adding separate s3 configs 2021-04-01 19:23:24 +05:30
Ankit Nayan
bb67ac9b55 adding separate s3 configs 2021-04-01 19:22:26 +05:30
Ankit Nayan
6d78104d03 adding separate s3 configs 2021-04-01 18:36:53 +05:30
Ankit Nayan
812752972f adding separate s3 configs 2021-04-01 17:52:52 +05:30
Ankit Nayan
5d53065519 adding separate s3 configs 2021-04-01 17:45:06 +05:30
Ankit Nayan
0fd39e17b7 adding separate s3 configs 2021-04-01 17:30:01 +05:30
Ankit Nayan
2b55458e30 adding separate s3 configs 2021-04-01 17:23:59 +05:30
Ankit Nayan
d414eaec72 adding separate s3 configs 2021-04-01 17:22:31 +05:30
Ankit Nayan
6334650d22 adding separate s3 configs 2021-04-01 16:48:45 +05:30
Ankit Nayan
ccf57d9c5c adding s3 config to docker-compose 2021-04-01 12:13:04 +05:30
EC2 Default User
a23520782c adding s3 config to docker deployment 2021-03-31 18:50:27 +00:00
Ankit Nayan
86698a50bb exposed grpc legacy port 55680 in docker deployment 2021-03-27 00:06:31 +05:30
Ankit Nayan
00c744c004 added scarf for docker 2021-03-22 12:17:00 +05:30
Ankit Nayan
67dda78cbe added metrics pipeline in otel collector config 2021-03-03 00:48:04 +05:30
Ankit Nayan
62e0ec7ea4 fixed typos in readme 2021-03-03 00:47:33 +05:30
Ankit Nayan
b43d2a7567 changed query-service image tag to 0.1.4 2021-03-01 02:45:01 +05:30
Ankit Nayan
b6c718a536 added cors 2021-03-01 02:43:05 +05:30
Ankit Nayan
96d012a34b removed span.Kind=2 check in filtered spans aggregates 2021-03-01 01:36:33 +05:30
Ankit Nayan
bd8d50bab9 opening docs in new tab 2021-02-26 21:17:44 +05:30
Ankit Nayan
d9055b5030 Merge pull request #31 from himanshu-source21/ft-saas-opensource-parity-1
Add null check in GenericVisualization
2021-02-24 12:43:16 +05:30
Himanshu DIxit
52b7d38df8 Add null check in GenericVisualization 2021-02-24 12:37:53 +05:30
Ankit Nayan
4d431f0476 Merge pull request #30 from himanshu-source21/ft-saas-opensource-parity-1
Fix latency initial values in traces page
2021-02-24 11:05:59 +05:30
Himanshu DIxit
caeeec803e Fix initial values 2021-02-24 01:46:20 +05:30
Ankit Nayan
2c4dc07d2d changed node version to 12 2021-02-23 22:12:54 +05:30
Ankit Nayan
2234d31974 changed signoz/frontend image tag 2021-02-23 22:12:36 +05:30
Ankit Nayan
917e397c97 added grpc port 4317 2021-02-23 22:12:10 +05:30
Ankit Nayan
aa320386a2 changed signoz/frontend image tag 2021-02-23 22:11:32 +05:30
Ankit Nayan
2169f5498c Merge pull request #29 from himanshu-source21/ft-saas-opensource-parity-1
Remove unused ref
2021-02-23 20:21:00 +05:30
Himanshu DIxit
dd5357b975 Remove unused ref 2021-02-23 20:19:05 +05:30
Ankit Nayan
cb59805ff0 Merge pull request #28 from himanshu-source21/ft-saas-opensource-parity-1
Forcefully add env file
2021-02-23 20:18:35 +05:30
Himanshu DIxit
c708b29657 Remove unused var and change baseURl 2021-02-23 20:15:54 +05:30
Himanshu DIxit
2d238ff6a2 Forcefully add env file 2021-02-23 20:09:28 +05:30
Ankit Nayan
7b778d6951 Merge pull request #26 from himanshu-source21/ft-saas-opensource-parity-1
Fix references error found during deployment
2021-02-23 19:17:57 +05:30
Himanshu DIxit
e4dbb323a5 Fix references error found during deployment 2021-02-23 17:28:18 +05:30
Ankit Nayan
f478a6e894 Merge branch 'main' of https://github.com/signoz/signoz into main 2021-02-23 17:19:49 +05:30
Ankit Nayan
88a756fe50 k8s installation script in progress 2021-02-23 17:18:50 +05:30
Ankit Nayan
ae2dfe59d9 Merge pull request #25 from himanshu-source21/ft-saas-opensource-parity-1
Ft saas opensource parity 1
2021-02-23 16:54:06 +05:30
Himanshu DIxit
fb1ade15b5 Fix final changes 2021-02-23 16:46:58 +05:30
Himanshu DIxit
4ec389c449 Fix multiple keys selected bug 2021-02-22 05:14:59 +05:30
Himanshu DIxit
cb5713216a Add SIG-60 2021-02-22 05:05:07 +05:30
Himanshu DIxit
3a79778ce4 Fix SIG-21 2021-02-22 04:44:34 +05:30
Himanshu DIxit
81a1d2bb37 Fix SIG-55 2021-02-22 04:22:11 +05:30
Himanshu DIxit
864ef41fef Fix SIG-58 2021-02-22 03:58:57 +05:30
Himanshu DIxit
999a5094bb Prettify: Add basic indentation hygiene 2021-02-21 06:23:56 +05:30
Himanshu Dixit
a1331536ca Refactor: Bring open source to parity with SAAS 2021-02-21 06:21:15 +05:30
Ankit Nayan
3795aa059e enter key escaped in read 2021-02-18 01:03:50 +05:30
Ankit Nayan
3c9b024e34 text changes 2021-02-17 01:19:51 +05:30
Ankit Nayan
2ff2b7485e adding installation script
Merge branch 'backend' into main
2021-02-16 23:43:40 +05:30
Ankit Nayan
19dff5fdf2 checks done 2021-02-16 23:30:51 +05:30
Ankit Nayan
7bfc184ee6 checks done 2021-02-16 23:10:36 +05:30
Ankit Nayan
4954c18baa fixed text 2021-02-16 12:27:44 +05:30
Ankit Nayan
65a4649696 fixed text 2021-02-16 12:25:21 +05:30
Ankit Nayan
03233cf6be added installation success and error ping 2021-02-16 12:23:32 +05:30
Ankit Nayan
1176f61791 status_code var fixed 2021-02-16 11:53:39 +05:30
Ankit Nayan
61f0674a13 added install.sh 2021-02-16 11:02:57 +05:30
Ankit Nayan
364c68b138 added 4317 port for OTLP 2021-02-14 12:34:12 +05:30
Ankit Nayan
1de802688c added 4317 port for otlp 2021-02-14 12:33:59 +05:30
Ankit Nayan
cf58f77400 added zipkin receiver 2021-02-14 12:16:43 +05:30
Ankit Nayan
b96e1b5466 Merge branch 'backend' into main 2021-02-10 01:40:17 +05:30
Ankit Nayan
b4073dfaa8 updated README.md with docker instructions 2021-02-10 01:40:01 +05:30
Ankit Nayan
14ac30a79d Merge branch 'main' of https://github.com/signoz/signoz into main 2021-02-09 03:05:23 +05:30
Ankit Nayan
d526e15fa8 added batch size in otel collector config 2021-02-09 03:04:50 +05:30
Ankit Nayan
f7de4fcbd9 Merge pull request #20 from SigNoz/pranay01-patch-3
Update README.md
2021-02-08 00:54:59 +05:30
Pranay Prateek
27b6024d2a Update README.md 2021-02-08 00:53:14 +05:30
Ankit Nayan
c145f92125 otel collector image changed to 0.18.0 2021-02-07 01:09:59 +05:30
Ankit Nayan
cde268c40e changed otel image version to 0.19.0 2021-02-07 01:05:07 +05:30
Ankit Nayan
78bb92827b Merge branch 'main' of https://github.com/signoz/signoz into main 2021-02-05 13:55:13 +05:30
Ankit Nayan
1a0fa0900d Merge branch 'backend' into main 2021-02-05 13:54:37 +05:30
Ankit Nayan
6f9a33d6b4 add s3 config in comments 2021-02-05 13:53:58 +05:30
Ankit Nayan
beb4ba512a Merge pull request #11 from himanshu-source21/ft-carry-forward-service-name
Ft carry forward service name from metrics page -> traces page
2021-01-24 18:32:22 +05:30
Ankit Nayan
712f825525 Merge branch 'backend' into main 2021-01-24 14:05:42 +05:30
Ankit Nayan
bf51c2948b changed README.md 2021-01-24 14:01:50 +05:30
Ankit Nayan
40bae79a35 added .gitattributes file 2021-01-24 13:09:49 +05:30
Ankit Nayan
bf79d537d7 Merge pull request #10 from SigNoz/pranay01-patch-1
Updated Logo in  README.md
2021-01-24 13:03:42 +05:30
“himanshu”
53a50efa00 Reset env 2021-01-23 11:12:04 +05:30
“himanshu”
bdd23c504c Fix key rendering issue 2021-01-23 10:16:10 +05:30
“himanshu”
e98e3be33e Preserve state in latencymodalform 2021-01-23 10:05:12 +05:30
“himanshu”
5ed648bf0e Merge branch 'main' of https://github.com/SigNoz/signoz into ft-carry-forward-service-name 2021-01-23 09:55:51 +05:30
“himanshu”
b0dd622aa3 Remove unused var 2021-01-23 09:28:45 +05:30
“himanshu”
1ebfa0679e Add support for service from metrics to trace page 2021-01-23 09:25:35 +05:30
Pranay Prateek
a3ea50401d Update README.md 2021-01-22 13:28:14 +05:30
Ankit Nayan
21c0de6f77 Merge pull request #9 from SigNoz/add-code-of-conduct-1
Added code of conduct
2021-01-22 11:51:41 +05:30
Pranay Prateek
254e2f1532 Added code of conduct 2021-01-22 11:50:17 +05:30
Ankit Nayan
307c26ae50 Merge pull request #8 from pranay01/main
improving flamegraph tooltip vis
2021-01-22 10:59:36 +05:30
Pranay Prateek
4ccbe6fdd6 improving flamegraph tooltip vis 2021-01-22 01:22:47 +05:30
Ankit Nayan
f8a05e535f Merge pull request #7 from himanshu-source21/ft-traces-time-interval
Ft traces time interval
2021-01-21 00:37:03 +05:30
“himanshu”
08ba714637 Change env 2021-01-21 00:30:10 +05:30
“himanshu”
7f495181a7 View traces fix for past 15 min interval 2021-01-21 00:27:54 +05:30
Ankit Nayan
4c629721bd changing deployments 2021-01-20 19:32:21 +05:30
Ankit Nayan
7e9fe17e76 docker druid environment changes 2021-01-20 14:43:35 +05:30
Ankit Nayan
56dc0824c8 changed folder for data storage in druid 2021-01-20 13:23:42 +05:30
Ankit Nayan
cd16bf43bd Merge pull request #6 from himanshu-source21/main
Fix Sig-13, Fix-11, Logo and lint
2021-01-20 11:48:45 +05:30
“himanshu”
23a0059e41 Revert image name in docker compose 2021-01-20 11:29:39 +05:30
“himanshu”
546af6e46b Gitignore env and Add redux dev tools 2021-01-20 04:45:52 +05:30
“himanshu”
a6811aca6b Merge branch 'main' of https://github.com/SigNoz/signoz into main 2021-01-20 04:40:55 +05:30
“himanshu”
d9b0c1da1c Create docker image and tag 2021-01-20 03:59:58 +05:30
Ankit Nayan
8930ff1c88 added druid environments for small instance 2021-01-19 18:30:17 +05:30
“himanshu”
6cf2fb5490 Add Dockerfile 2021-01-19 11:10:05 +05:30
“himanshu”
14a585641c Fix flamegraph 2021-01-19 10:26:49 +05:30
“himanshu”
2a039150a8 Use API status for loading 2021-01-19 06:01:57 +05:30
“himanshu”
b7dea68ff5 Add loading for table 2021-01-19 05:56:45 +05:30
“himanshu”
5605a6210f Fix image use base ref 2021-01-19 05:22:34 +05:30
Ankit Nayan
61a8a9c17b increased retries to 10 for kafka 2021-01-18 19:07:59 +05:30
Ankit Nayan
db73cd1cdd added docker-compose-tiny with environment to run on 3GB RAM 2021-01-18 16:09:15 +05:30
Ankit Nayan
38335cbd4c added storage folders for local development 2021-01-18 16:08:29 +05:30
Ankit Nayan
6d50599ba0 restarting pods set-retention and create-supervisor on failure 2021-01-18 12:37:25 +05:30
Himanshu Dixit
cc6f755f07 Commit uncommitted changes 2021-01-18 02:43:22 +05:30
Himanshu Dixit
4bd1790a52 Refactor API endpoint 2021-01-18 02:33:48 +05:30
Himanshu Dixit
2505e01fce Exclude idea and fix theme toggle 2021-01-18 02:23:46 +05:30
Himanshu Dixit
5ff2d9e9e7 Sanity prettify 2021-01-18 02:18:49 +05:30
Himanshu Dixit
ff47f0978f Fix flamegraph.
// Alternative fix, useRef or createRef with reference to DOM chart. Weird way to handle it, looks like it's relying on immutability. Ideally any componentLibrary will use useRef for opposite data transfer.
2021-01-17 20:48:28 +05:30
Ankit Nayan
67125542a9 updated deployment link 2021-01-17 19:17:20 +05:30
Ankit Nayan
6bd8bf7d9f changed backofflimit of jobs 2021-01-17 11:46:01 +05:30
Ankit Nayan
c7399bc8e7 added OTLP HTTP receiver 2021-01-17 03:07:15 +05:30
Ankit Nayan
5f796a982b added zookeeper auto purge interval to prevent full disk space error 2021-01-16 14:02:25 +05:30
Ankit Nayan
416d943d14 added docker compose 2021-01-16 12:18:54 +05:30
490 changed files with 31666 additions and 9503 deletions

.gitattributes (vendored, new file, 1 line)

@@ -0,0 +1 @@
*.css linguist-detectable=false

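This attribute tells GitHub's Linguist to leave `*.css` out of the repository's language statistics. A quick way to confirm the attribute applies, sketched with a hypothetical CSS path:

```bash
# Ask git which attributes apply to a (hypothetical) CSS file.
git check-attr linguist-detectable -- frontend/src/index.css
# expected: frontend/src/index.css: linguist-detectable: false
```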
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
## Bug description
*Please describe.*
*If this affects the front-end, screenshots would be of great help.*
## Expected behavior
## How to reproduce
1.
2.
3.
## Version information
* **Signoz version**:
* **Browser version**:
* **Your OS and version**:
## Additional context
#### *Thank you* for your bug report - we love squashing them!

.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 27 lines)

@@ -0,0 +1,27 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
## Is your feature request related to a problem?
*Please describe.*
## Describe the solution you'd like
## Describe alternatives you've considered
## Additional context
Add any other context or screenshots about the feature request here.
#### *Thank you* for your feature request - we love each and every one!

Performance issue report template under .github/ISSUE_TEMPLATE (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
---
name: Performance issue report
about: Long response times, high resource usage? Ensuring that SigNoz is scalable
is our top priority
title: ''
labels: ''
assignees: ''
---
## In what situation are you experiencing subpar performance?
*Please describe.*
## How to reproduce
1.
2.
3.
## Your Environment
- [ ] Linux
- [ ] Mac
- [ ] Windows
Please provide details of OS version etc.
## Additional context
#### *Thank you* for your performance issue report - we want SigNoz to be blazing fast!

.github/release-drafter.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
name-template: 'v$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
template: |
# What's Changed
$CHANGES
autolabeler:
- label: 'chore'
title:
- '/chore/i'
- label: 'bug'
title:
- '/fix/i'
- label: 'enhancement'
title:
- '/feat/i'
categories:
- title: '🚀 Features'
label: 'enhancement'
- title: '🐛 Bug Fixes'
labels:
- 'bug'
- title: '🧰 Maintenance'
label: 'chore'
- title: 'Breaking'
label: 'breaking'
exclude-labels:
- 'skip-changelog'

.github/workflows/README.md (vendored, new file, 30 lines)

@@ -0,0 +1,30 @@
To run the GitHub workflows, a few environment variables need to be added to GitHub secrets.
#### Environment Variables
<table>
<tr>
<th> Variables </th>
<th> Description </th>
<th> Example </th>
</tr>
<tr>
<td> REPONAME </td>
<td> Provide the DockerHub user/organisation name of the image. </td>
<td> signoz</td>
</tr>
<tr>
<td> DOCKERHUB_USERNAME </td>
<td> Docker hub username </td>
<td> signoz</td>
</tr>
<tr>
<td> DOCKERHUB_TOKEN </td>
<td> Docker hub password/token with push permission </td>
<td> **** </td>
</tr>
<tr>
<td> SONAR_TOKEN </td>
<td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
<td> **** </td>
</tr>
</table>

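If you prefer the command line over the repository settings UI, the same secrets can be added with the GitHub CLI; the values below are placeholders:

```bash
# Placeholder values; use your own Docker Hub org/user and tokens.
gh secret set REPONAME --body "signoz"
gh secret set DOCKERHUB_USERNAME --body "signoz"
gh secret set DOCKERHUB_TOKEN    # prompts for the token value
gh secret set SONAR_TOKEN        # prompts for the SonarCloud token
```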
.github/workflows/build.yaml (vendored, new file, 77 lines)

@@ -0,0 +1,77 @@
name: build-pipeline
on:
pull_request:
branches:
- main
- v*
paths:
- 'pkg/**'
- 'frontend/**'
jobs:
get_filters:
runs-on: ubuntu-latest
# Set job outputs to values from filter step
outputs:
frontend: ${{ steps.filter.outputs.frontend }}
query-service: ${{ steps.filter.outputs.query-service }}
flattener: ${{ steps.filter.outputs.flattener }}
steps:
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@v2
id: filter
with:
filters: |
frontend:
- 'frontend/**'
query-service:
- 'pkg/query-service/**'
flattener:
- 'pkg/processors/flattener/**'
build-frontend:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.frontend == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install dependencies
run: cd frontend && yarn install
- name: Run Prettier
run: cd frontend && npm run prettify
continue-on-error: true
- name: Run ESLint
run: cd frontend && npm run lint
continue-on-error: true
- name: Build frontend docker image
shell: bash
run: |
make build-frontend-amd64
build-query-service:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.query-service == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build query-service image
shell: bash
run: |
make build-query-service-amd64
build-flattener:
runs-on: ubuntu-latest
needs:
- get_filters
if: ${{ needs.get_filters.outputs.flattener == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build flattener docker image
shell: bash
run: |
make build-flattener-amd64

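The pipeline only triggers on pull requests touching `pkg/**` or `frontend/**`. If you want to smoke-test a job before opening a PR, one option (an assumption, not part of the repo's tooling) is nektos/act, which replays workflows locally in Docker:

```bash
# Requires Docker and the act CLI (https://github.com/nektos/act).
act pull_request -W .github/workflows/build.yaml -j build-frontend
```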
.github/workflows/codeql.yaml (vendored, new file, 71 lines)

@@ -0,0 +1,71 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ main, v* ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
schedule:
- cron: '32 5 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript', 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

.github/workflows/commitlint.yml (vendored, new file, 18 lines)

@@ -0,0 +1,18 @@
name: commitlint
on: [pull_request]
defaults:
run:
working-directory: frontend
jobs:
lint-commits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.1
with:
# we actually need "github.event.pull_request.commits + 1" commit
fetch-depth: 0
- uses: actions/setup-node@v2.1.0
# or just "yarn" if you depend on "@commitlint/cli" already
- run: yarn add @commitlint/cli
- run: yarn add @commitlint/config-conventional
- run: yarn run commitlint --config ./node_modules/@commitlint/config-conventional/index.js --from HEAD~${{ github.event.pull_request.commits }} --to HEAD

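Because the workflow uses `@commitlint/config-conventional`, commit messages need a conventional type prefix. A minimal local check, assuming you run it from the `frontend` workspace where the commitlint packages are installed:

```bash
cd frontend
# Passes: has a conventional type and scope.
echo "fix(FE): correct trace page filters" | yarn run commitlint \
  --config ./node_modules/@commitlint/config-conventional/index.js
# Fails: no type prefix.
echo "fixed trace page" | yarn run commitlint \
  --config ./node_modules/@commitlint/config-conventional/index.js
```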
.github/workflows/push.yaml (vendored, new file, 172 lines)

@@ -0,0 +1,172 @@
name: push-pipeline
on:
push:
branches:
- main
- 'v[0-9]*.[0-9]*.x' # GitHub branch filters are globs, not regexes
tags:
- "*"
# pull_request:
# branches:
# - main
# - v*
# paths:
# - 'pkg/**'
# - 'frontend/**'
jobs:
get-envs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
- shell: bash
run: |
img_tag=""
array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
if [ ${array[1]} == "tags" ]
then
echo "tag build"
img_tag=${GITHUB_REF#refs/*/v}
elif [ ${array[1]} == "pull" ]
then
img_tag="pull-${{ github.event.number }}"
else
echo "non tag build"
img_tag="latest"
fi
# This is a condition where image tag looks like "pull/<pullrequest-name>" during pull request build
NEW_IMG_TAG=`echo $img_tag | sed "s/\//-/g"`
echo $NEW_IMG_TAG
echo export IMG_TAG=$NEW_IMG_TAG >> env-vars
echo export FRONTEND_IMAGE="frontend" >> env-vars
echo export QUERY_SERVICE="query-service" >> env-vars
echo export FLATTENER_PROCESSOR="flattener-processor" >> env-vars
- name: Uploading envs
uses: actions/upload-artifact@v2
with:
name: env_artifact
path: env-vars
build-and-push-frontend:
runs-on: ubuntu-latest
needs:
- get-envs
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Downloading image artifact
uses: actions/download-artifact@v2
with:
name: env_artifact
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Push Frontend Docker Image
shell: bash
env:
FRONTEND_DIRECTORY: "frontend"
REPONAME: ${{ secrets.REPONAME }}
FRONTEND_DOCKER_IMAGE: ${FRONTEND_IMAGE}
DOCKER_TAG: ${IMG_TAG}
run: |
branch=${GITHUB_REF#refs/*/}
array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
then
source env-vars
make build-push-frontend
fi
build-and-push-query-service:
runs-on: ubuntu-latest
needs:
- get-envs
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Downloading image artifact
uses: actions/download-artifact@v2
with:
name: env_artifact
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Push Query Service Docker Image
shell: bash
env:
QUERY_SERVICE_DIRECTORY: "pkg/query-service"
REPONAME: ${{ secrets.REPONAME }}
QUERY_SERVICE_DOCKER_IMAGE: ${QUERY_SERVICE}
DOCKER_TAG: ${IMG_TAG}
run: |
branch=${GITHUB_REF#refs/*/}
array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] ||[[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
then
source env-vars
make build-push-query-service
fi
build-and-push-flattener:
runs-on: ubuntu-latest
needs:
- get-envs
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Downloading image artifact
uses: actions/download-artifact@v2
with:
name: env_artifact
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Push Flattener Processor Docker Image
shell: bash
env:
FLATTENER_DIRECTORY: "pkg/processors/flattener"
REPONAME: ${{ secrets.REPONAME }}
FLATTERNER_DOCKER_IMAGE: ${FLATTENER_PROCESSOR}
DOCKER_TAG: ${IMG_TAG}
run: |
branch=${GITHUB_REF#refs/*/}
array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]]
then
source env-vars
make build-push-flattener
fi

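The `get-envs` job derives the image tag from `GITHUB_REF`. A standalone sketch of that derivation, with sample refs (the PR number is hardcoded here, whereas the workflow reads `github.event.number`):

```bash
for GITHUB_REF in refs/tags/v0.4.4 refs/pull/356/merge refs/heads/main; do
  array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`)
  if [ ${array[1]} == "tags" ]; then
    img_tag=${GITHUB_REF#refs/*/v}   # tag build    -> 0.4.4
  elif [ ${array[1]} == "pull" ]; then
    img_tag="pull-356"               # PR build     -> pull-356
  else
    img_tag="latest"                 # branch build -> latest
  fi
  echo "$GITHUB_REF -> `echo $img_tag | sed 's/\//-/g'`"
done
```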
.github/workflows/release-drafter.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
name: Release Drafter
on:
push:
# branches to consider in the event; optional, defaults to all
branches:
- main
# pull_request event is required only for autolabeler
pull_request:
# Only following types are handled by the action, but one can default to all as well
types: [opened, reopened, synchronize]
jobs:
update_release_draft:
runs-on: ubuntu-latest
steps:
# (Optional) GitHub Enterprise requires GHE_HOST variable set
#- name: Set GHE_HOST
# run: |
# echo "GHE_HOST=${GITHUB_SERVER_URL##https:\/\/}" >> $GITHUB_ENV
# Drafts your next release notes as pull requests are merged into "main"
- uses: release-drafter/release-drafter@v5
# (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
# with:
# config-name: my-config.yml
# disable-autolabeler: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/sonar.yml (vendored, new file, 27 lines)

@@ -0,0 +1,27 @@
name: sonar
on:
pull_request:
branches:
- main
- v*
paths:
- 'frontend/**'
defaults:
run:
working-directory: frontend
jobs:
sonar-analysis:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Sonar analysis
uses: sonarsource/sonarcloud-github-action@master
with:
projectBaseDir: frontend
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}

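The SonarCloud action reads its project settings from a `sonar-project.properties` file in `projectBaseDir` (here, `frontend`). A minimal sketch; the project key, organization, and source directory are assumptions to adapt to your SonarCloud setup:

```bash
# Values below are assumptions; use your SonarCloud project key and org.
cat > frontend/sonar-project.properties <<'EOF'
sonar.projectKey=SigNoz_signoz
sonar.organization=signoz
sonar.sources=src
EOF
```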
.gitignore (vendored, modified, 20 lines changed)

@@ -1,3 +1,8 @@
node_modules
yarn.lock
package.json
deploy/docker/environment_tiny/common_test
frontend/node_modules
frontend/.pnp
*.pnp.js
@@ -19,7 +24,20 @@ frontend/.yarnclean
frontend/npm-debug.log*
frontend/yarn-debug.log*
frontend/yarn-error.log*
frontend/src/constants/env.ts
frontend/cypress/**/*.mp4
# env file for cypress
frontend/cypress.env.json
.idea
**/.vscode
*.tgz
**/build
**/storage
**/locust-scripts/__pycache__/
**/__debug_bin
frontend/*.env
pkg/query-service/signoz.db

CODE_OF_CONDUCT.md (new file, 76 lines)

@@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at dev@signoz.io. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

CONTRIBUTING.md (modified)

@@ -1,9 +1,76 @@
# How to Contribute
You can always reach out to ankit@signoz.io to understand more about the repo and product. We are very responsive over email and [slack](https://signoz-community.slack.com/join/shared_invite/zt-kj26gm1u-Xe3CYxCu0bGXCrCqKipjOA#/).
There are primarily 3 areas in which you can contribute to SigNoz:
- Frontend ( written in Typescript, React)
- Query Service (written in Go)
- Flattener Processor (written in Go)
Depending upon your area of expertise & interest, you can choose one or more to contribute to. Below are detailed instructions for contributing to each area.
# Develop Frontend
Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)
### Contribute to Frontend with Docker installation of SigNoz
- `git clone https://github.com/SigNoz/signoz.git && cd signoz`
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L38`
- run `cd deploy && docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this will install signoz locally without the frontend service)
- `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
- `yarn install`
- `yarn dev`
### Contribute to Frontend without installing SigNoz backend
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in the #contributing channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) and we will DM you with `<test environment URL>`
- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
- Create a file `.env` with `FRONTEND_API_ENDPOINT=<test environment URL>`
- `yarn install`
- `yarn dev`
**_Frontend should now be accessible at `http://localhost:3000/application`_**
# Contribute to Query-Service
Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)
### To run ClickHouse setup (recommended for local development)
- `git clone https://github.com/SigNoz/signoz.git && cd signoz/deploy`
- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L38`
- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L22`
- Run `docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this will install signoz locally without the frontend and query-service)
- `STORAGE=clickhouse ClickHouseUrl=tcp://localhost:9001 go run main.go`
**_Query Service should now be available at `http://localhost:8080`_**
> If you want to see how the frontend works with the query service, you can also run the frontend in your local env with the baseURL changed to `http://localhost:8080` in the file `src/constants/env.ts`, as the query-service is now running on port `8080`
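To sanity-check that the query service came up, you can hit it over HTTP; the endpoint below is illustrative, so check the router in `pkg/query-service` for the exact paths:

```bash
# Illustrative smoke test; exact route may differ by version.
curl -s http://localhost:8080/api/v1/services
```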
# Contribute to Flattener Processor
Not needed for the ClickHouse setup.
More info at [https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener](https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener)
## General Instructions
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
- You can create a PR (Pull Request)
- If you find any bugs, please create an issue
- If you find anything missing in documentation, you can create an issue with label **documentation**
- If you want to build any new feature, please create an issue with label `enhancement`
- If you want to discuss something about the product, start a new [discussion](https://github.com/SigNoz/signoz/discussions)
### Conventions to follow when submitting commits, PRs
1. We try to follow https://www.conventionalcommits.org/en/v1.0.0/
More specifically, the commits and PRs should have type specifiers prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.
e.g. If you are submitting a fix for an issue in frontend - PR name should be prefixed with `fix(FE):`
2. Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows
3. Feel free to ping us on `#contributing` or `#contributing-frontend` on our slack community if you need any help on this :)

Makefile (new file, 69 lines)

@@ -0,0 +1,69 @@
# Reference Guide - https://www.gnu.org/software/make/manual/make.html
#
# Internal variables or constants.
#
FRONTEND_DIRECTORY ?= frontend
FLATTENER_DIRECTORY ?= pkg/processors/flattener
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
REPONAME ?= signoz
DOCKER_TAG ?= latest
FRONTEND_DOCKER_IMAGE ?= frontend
FLATTERNER_DOCKER_IMAGE ?= flattener-processor
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
all: build-push-frontend build-push-query-service build-push-flattener
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
build-frontend-amd64:
@echo "------------------"
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
# Step to build and push docker image of frontend(used in push pipeline)
build-push-frontend:
@echo "------------------"
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64:
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@cd $(QUERY_SERVICE_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@cd $(QUERY_SERVICE_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of flattener
.PHONY: build-flattener-amd64 build-push-flattener
# Step to build docker image of flattener in amd64 (used in build pipeline)
build-flattener-amd64:
@echo "------------------"
@echo "--> Building flattener docker image for amd64"
@echo "------------------"
@cd $(FLATTENER_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
# Step to build and push docker image of flattener in amd64 (used in push pipeline)
build-push-flattener:
@echo "------------------"
@echo "--> Building and pushing flattener docker image"
@echo "------------------"
@cd $(FLATTENER_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) .

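A usage sketch for these targets; `mydockerhubuser` is a placeholder for your own Docker Hub namespace, and the push targets assume `docker login` and a buildx builder are already configured:

```bash
# Local amd64 build only (no push).
make build-frontend-amd64 REPONAME=mydockerhubuser DOCKER_TAG=dev
# Multi-arch build and push (requires docker login + buildx).
make build-push-query-service REPONAME=mydockerhubuser DOCKER_TAG=dev
```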
README.de-de.md (new file, 160 lines)

@@ -0,0 +1,160 @@
<p align="center">
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
<p align="center">Überwache deine Anwendungen und behebe Probleme in deinen bereitgestellten Anwendungen. SigNoz ist eine Open Source Alternative zu DataDog, New Relic, etc.</p>
</p>
<p align="center">
<img alt="Lizenz" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>
<h3 align="center">
<a href="https://signoz.io/docs"><b>Dokumentation</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
<a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
##
SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren bereitgestellten Anwendungen zu beheben. SigNoz benutzt verteilte Einzelschritt-Fehlersuchen, um Einblick in deinen Software-Stack zu bekommen.
👉 Du kannst Werte wie die P99-Latenz und die Fehler Häufigkeit von deinen Services, externen API Aufrufen und einzelnen Endpunkten sehen.
👉 Du kannst die Ursache des Problems finden, indem du zu dem Einzelschritt gehst, der das Problem verursacht und dir detaillierte Flamegraphs von einzelnen Abfragefehlersuchen anzeigen lassen.
👉 Erstelle Aggregate auf Basis von Fehlersuche Daten, um geschäftsrelevante Metriken zu erhalten.
![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Werde Teil unserer Slack Community
Sag Hi zu uns auf [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
## Funktionen:
- Übersichtsmetriken deiner Anwendung wie RPS, 50tes/90tes/99tes Quantil Latenzen und Fehler Häufigkeiten.
- Übersicht der langsamsten Endpunkte deiner Anwendung.
- Sieh dir die genaue Einzelschritt-Fehlersuche deiner Abfrage an, um Fehler in nachgelagerten Diensten, langsamen Datenbank Abfragen und Aufrufen von Drittanbieter Diensten wie Zahlungsportalen, etc. zu finden.
- Filtere Einzelschritt-Fehlersuchen nach Dienstname, Latenz, Fehler, Stichworten/ Anmerkungen.
- Führe Aggregate auf Basis von Einzelschritt-Fehlersuche Daten (Ereignisse/Abstände) aus, um geschäftsrelevante Metriken zu erhalten. Du kannst dir z. B. die Fehlerrate und 99tes Quantil Latenz von `customer_type: gold`, `deployment_version: v2` oder `external_call: paypal` ausgeben lassen.
- Einheitliche Benutzeroberfläche für Metriken und Einzelschritt-Fehlersuchen. Du musst nicht zwischen Prometheus und Jaeger hin und her wechseln, um Fehler zu beheben.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
## Wieso SigNoz?
Als Entwickler fanden wir es anstrengend, uns für jede kleine Funktion, die wir haben wollten, auf Closed Source SaaS Anbieter verlassen zu müssen. Closed Source Anbieter überraschen ihre Kunden zum Monatsende oft mit hohen Rechnungen, die keine Transparenz bzgl. der Kostenaufteilung bieten.
Wir wollten eine selbst gehostete, Open Source Variante von Lösungen wie DataDog, NewRelic für Firmen anbieten, die Datenschutz und Sicherheitsbedenken haben, bei der Weitergabe von Kundendaten an Drittanbieter.
Open Source gibt dir außerdem die totale Kontrolle über deine Konfiguration, Stichprobenentnahme und Betriebszeit. Du kannst des Weiteren neue Module auf Basis von SigNoz bauen, die erweiterte, geschäftsspezifische Funktionen anbieten.
### Unterstützte Programmiersprachen:
Wir unterstützen [OpenTelemetry](https://opentelemetry.io) als die Software Library, die du nutzen kannst um deine Anwendungen auszuführen. Jedes Framework und jede Sprache die von OpenTelemetry unterstützt wird, wird auch von SigNoz unterstützt. Einige der unterstützten, größeren Programmiersprachen sind:
- Java
- Python
- NodeJS
- Go
Hier findest du die vollständige Liste von unterstützten Programmiersprachen - https://opentelemetry.io/docs/
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
## Erste Schritte mit SigNoz
### Bereitstellung mit Docker
Bitte folge den [hier](https://signoz.io/docs/deployment/docker/) aufgelisteten Schritten um deine Anwendung mit Docker bereitzustellen.
Die [Anleitungen zur Fehlerbehebung](https://signoz.io/docs/deployment/troubleshooting) könnten hilfreich sein, falls du auf irgendwelche Schwierigkeiten stößt.
<p>&nbsp;</p>
### Bereitstellung mit Kubernetes und Helm
Bitte folge den [hier](https://signoz.io/docs/deployment/helm_chart) aufgelisteten Schritten, um deine Anwendung mit Helm Charts bereitzustellen.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
## Vergleiche mit anderen Lösungen
### SigNoz vs. Prometheus
Prometheus ist gut, falls du dich nur für Metriken interessierst. Wenn du eine nahtlose Integration von Metriken und Einzelschritt-Fehlersuchen haben möchtest, ist die Kombination aus Prometheus und Jaeger nicht das Richtige für dich.
Unser Ziel ist es, eine integrierte Benutzeroberfläche aus Metriken und Einzelschritt-Fehlersuchen anzubieten, ähnlich wie es SaaS Anbieter wie Datadog tun, mit der Möglichkeit von erweitertem filtern und aggregieren von Fehlersuchen. Etwas, was in Jaeger aktuell fehlt.
<p>&nbsp;</p>
### SigNoz vs. Jaeger
Jaeger kümmert sich nur um verteilte Einzelschritt-Fehlersuche. SigNoz erstellt sowohl Metriken als auch Einzelschritt-Fehlersuche, daneben haben wir auch Protokoll Verwaltung auf unserem Plan.
Außerdem hat SigNoz noch mehr spezielle Funktionen im Vergleich zu Jaeger:
- Jaeger UI zeigt keine Metriken für Einzelschritt-Fehlersuchen oder für gefilterte Einzelschritt-Fehlersuchen an
- Jaeger erstellt keine Aggregate für gefilterte Einzelschritt-Fehlersuchen, z. B. die P99 Latenz von Abfragen mit dem Tag - customer_type='premium', was hingegen mit SigNoz leicht umsetzbar ist.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
## Zum Projekt beitragen
Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md) durch, bevor du anfängst, Beiträge zu SigNoz zu machen.
Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
## Dokumentation
Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unverständlich ist oder fehlt, öffne gerne ein Github Issue mit dem Label `documentation` oder schreib uns über den Community Slack Channel.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Community
Werde Teil der [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen.
Falls du irgendwelche Ideen, Fragen oder Feedback hast, kannst du sie gerne über unsere [Github Discussions](https://github.com/SigNoz/signoz/discussions) mit uns teilen.
Wie immer, danke an unsere großartigen Unterstützer!
<a href="https://github.com/signoz/signoz/graphs/contributors">
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>

README.md (modified, 205 lines changed)

@@ -1,69 +1,162 @@
<p align="center"><img src="https://signoz.io/img/SigNozLogo-200x200.svg" alt="SigNoz Logo" width="100"></p>
<p align="center">
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
# SigNoz
SigNoz is an opensource observability platform. SigNoz uses distributed tracing to gain visibility into your systems and powers data using [Kafka](https://kafka.apache.org/) (to handle high ingestion rate and backpressure) and [Apache Druid](https://druid.apache.org/) (Apache Druid is a high performance real-time analytics database), both proven in industry to handle scale.
<p align="center">Monitor your applications and troubleshoot problems in your deployed applications, an open-source alternative to DataDog, New Relic, etc.</p>
</p>
<p align="center">
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>
<h3 align="center">
<a href="https://signoz.io/docs"><b>Documentation</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
<a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
##
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
👉 You can see metrics like p99 latency, error rates for your services, external API calls and individual end points.
👉 You can find the root cause of the problem by going to the exact traces which are causing the problem and see detailed flamegraphs of individual request traces.
👉 Run aggregates on trace data to get business relevant metrics
![SigNoz Feature](https://signoz.io/img/readme_feature1.jpg)
![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
<br /><br />
### Features:
- Application overview metrics like RPS, 50th/90th/99th Percentile latencies and Error Rate
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Join our Slack community
Come say Hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
## Features:
- Application overview metrics like RPS, 50th/90th/99th Percentile latencies, and Error Rate
- Slowest endpoints in your application
- See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc
- Filter traces by service name, operation, latency, error, tags/annotations.
- Aggregate metrics on filtered traces. Eg, you can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Filter traces by service name, operation, latency, error, tags/annotations.
- Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.
- In-built workflows to reduce your efforts in detecting common issues like new deployment failures, 3rd party slow APIs, etc (Coming Soon)
- Anomaly Detection Framework (Coming Soon)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
## Why SigNoz?
Being developers, we found it annoying to rely on closed source SaaS vendors for every small feature we wanted. Closed source vendors often surprise you with huge month end bills without any transparency.
We wanted to make a self-hosted & open source version of tools like DataDog, NewRelic for companies that have privacy and security concerns about having customer data going to third party services.
Being open source also gives you complete control of your configuration, sampling, uptimes. You can also build modules over SigNoz to extend business specific capabilities
### Languages supported:
We support [OpenTelemetry](https://opentelemetry.io) as the library which you can use to instrument your applications. So any framework and language supported by OpenTelemetry is also supported by SigNoz. Some of the main supported languages are:
- Java
- Python
- NodeJS
- Go
You can find the complete list of languages here - https://opentelemetry.io/docs/
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
## Getting Started
### Deploy using Docker
Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using docker
The [troubleshooting instructions](https://signoz.io/docs/deployment/troubleshooting) may be helpful if you face any issues.
<p>&nbsp;</p>
### Deploy in Kubernetes using Helm
Please follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using helm charts
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
## Comparisons to Familiar Tools
### SigNoz vs Prometheus
Prometheus is good if you want to do just metrics. But if you want to have a seamless experience between metrics and traces, then current experience of stitching together Prometheus & Jaeger is not great.
Our goal is to provide an integrated UI between metrics & traces - similar to what SaaS vendors like Datadog provide - and give advanced filtering and aggregation over traces, something which Jaeger currently lacks.
<p>&nbsp;</p>
### SigNoz vs Jaeger
Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management in our roadmap.
Moreover, SigNoz has a few more advanced features compared to Jaeger:
- Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't get aggregates on filtered traces. For example, p99 latency of requests which have tag - customer_type='premium'. This can be done easily on SigNoz
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
## Contributing
### Motivation:
- SaaS vendors charge insane amounts to provide Application Monitoring. They often surprise you with huge month-end bills without any transparency of data sent to them.
- Data privacy and compliance demands data to not leave the network boundary
- No more magic happening in agents installed in your infra. You take control of sampling, uptime, configuration. Also, you can build modules over SigNoz to extend business specific capabilities.
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
## Documentation
You can find docs at https://signoz.io/docs/. If you need any clarification or find something missing, feel free to raise a GitHub issue with the label `documentation` or reach out to us at the community slack channel.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Community
Join the [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.
If you have any ideas, questions, or feedback, please share them on our [GitHub Discussions](https://github.com/SigNoz/signoz/discussions)
As always, thanks to our amazing contributors!
<a href="https://github.com/signoz/signoz/graphs/contributors">
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>
# Getting Started
Deploy in Kubernetes using Helm. The steps below will install SigNoz in the `platform` namespace inside your k8s cluster.
```console
git clone https://github.com/SigNoz/signoz.git && cd signoz
helm dependency update deploy/kubernetes/platform
kubectl create ns platform
helm -n platform install signoz deploy/kubernetes/platform
kubectl -n platform apply -Rf deploy/kubernetes/jobs
kubectl -n platform apply -f deploy/kubernetes/otel-collector
```
*You can choose a different namespace too. In that case, you need to point your applications to the correct address to send traces. In our sample application, just change the `JAEGER_ENDPOINT` environment variable in `sample-apps/hotrod/deployment.yaml`.*
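For example, if SigNoz were installed in a namespace called `monitoring` instead of `platform`, the HotROD deployment could point at the collector through its in-cluster DNS name. The collector service name and namespace below are assumptions; port 14268 is the Jaeger receiver port used elsewhere in this diff:

```yaml
# Hypothetical excerpt from sample-apps/hotrod/deployment.yaml
env:
  - name: JAEGER_ENDPOINT
    value: http://otel-collector.monitoring.svc.cluster.local:14268/api/traces
```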
### Test HotROD application with SigNoz
```console
kubectl create ns sample-application
kubectl -n sample-application apply -Rf sample-apps/hotrod/
```
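If the install worked, the pods in both namespaces should reach the `Running` state. A quick sanity check (plain kubectl, offered as a hint rather than part of the official steps):

```console
kubectl -n platform get pods
kubectl -n sample-application get pods
```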
### How to generate load
The command below asks the Locust master to spawn 6 simulated users at a hatch rate of 2 users per second:
`kubectl -n sample-application run strzal --image=djbingham/curl --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm`
### See UI
`kubectl -n platform port-forward svc/signoz-frontend 3000:3000`
Then open http://localhost:3000 in your browser.
### How to stop load
`kubectl -n sample-application run strzal --image=djbingham/curl --restart='OnFailure' -i --tty --rm --command -- curl http://locust-master:8089/stop`
# Documentation
You can find docs at https://signoz.io/docs/installation. If you need any clarification or find something missing, feel free to raise a GitHub issue with the label `documentation` or reach out to us on the community Slack channel.
# Community
Join the [slack community](https://app.slack.com/client/T01HWUTP0LT#/) to learn more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.
If you have any ideas, questions, or feedback, please share them on our [GitHub Discussions](https://github.com/SigNoz/signoz/discussions)

README.pt-br.md Normal file

@@ -0,0 +1,159 @@
<p align="center">
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
<p align="center">Monitore seus aplicativos e solucione problemas em seus aplicativos implantados, uma alternativa de código aberto para soluções como DataDog, New Relic, entre outras.</p>
</p>
<p align="center">
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>
<h3 align="center">
<a href="https://signoz.io/docs"><b>Documentação</b></a> &bull;
<a href="https://bit.ly/signoz-slack"><b>Comunidade no Slack</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
##
SigNoz helps developers monitor their applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
👉 You can check metrics such as p99 latency and error rates of your services, external API calls, and individual endpoints.
👉 You can find the root cause of a problem by drilling into the exact traces that are causing it and inspecting detailed flamegraphs of each individual request.
👉 Run aggregations on trace data to get relevant business metrics.
![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Join our Slack community
Come say hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
## Features:
- Application overview metrics such as RPS, 50th/90th/99th percentile latencies, and error rate
- Slowest endpoints in your application
- See exact network request traces to figure out issues in downstream services, slow database queries, and calls to third-party services such as payment gateways
- Filter traces by service name, operation, latency, error, tags/annotations.
- Run aggregations on trace data (events/spans) to get relevant business metrics, e.g. you can get the error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces. No need to switch from Prometheus to Jaeger to debug issues.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
## Why SigNoz?
Being developers, we found it annoying to rely on closed-source SaaS vendors for every small feature we wanted. Closed-source vendors often surprise you with huge month-end bills that lack any transparency.
We wanted to make a self-hosted, open-source version of tools like DataDog and NewRelic for companies that have privacy and security concerns about customer data going to third-party services.
Being open source also gives you complete control of your configuration, sampling, and uptimes. You can also build modules on top of SigNoz to extend business-specific capabilities.
### Supported languages:
We support [OpenTelemetry](https://opentelemetry.io) as the library you can use to instrument your applications. In other words, SigNoz supports any framework and language that supports OpenTelemetry. The main supported languages include:
- Java
- Python
- NodeJS
- Go
You can find the complete list of languages here - https://opentelemetry.io/docs/
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
## Getting Started
### Deploy using Docker
Follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using Docker.
The [troubleshooting guide](https://signoz.io/docs/deployment/troubleshooting) may be helpful if you run into any issues.
<p>&nbsp;</p>
### Deploy in Kubernetes using Helm
Follow the steps listed [here](https://signoz.io/docs/deployment/helm_chart) to install using Helm charts.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
## Comparisons to similar tools
### SigNoz vs Prometheus
Prometheus is good if you only want to do metrics. But if you want a seamless experience across metrics and traces, the current experience of stitching together Prometheus and Jaeger is not great.
Our goal is to provide an integrated UI between metrics and traces - similar to what SaaS vendors like Datadog provide - and to offer advanced filtering and aggregation over traces, something which Jaeger currently lacks.
<p>&nbsp;</p>
### SigNoz vs Jaeger
Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management on our roadmap.
Moreover, SigNoz has a few advanced features that Jaeger lacks:
- The Jaeger UI doesn't show any metrics on traces or on filtered traces
- Jaeger can't get aggregates on filtered traces. For example, the p99 latency of requests that have the tag customer_type='premium'. This can be done easily with SigNoz.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
## Contributing
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community.](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
## Documentation
You can find the documentation at https://signoz.io/docs/. If you need any clarification or find something missing, feel free to raise a GitHub issue with the label `documentation` or reach out to us on the community Slack channel.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Community
Join the [Slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to learn more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.
If you have any ideas, questions, or feedback, please share them on our [GitHub Discussions](https://github.com/SigNoz/signoz/discussions)
As always, thanks to our amazing contributors!
<a href="https://github.com/signoz/signoz/graphs/contributors">
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>

README.zh-cn.md Normal file

@@ -0,0 +1,150 @@
<p align="center">
<img src="https://res.cloudinary.com/dcv3epinx/image/upload/v1618904450/signoz-images/LogoGithub_sigfbu.svg" alt="SigNoz-logo" width="240" />
<p align="center">监视你的应用并可排查已部署应用中的问题这是一个开源的可替代DataDog、NewRelic的方案</p>
</p>
<p align="center">
<img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
<img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
<img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
<a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
<img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
</p>
##
SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to increase visibility into your software stack.
👉 You can see metrics such as p99 latency and error rates for your services, external API calls, and individual endpoints.
👉 Pinpoint the exact traces that caused a problem and see the flame graph of each individual request, so you can find the root cause.
![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png)
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## Join our Slack community
Come say hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Features.svg" width="50px" />
## Features:
- Application overview metrics such as RPS, 50th/90th/99th percentile latencies, and error rate
- Slowest endpoints in your application
- See exact network request traces to analyze issues in downstream services, slow database queries, and calls to third-party services such as payment gateways
- Filter traces by service name, operation, latency, error, and tags
- Run aggregations on filtered trace data to get relevant metrics, e.g. the error rate and p99 latency for the filter `customer_type: gold` or `deployment_version: v2` or `external_call: paypal`
- Unified UI for metrics and traces, so there is no need to switch from Prometheus to Jaeger to debug issues
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/WhatsCool.svg" width="50px" />
## Why SigNoz?
As developers, we found it annoying to rely on closed-source SaaS vendors for every small feature we wanted. Closed-source vendors often hand you a huge month-end bill without enough transparency about what you are paying for.
We wanted to make a self-hosted, open-source version of tools like DataDog and NewRelic for companies that have privacy and security concerns about customer data flowing to third parties.
Being open source also gives you complete control over configuration, sampling, and uptimes, and you can build modules on top of SigNoz to meet specific business needs.
### Supported languages:
We support the [OpenTelemetry](https://opentelemetry.io) library, which you can use to instrument your applications. In other words, SigNoz supports any framework and language that supports the OpenTelemetry library. The main supported languages include:
- Java
- Python
- NodeJS
- Go
You can find the complete list of languages in this document - https://opentelemetry.io/docs/
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Philosophy.svg" width="50px" />
## Getting Started
### Deploy using Docker
Please follow the steps listed [here](https://signoz.io/docs/deployment/docker/) to install using Docker
The [troubleshooting guide](https://signoz.io/docs/deployment/troubleshooting) may help if you run into any issues.
<p>&nbsp;</p>
### Deploy on Kubernetes using Helm
Please follow the steps [here](https://signoz.io/docs/deployment/helm_chart) to install using Helm charts
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/UseSigNoz.svg" width="50px" />
## Comparisons to Familiar Tools
### SigNoz vs Prometheus
Prometheus is good if you only need metrics, but if you want to move seamlessly between metrics and traces, the current experience of stitching together Prometheus & Jaeger is not great.
Our goal is to provide an integrated UI between metrics and traces - similar to what SaaS vendors like Datadog offer - with the ability to filter and aggregate traces, which Jaeger currently lacks.
<p>&nbsp;</p>
### SigNoz vs Jaeger
Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management on our roadmap.
Moreover, SigNoz has some advanced features that Jaeger lacks:
- The Jaeger UI cannot show metrics on traces or on filtered traces.
- Jaeger cannot aggregate over filtered traces, e.g. the p99 latency of all requests with the tag customer_type='premium'. This is easy to do in SigNoz.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributors.svg" width="50px" />
## Contributing
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/DevelopingLocally.svg" width="50px" />
## Documentation
You can find the docs at https://signoz.io/docs/. If anything is unclear or missing, please raise a GitHub issue with the label `documentation` or tell us on the community Slack channel.
<br /><br />
<img align="left" src="https://signoz-public.s3.us-east-2.amazonaws.com/Contributing.svg" width="50px" />
## 社区
加入[slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA),了解更多关于分布式跟踪、可观察性(observability)以及SigNoz。同时与其他用户和贡献者一起交流。
如果你有任何想法、问题或者反馈,请在[Github Discussions](https://github.com/SigNoz/signoz/discussions)分享给我们。
最后,感谢我们这些优秀的贡献者们。
<a href="https://github.com/signoz/signoz/graphs/contributors">
<img src="https://contrib.rocks/image?repo=signoz/signoz" />
</a>


@@ -0,0 +1,517 @@
<?xml version="1.0"?>
<yandex>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
</logger>
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<!-- For HTTPS and SSL over native protocol. -->
<!--
<https_port>8443</https_port>
<tcp_ssl_port>9440</tcp_ssl_port>
-->
<!-- Used with https_port and tcp_ssl_port. Full ssl options list: https://github.com/yandex/ClickHouse/blob/master/contrib/libpoco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
<!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
<dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
<verificationMode>none</verificationMode>
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
</server>
<client> <!-- Used for connecting to https dictionary source -->
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
<!-- Use for self-signed: <verificationMode>none</verificationMode> -->
<invalidCertificateHandler>
<!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
<name>RejectCertificateHandler</name>
</invalidCertificateHandler>
</client>
</openSSL>
<!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
<!--
<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
-->
<!-- Port for communication between replicas. Used for data exchange. -->
<interserver_http_port>9009</interserver_http_port>
<!-- Hostname that is used by other replicas to request this server.
If not specified, than it is determined analoguous to 'hostname -f' command.
This setting could be used to switch replication to another network interface.
-->
<!--
<interserver_http_host>example.yandex.ru</interserver_http_host>
-->
<!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
<listen_host>::</listen_host>
<!-- Same for hosts with disabled ipv6: -->
<!-- <listen_host>0.0.0.0</listen_host> -->
<!-- Default values - try listen localhost on ipv4 and ipv6: -->
<!-- <listen_host>0.0.0.0</listen_host> -->
<max_connections>4096</max_connections>
<keep_alive_timeout>3</keep_alive_timeout>
<!-- Maximum number of concurrent queries. -->
<max_concurrent_queries>100</max_concurrent_queries>
<!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
correct maximum value. -->
<!-- <max_open_files>262144</max_open_files> -->
<!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
In bytes. Cache is single for server. Memory is allocated only on demand.
Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
Uncompressed cache is advantageous only for very short queries and in rare cases.
-->
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
<!-- Approximate size of mark cache, used in tables of MergeTree family.
In bytes. Cache is single for server. Memory is allocated only on demand.
You should not lower this value.
-->
<mark_cache_size>5368709120</mark_cache_size>
<!-- Path to data directory, with trailing slash. -->
<path>/var/lib/clickhouse/</path>
<!-- Path to temporary data for processing hard queries. -->
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
<!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
<users_config>users.xml</users_config>
<!-- Default profile of settings.. -->
<default_profile>default</default_profile>
<!-- Default database. -->
<default_database>default</default_database>
<!-- Server time zone could be set here.
Time zone is used when converting between String and DateTime types,
when printing DateTime in text formats and parsing DateTime from text,
it is used in date and time related functions, if specific time zone was not passed as an argument.
Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
If not specified, system time zone at server startup is used.
Please note, that server could display time zone alias instead of specified name.
Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
-->
<!-- <timezone>Europe/Moscow</timezone> -->
<!-- You can specify umask here (see "man umask"). Server will apply it on startup.
Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
-->
<!-- <umask>022</umask> -->
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.yandex/reference_en.html#Distributed
-->
<remote_servers incl="clickhouse_remote_servers" >
<!-- Test only shard config for testing distributed storage -->
<test_shard_localhost>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_shard_localhost>
</remote_servers>
<!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
-->
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.yandex/reference_en.html#Data%20replication
-->
<zookeeper incl="zookeeper-servers" optional="true" />
<!-- Substitutions for parameters of replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.yandex/reference_en.html#Creating%20replicated%20tables
-->
<macros incl="macros" optional="true" />
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
<!-- Maximum session timeout, in seconds. Default: 3600. -->
<max_session_timeout>3600</max_session_timeout>
<!-- Default session timeout, in seconds. Default: 60. -->
<default_session_timeout>60</default_session_timeout>
<!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
<!--
interval - send every X second
root_path - prefix for keys
hostname_in_path - append hostname to root_path (default = true)
metrics - send data from table system.metrics
events - send data from table system.events
asynchronous_metrics - send data from table system.asynchronous_metrics
-->
<!--
<graphite>
<host>localhost</host>
<port>42000</port>
<timeout>0.1</timeout>
<interval>60</interval>
<root_path>one_min</root_path>
<hostname_in_path>true</hostname_in_path>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>true</asynchronous_metrics>
</graphite>
<graphite>
<host>localhost</host>
<port>42000</port>
<timeout>0.1</timeout>
<interval>1</interval>
<root_path>one_sec</root_path>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>false</asynchronous_metrics>
</graphite>
-->
<!-- Query log. Used only for queries with setting log_queries = 1. -->
<query_log>
<!-- What table to insert data. If table is not exist, it will be created.
When query log structure is changed after system update,
then old table will be renamed and new table will be created automatically.
-->
<database>system</database>
<table>query_log</table>
<!-- Interval of flushing data. -->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
<!-- Uncomment if use part_log
<part_log>
<database>system</database>
<table>part_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
-->
<!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
See https://clickhouse.yandex/reference_en.html#Internal%20dictionaries
-->
<!-- Path to file with region hierarchy. -->
<!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
<!-- Path to directory with files containing names of regions -->
<!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
<!-- Configuration of external dictionaries. See:
https://clickhouse.yandex/reference_en.html#External%20Dictionaries
-->
<dictionaries_config>*_dictionary.xml</dictionaries_config>
<!-- Uncomment if you want data to be compressed 30-100% better.
Don't do that if you just started using ClickHouse.
-->
<compression incl="clickhouse_compression">
<!--
<!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
<case>
<!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
<min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
<min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
<!- - What compression method to use. - ->
<method>zstd</method>
</case>
-->
</compression>
<!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
<distributed_ddl>
<!-- Path in ZooKeeper to queue with DDL queries -->
<path>/clickhouse/task_queue/ddl</path>
</distributed_ddl>
<!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
<!--
<merge_tree>
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
-->
<!-- Protection from accidental DROP.
If size of a MergeTree table is greater than max_table_size_to_drop (in bytes) than table could not be dropped with any DROP query.
If you want do delete one table and don't want to restart clickhouse-server, you could create special file <clickhouse-path>/flags/force_drop_table and make DROP once.
By default max_table_size_to_drop is 50GB, max_table_size_to_drop=0 allows to DROP any tables.
Uncomment to disable protection.
-->
<!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
<!-- Example of parameters for GraphiteMergeTree table engine -->
<graphite_rollup>
<!-- carbon -->
<pattern>
<regexp>^carbon\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>7776000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- collectd -->
<pattern>
<regexp>^collectd\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>10</precision>
</retention>
<retention>
<age>43200</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- high -->
<pattern>
<regexp>^high\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>10</precision>
</retention>
<retention>
<age>172800</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- medium -->
<pattern>
<regexp>^medium\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- low -->
<pattern>
<regexp>^low\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>600</precision>
</retention>
<retention>
<age>15552000</age>
<precision>1800</precision>
</retention>
<retention>
<age>31536000</age>
<precision>3600</precision>
</retention>
<retention>
<age>63072000</age>
<precision>21600</precision>
</retention>
<retention>
<age>126144000</age>
<precision>43200</precision>
</retention>
<retention>
<age>252288000</age>
<precision>86400</precision>
</retention>
<retention>
<age>315360000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- default -->
<default>
<function>any</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</default>
</graphite_rollup>
<!-- Directory in <clickhouse-path> containing schema files for various input formats.
The directory will be created if it doesn't exist.
-->
<format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
</yandex>


@@ -0,0 +1,113 @@
version: "3"
services:
clickhouse:
image: yandex/clickhouse-server
expose:
- 8123
- 9000
ports:
- 9001:9000
- 8123:8123
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
query-service:
image: signoz/query-service:0.4.1
container_name: query-service
restart: always
command: ["-config=/root/config/prometheus.yml"]
ports:
- "8080:8080"
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- STORAGE=clickhouse
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
depends_on:
- clickhouse
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3000:3000"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.4.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=2000"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 legacy port
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
deploy:
mode: replicated
replicas: 3
depends_on:
- clickhouse
otel-collector-hostmetrics:
image: signoz/otelcontribcol:0.4.0
command: ["--config=/etc/otel-collector-config-hostmetrics.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config-hostmetrics.yaml:/etc/otel-collector-config-hostmetrics.yaml
depends_on:
- clickhouse
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust
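Given the `deploy.replicas` key on the otel-collector service above and the `tasks.signoz_otel-collector` DNS name in the collector config that follows, this compose file appears to target Docker Swarm rather than plain Compose. A plausible way to launch it (the stack name `signoz` matches that DNS prefix, but the file name is an assumption):

```console
docker swarm init                                  # once per host
docker stack deploy -c docker-compose.yaml signoz  # file name assumed
docker stack services signoz
```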


@@ -0,0 +1,31 @@
CREATE TABLE IF NOT EXISTS signoz_index (
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
traceID String CODEC(ZSTD(1)),
spanID String CODEC(ZSTD(1)),
parentSpanID String CODEC(ZSTD(1)),
serviceName LowCardinality(String) CODEC(ZSTD(1)),
name LowCardinality(String) CODEC(ZSTD(1)),
kind Int32 CODEC(ZSTD(1)),
durationNano UInt64 CODEC(ZSTD(1)),
tags Array(String) CODEC(ZSTD(1)),
tagsKeys Array(String) CODEC(ZSTD(1)),
tagsValues Array(String) CODEC(ZSTD(1)),
statusCode Int64 CODEC(ZSTD(1)),
references String CODEC(ZSTD(1)),
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
component Nullable(String) CODEC(ZSTD(1)),
dbSystem Nullable(String) CODEC(ZSTD(1)),
dbName Nullable(String) CODEC(ZSTD(1)),
dbOperation Nullable(String) CODEC(ZSTD(1)),
peerService Nullable(String) CODEC(ZSTD(1)),
INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
INDEX idx_kind kind TYPE minmax GRANULARITY 4,
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
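As a usage sketch, the sort key `(serviceName, -toUnixTimestamp(timestamp))` makes newest-first scans within a single service cheap. The service name below is a placeholder:

```sql
-- Five slowest spans for one service over the last hour.
SELECT name, durationNano / 1000000 AS duration_ms
FROM signoz_index
WHERE serviceName = 'frontend'
  AND timestamp >= now() - INTERVAL 1 HOUR
ORDER BY durationNano DESC
LIMIT 5;
```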


@@ -0,0 +1,72 @@
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
hostmetrics:
collection_interval: 60s
scrapers:
cpu:
load:
memory:
disk:
filesystem:
network:
# Data sources: metrics
prometheus:
config:
scrape_configs:
- job_name: "otel-collector"
dns_sd_configs:
- names:
- 'tasks.signoz_otel-collector'
type: 'A'
port: 8888
- job_name: "otel-collector-hostmetrics"
scrape_interval: 10s
static_configs:
- targets: ["otel-collector-hostmetrics:8888"]
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [batch]
exporters: [clickhouse]
metrics:
receivers: [otlp, prometheus, hostmetrics]
processors: [batch]
exporters: [clickhousemetricswrite]


@@ -0,0 +1,47 @@
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [batch]
exporters: [clickhouse]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]


@@ -0,0 +1,25 @@
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
remote_read:
- url: tcp://clickhouse:9000/?database=signoz_metrics


@@ -0,0 +1,16 @@
from locust import HttpUser, task, between
# Locust load profile for the HotROD demo: each simulated user repeatedly
# hits the /dispatch endpoint with a fixed set of customer IDs.
class UserTasks(HttpUser):
wait_time = between(5, 15)
@task
def rachel(self):
self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")
@task
def trom(self):
self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")
@task
def japanese(self):
self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")
@task
def coffee(self):
self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")


@@ -0,0 +1,30 @@
server {
listen 3000;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api {
proxy_pass http://query-service:8080/api;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
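With the frontend container serving this config, the proxy can be sanity-checked from the host. The `/api/v1/services` path is an assumption about the query-service API, so substitute any endpoint you know exists:

```console
curl -I http://localhost:3000/               # static UI via nginx
curl http://localhost:3000/api/v1/services   # proxied to query-service
```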


@@ -0,0 +1,112 @@
version: "2.4"
services:
clickhouse:
image: yandex/clickhouse-server
expose:
- 8123
- 9000
ports:
- 9001:9000
- 8123:8123
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
query-service:
image: signoz/query-service:0.4.4
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
ports:
- "8080:8080"
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- STORAGE=clickhouse
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.4.4
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3000:3000"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.4.2
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 legacy port
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
- "8889:8889" # prometheus exporter
depends_on:
clickhouse:
condition: service_healthy
otel-collector-metrics:
image: signoz/otelcontribcol:0.4.2
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
depends_on:
clickhouse:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust
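Unlike the Swarm-oriented file earlier, this compose file (schema version 2.4 with `depends_on` health conditions) runs under plain Docker Compose. A typical invocation, assuming it is saved as `docker-compose.yaml` in the current directory:

```console
docker-compose up -d
docker-compose ps
```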

View File

@@ -0,0 +1,31 @@
CREATE TABLE IF NOT EXISTS signoz_index (
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
traceID String CODEC(ZSTD(1)),
spanID String CODEC(ZSTD(1)),
parentSpanID String CODEC(ZSTD(1)),
serviceName LowCardinality(String) CODEC(ZSTD(1)),
name LowCardinality(String) CODEC(ZSTD(1)),
kind Int32 CODEC(ZSTD(1)),
durationNano UInt64 CODEC(ZSTD(1)),
tags Array(String) CODEC(ZSTD(1)),
tagsKeys Array(String) CODEC(ZSTD(1)),
tagsValues Array(String) CODEC(ZSTD(1)),
statusCode Int64 CODEC(ZSTD(1)),
references String CODEC(ZSTD(1)),
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
component Nullable(String) CODEC(ZSTD(1)),
dbSystem Nullable(String) CODEC(ZSTD(1)),
dbName Nullable(String) CODEC(ZSTD(1)),
dbOperation Nullable(String) CODEC(ZSTD(1)),
peerService Nullable(String) CODEC(ZSTD(1)),
INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
INDEX idx_kind kind TYPE minmax GRANULARITY 4,
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
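
With this table in place, spans can be queried over ClickHouse's native protocol. A sketch using the third-party `clickhouse-driver` package (an assumption; any ClickHouse client works), connecting through the 9001 host mapping from the compose file above:

```python
# pip install clickhouse-driver
from clickhouse_driver import Client

client = Client(host="localhost", port=9001)  # host mapping of the native 9000 port

# p99 latency per service over the last hour, leaning on the
# (serviceName, -toUnixTimestamp(timestamp)) sort key of signoz_index.
rows = client.execute(
    """
    SELECT serviceName, quantile(0.99)(durationNano) / 1e6 AS p99_ms
    FROM signoz_index
    WHERE timestamp >= now() - INTERVAL 1 HOUR
    GROUP BY serviceName
    ORDER BY p99_ms DESC
    """
)
for service, p99_ms in rows:
    print(f"{service}: {p99_ms:.1f} ms")
```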

View File

@@ -0,0 +1,67 @@
receivers:
otlp/spanmetrics:
protocols:
grpc:
endpoint: "localhost:12345"
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
hostmetrics:
collection_interval: 30s
scrapers:
cpu:
load:
memory:
disk:
filesystem:
network:
processors:
batch:
send_batch_size: 1000
timeout: 10s
signozspanmetrics/prometheus:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
prometheus:
endpoint: "0.0.0.0:8889"
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/prometheus, batch]
exporters: [clickhouse]
metrics:
receivers: [otlp, hostmetrics]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
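
The `traces` pipeline above accepts spans on the OTLP gRPC receiver, published on host port 4317. A minimal sketch of emitting a test span with the OpenTelemetry Python SDK; the package layout follows the `opentelemetry-sdk` and `opentelemetry-exporter-otlp` distributions and may differ between SDK versions:

```python
# pip install opentelemetry-sdk opentelemetry-exporter-otlp
from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter

# Point the exporter at the collector's OTLP gRPC receiver from the config above.
provider = TracerProvider(resource=Resource.create({"service.name": "smoke-test"}))
provider.add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter(endpoint="localhost:4317", insecure=True))
)
trace.set_tracer_provider(provider)

with trace.get_tracer(__name__).start_as_current_span("hello-signoz"):
    pass  # the span is batched and flushed on shutdown

provider.shutdown()
```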

View File

@@ -0,0 +1,44 @@
receivers:
otlp:
protocols:
grpc:
http:
# Data sources: metrics
prometheus:
config:
scrape_configs:
- job_name: "otel-collector"
scrape_interval: 60s
static_configs:
- targets: ["otel-collector:8889"]
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
service:
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp, prometheus]
processors: [batch]
exporters: [clickhousemetricswrite]
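
This second collector exists only to scrape the span metrics that the trace collector's prometheus exporter publishes on port 8889 and write them to ClickHouse. You can inspect that same scrape target directly; the `signoz_` metric prefix below is an assumption about what the signozspanmetrics processor emits:

```python
import urllib.request

body = urllib.request.urlopen("http://localhost:8889/metrics", timeout=5).read()
for line in body.decode().splitlines():
    if line.startswith("signoz_"):  # assumed prefix of the generated span metrics
        print(line)
```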

View File

@@ -0,0 +1,25 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# No scrape targets are defined here; metrics are read back from ClickHouse
# via the remote_read endpoint below.
scrape_configs:
remote_read:
- url: tcp://clickhouse:9000/?database=signoz_metrics

View File

@@ -0,0 +1,16 @@
from locust import HttpUser, task, between
class UserTasks(HttpUser):
wait_time = between(5, 15)
@task
def rachel(self):
self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")
@task
def trom(self):
self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")
@task
def japanese(self):
self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")
@task
def coffee(self):
self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")

View File

@@ -0,0 +1,30 @@
server {
listen 3000;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api {
proxy_pass http://query-service:8080/api;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}

View File

@@ -0,0 +1,276 @@
version: "2.4"
volumes:
metadata_data: {}
middle_var: {}
historical_var: {}
broker_var: {}
coordinator_var: {}
router_var: {}
# If you can connect to Kafka but cannot write to the otlp_spans topic, see the link below
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
services:
zookeeper:
image: bitnami/zookeeper:3.6.2-debian-10-r100
ports:
- "2181:2181"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
# image: wurstmeister/kafka
image: bitnami/kafka:2.7.0-debian-10-r1
ports:
- "9092:9092"
hostname: kafka
environment:
KAFKA_ADVERTISED_HOST_NAME: kafka
KAFKA_ADVERTISED_PORT: 9092
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
ALLOW_PLAINTEXT_LISTENER: 'yes'
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
healthcheck:
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
interval: 30s
timeout: 10s
retries: 10
depends_on:
- zookeeper
postgres:
container_name: postgres
image: postgres:latest
volumes:
- metadata_data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=FoolishPassword
- POSTGRES_USER=druid
- POSTGRES_DB=druid
coordinator:
image: apache/druid:0.20.0
container_name: coordinator
volumes:
- ./storage:/opt/data
- coordinator_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
ports:
- "8081:8081"
command:
- coordinator
env_file:
- environment_tiny/coordinator
- environment_tiny/common
broker:
image: apache/druid:0.20.0
container_name: broker
volumes:
- broker_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8082:8082"
command:
- broker
env_file:
- environment_tiny/broker
- environment_tiny/common
historical:
image: apache/druid:0.20.0
container_name: historical
volumes:
- ./storage:/opt/data
- historical_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8083:8083"
command:
- historical
env_file:
- environment_tiny/historical
- environment_tiny/common
middlemanager:
image: apache/druid:0.20.0
container_name: middlemanager
volumes:
- ./storage:/opt/data
- middle_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8091:8091"
command:
- middleManager
env_file:
- environment_tiny/middlemanager
- environment_tiny/common
router:
image: apache/druid:0.20.0
container_name: router
volumes:
- router_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8888:8888"
command:
- router
env_file:
- environment_tiny/router
- environment_tiny/common
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
interval: 30s
timeout: 5s
retries: 5
flatten-processor:
image: signoz/flattener-processor:0.4.0
container_name: flattener-processor
depends_on:
- kafka
- otel-collector
ports:
- "8000:8000"
environment:
- KAFKA_BROKER=kafka:9092
- KAFKA_INPUT_TOPIC=otlp_spans
- KAFKA_OUTPUT_TOPIC=flattened_spans
query-service:
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
container_name: query-service
ports:
- "8080:8080"
volumes:
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- DruidClientUrl=http://router:8888
- DruidDatasource=flattened_spans
- STORAGE=druid
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
depends_on:
router:
condition: service_healthy
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3000:3000"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
create-supervisor:
image: theithollow/hollowapp-blog:curl
container_name: create-supervisor
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
set-retention:
image: theithollow/hollowapp-blog:curl
container_name: set-retention
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
otel-collector:
image: otel/opentelemetry-collector:0.18.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 legacy port
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
depends_on:
kafka:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust

View File

@@ -0,0 +1,272 @@
version: "2.4"
volumes:
metadata_data: {}
middle_var: {}
historical_var: {}
broker_var: {}
coordinator_var: {}
router_var: {}
# If you can connect to Kafka but cannot write to the otlp_spans topic, see the link below
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707
services:
zookeeper:
image: bitnami/zookeeper:3.6.2-debian-10-r100
ports:
- "2181:2181"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
# image: wurstmeister/kafka
image: bitnami/kafka:2.7.0-debian-10-r1
ports:
- "9092:9092"
hostname: kafka
environment:
KAFKA_ADVERTISED_HOST_NAME: kafka
KAFKA_ADVERTISED_PORT: 9092
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
ALLOW_PLAINTEXT_LISTENER: 'yes'
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
healthcheck:
# test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
interval: 30s
timeout: 10s
retries: 10
depends_on:
- zookeeper
postgres:
container_name: postgres
image: postgres:latest
volumes:
- metadata_data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=FoolishPassword
- POSTGRES_USER=druid
- POSTGRES_DB=druid
coordinator:
image: apache/druid:0.20.0
container_name: coordinator
volumes:
- ./storage:/opt/druid/deepStorage
- coordinator_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
ports:
- "8081:8081"
command:
- coordinator
env_file:
- environment_small/coordinator
broker:
image: apache/druid:0.20.0
container_name: broker
volumes:
- broker_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8082:8082"
command:
- broker
env_file:
- environment_small/broker
historical:
image: apache/druid:0.20.0
container_name: historical
volumes:
- ./storage:/opt/druid/deepStorage
- historical_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8083:8083"
command:
- historical
env_file:
- environment_small/historical
middlemanager:
image: apache/druid:0.20.0
container_name: middlemanager
volumes:
- ./storage:/opt/druid/deepStorage
- middle_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8091:8091"
command:
- middleManager
env_file:
- environment_small/middlemanager
router:
image: apache/druid:0.20.0
container_name: router
volumes:
- router_var:/opt/druid/data
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8888:8888"
command:
- router
env_file:
- environment_small/router
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
interval: 30s
timeout: 5s
retries: 5
flatten-processor:
image: signoz/flattener-processor:0.4.0
container_name: flattener-processor
depends_on:
- kafka
- otel-collector
ports:
- "8000:8000"
environment:
- KAFKA_BROKER=kafka:9092
- KAFKA_INPUT_TOPIC=otlp_spans
- KAFKA_OUTPUT_TOPIC=flattened_spans
query-service:
image: signoz.docker.scarf.sh/signoz/query-service:0.4.1
container_name: query-service
ports:
- "8080:8080"
volumes:
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- DruidClientUrl=http://router:8888
- DruidDatasource=flattened_spans
- STORAGE=druid
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
depends_on:
router:
condition: service_healthy
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3000:3000"
volumes:
- ./nginx-config.conf:/etc/nginx/conf.d/default.conf
create-supervisor:
image: theithollow/hollowapp-blog:curl
container_name: create-supervisor
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json
set-retention:
image: theithollow/hollowapp-blog:curl
container_name: set-retention
command:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
depends_on:
- router
restart: on-failure:6
volumes:
- ./druid-jobs/retention-spec.json:/app/retention-spec.json
otel-collector:
image: otel/opentelemetry-collector:0.18.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 leagcy grpc receiver
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
depends_on:
kafka:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ./locust-scripts:/locust

View File

@@ -0,0 +1 @@
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]

View File

@@ -0,0 +1,69 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "flattened_spans",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "StartTimeUnixNano",
"format": "nano"
},
"dimensionsSpec": {
"dimensions": [
"TraceId",
"SpanId",
"ParentSpanId",
"Name",
"ServiceName",
"References",
"Tags",
"ExternalHttpMethod",
"ExternalHttpUrl",
"Component",
"DBSystem",
"DBName",
"DBOperation",
"PeerService",
{
"type": "string",
"name": "TagsKeys",
"multiValueHandling": "ARRAY"
},
{
"type": "string",
"name": "TagsValues",
"multiValueHandling": "ARRAY"
},
{ "name": "DurationNano", "type": "Long" },
{ "name": "Kind", "type": "int" },
{ "name": "StatusCode", "type": "int" }
]
}
}
},
"metricsSpec" : [
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": "NONE",
"rollup": false
}
},
"tuningConfig": {
"type": "kafka",
"reportParseExceptions": true
},
"ioConfig": {
"topic": "flattened_spans",
"replicas": 1,
"taskDuration": "PT20M",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "kafka:9092"
}
}
}

View File

@@ -0,0 +1,53 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=768m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=768m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=100MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,52 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,53 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=1280m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=1280m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=2
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,53 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=1g
DRUID_XMS=1g
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=2g
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=2g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=200MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=2
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,52 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=128m
DRUID_XMS=128m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms128m", "-Xmx128m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,52 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,26 @@
# For S3 storage
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
# druid_storage_type=s3
# druid_storage_bucket=<s3-bucket-name>
# druid_storage_baseKey=druid/segments
# AWS_ACCESS_KEY_ID=<s3-access-id>
# AWS_SECRET_ACCESS_KEY=<s3-access-key>
# AWS_REGION=<s3-aws-region>
# druid_indexer_logs_type=s3
# druid_indexer_logs_s3Bucket=<s3-bucket-name>
# druid_indexer_logs_s3Prefix=druid/indexing-logs
# -----------------------------------------------------------
# For local storage
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_storage_type=local
druid_storage_storageDirectory=/opt/data/segments
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/data/indexing-logs

View File

@@ -0,0 +1,49 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,49 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=512m
DRUID_XMS=512m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,50 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,49 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
DRUID_XMX=64m
DRUID_XMS=64m
DRUID_MAXNEWSIZE=256m
DRUID_NEWSIZE=256m
DRUID_MAXDIRECTMEMORYSIZE=128m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>

View File

@@ -0,0 +1,51 @@
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
queued_retry:
num_workers: 4
queue_size: 100
retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
kafka/traces:
brokers:
- kafka:9092
topic: 'otlp_spans'
protocol_version: 2.0.0
kafka/metrics:
brokers:
- kafka:9092
topic: 'otlp_metrics'
protocol_version: 2.0.0
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [memory_limiter, batch, queued_retry]
exporters: [kafka/traces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [kafka/metrics]
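
With this pipeline, spans leave the collector as OTLP protobuf messages on the otlp_spans topic. A sketch of confirming that bytes are flowing, using the third-party `kafka-python` package (an assumption; any Kafka client works):

```python
# pip install kafka-python
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    "otlp_spans",
    bootstrap_servers="localhost:9092",  # host mapping from the compose files above
    auto_offset_reset="earliest",
    consumer_timeout_ms=10000,  # stop iterating after 10 s of silence
)
for message in consumer:
    # Payloads are OTLP protobuf; printing sizes is enough for a smoke test.
    print(f"offset={message.offset} size={len(message.value)} bytes")
```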

deploy/install.sh Executable file
View File

@@ -0,0 +1,511 @@
#!/bin/bash
set -o errexit
# Regular Colors
Black='\033[0;30m' # Black
Red='\033[0;31m' # Red
Green='\033[0;32m' # Green
Yellow='\033[0;33m' # Yellow
Blue='\033[0;34m' # Blue
Purple='\033[0;35m' # Purple
Cyan='\033[0;36m' # Cyan
White='\033[0;37m' # White
NC='\033[0m' # No Color
is_command_present() {
type "$1" >/dev/null 2>&1
}
# Check whether 'wget' command exists.
has_wget() {
has_cmd wget
}
# Check whether 'curl' command exists.
has_curl() {
has_cmd curl
}
# Check whether the given command exists.
has_cmd() {
command -v "$1" > /dev/null 2>&1
}
is_mac() {
[[ $OSTYPE == darwin* ]]
}
check_os() {
if is_mac; then
package_manager="brew"
desired_os=1
os="Mac"
return
fi
os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"
case "$os_name" in
Ubuntu*)
desired_os=1
os="ubuntu"
package_manager="apt-get"
;;
Amazon\ Linux*)
desired_os=1
os="amazon linux"
package_manager="yum"
;;
Debian*)
desired_os=1
os="debian"
package_manager="apt-get"
;;
Linux\ Mint*)
desired_os=1
os="linux mint"
package_manager="apt-get"
;;
Red\ Hat*)
desired_os=1
os="red hat"
package_manager="yum"
;;
CentOS*)
desired_os=1
os="centos"
package_manager="yum"
;;
SLES*)
desired_os=1
os="sles"
package_manager="zypper"
;;
openSUSE*)
desired_os=1
os="opensuse"
package_manager="zypper"
;;
*)
desired_os=0
os="Not Found: $os_name"
esac
}
# This function checks if the relevant ports required by SigNoz are available or not
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="80|3000|8080"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
elif is_command_present ss; then
# The `ss` command seems to be a better/faster version of `netstat`, but is not available on all Linux
# distributions by default. Other distributions have `ss` but no `netstat`. So, we try for `ss` first, then
# fallback to `netstat`.
port_check_output="$(ss --all --numeric --tcp | awk '$1 == "LISTEN" && $4 ~ /^.*:('"$ports_pattern"')$/')"
elif is_command_present netstat; then
port_check_output="$(netstat --all --numeric --tcp | awk '$6 == "LISTEN" && $4 ~ /^.*:('"$ports_pattern"')$/')"
fi
if [[ -n $port_check_output ]]; then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "port not available" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports."
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "++++++++++++++++++++++++++++++++++++++++"
echo ""
exit 1
fi
}
install_docker() {
echo "++++++++++++++++++++++++"
echo "Setting up docker repos"
if [[ $package_manager == apt-get ]]; then
apt_cmd="sudo apt-get --yes --quiet"
$apt_cmd update
$apt_cmd install software-properties-common gnupg-agent
curl -fsSL "https://download.docker.com/linux/$os/gpg" | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
$apt_cmd update
echo "Installing docker"
$apt_cmd install docker-ce docker-ce-cli containerd.io
elif [[ $package_manager == zypper ]]; then
zypper_cmd="sudo zypper --quiet --no-gpg-checks --non-interactive"
echo "Installing docker"
if [[ $os == sles ]]; then
os_sp="$(cat /etc/*-release | awk -F= '$1 == "VERSION_ID" { gsub(/"/, ""); print $2; exit }')"
os_arch="$(uname -i)"
sudo SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
fi
$zypper_cmd install docker docker-runc containerd
sudo systemctl enable docker.service
elif [[ $package_manager == yum && $os == 'amazon linux' ]]; then
echo
echo "Amazon Linux detected ... "
echo
sudo yum install docker
sudo service docker start
else
yum_cmd="sudo yum --assumeyes --quiet"
$yum_cmd install yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
echo "Installing docker"
$yum_cmd install docker-ce docker-ce-cli containerd.io
fi
}
install_docker_machine() {
echo "\nInstalling docker machine ..."
if [[ $os == "Mac" ]];then
curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine
chmod +x /usr/local/bin/docker-machine
else
curl -sL https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine
chmod +x /tmp/docker-machine
sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
fi
}
install_docker_compose() {
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
if [[ ! -f /usr/bin/docker-compose ]];then
echo "++++++++++++++++++++++++"
echo "Installing docker-compose"
sudo curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
echo "docker-compose installed!"
echo ""
fi
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
echo "docker-compose not found! Please install docker-compose first and then continue with this installation."
echo "Refer https://docs.docker.com/compose/install/ for installing docker-compose."
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
exit 1
fi
}
start_docker() {
echo "Starting Docker ..."
if [ $os = "Mac" ]; then
open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
else
if ! sudo systemctl is-active docker.service > /dev/null; then
echo "Starting docker service"
sudo systemctl start docker.service
fi
fi
}
wait_for_containers_start() {
local timeout=$1
# A while loop is used because the timeout changes on each iteration; a bash for-loop expands its range once up front
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/v1/services/list || true)"
if [[ $status_code -eq 200 ]]; then
break
else
if [ $setup_type == 'druid' ]; then
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
LEN_SUPERVISORS="${#SUPERVISORS}"
if [[ $LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]]; then
echo -e "\n🟠 Supervisors taking time to start ⏳ ... let's wait for some more time ⏱️\n\n"
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d
fi
fi
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
fi
((timeout--))
sleep 1
done
echo ""
}
bye() { # Prints a friendly good bye message and exits the script.
if [ "$?" -ne 0 ]; then
set +o errexit
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
echo -e "\n📨 Please share your email to receive support with the installation"
read -rp 'Email: ' email
while [[ $email == "" ]]
do
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo ""
echo -e "\nWe will reach out to you at the email provided shortly, Exiting for now. Bye! 👋 \n"
exit 0
fi
}
echo -e "👋 Thank you for trying out SigNoz! "
echo ""
# Checking OS and assigning package manager
desired_os=0
os=""
echo -e "Detecting your OS ..."
check_os
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
echo ""
echo -e "👉 ${RED}Two ways to go forward\n"
echo -e "${RED}1) ClickHouse as database (default)\n"
echo -e "${RED}2) Kafka + Druid as datastore \n"
read -p "⚙️ Enter your preference (1/2):" choice_setup
while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
do
# echo $choice_setup
echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
read -p "⚙️ Enter your preference (1/2): " choice_setup
# echo $choice_setup
done
if [[ $choice_setup == "1" || $choice_setup == "" ]];then
setup_type='clickhouse'
else
setup_type='druid'
fi
echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
# Run bye if failure happens
trap bye EXIT
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
if [[ $desired_os -eq 0 ]];then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
fi
# check_ports_occupied
# Check if the Docker daemon is installed and available. If not, install & start Docker on Linux machines. We cannot automatically install Docker Desktop on Mac OS
if ! is_command_present docker; then
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
install_docker
else
echo ""
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
echo "https://docs.docker.com/docker-for-mac/install/"
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
exit 1
fi
fi
# Install docker-compose
if ! is_command_present docker-compose; then
install_docker_compose
fi
start_docker
# sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
fi
echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
if [ $setup_type == 'clickhouse' ]; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
fi
wait_for_containers_start 60
echo ""
if [[ $status_code -ne 200 ]]; then
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
if [[ $setup_type == 'clickhouse' ]]; then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "data": "some_checks", "setup_type": "'"$setup_type"'" } }'
else
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"', "setup_type": "'"$setup_type"'" } }'
fi
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
exit 1
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"}, "setup_type": "'"$setup_type"'" }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
echo ""
echo "🟢 Your installation is complete!"
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3000"
echo ""
if [[ $setup_type == 'clickhouse' ]]; then
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v"
else
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
fi
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "👉 Need help Getting Started?"
echo -e "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo ""
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
read -rp 'Email: ' email
while [[ $email == "" ]]
do
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
fi
echo -e "\n🙏 Thank you!\n"


@@ -26,4 +26,4 @@ spec:
name: retention-config
restartPolicy: Never
backoffLimit: 4
backoffLimit: 8


@@ -5,55 +5,72 @@ metadata:
data:
supervisor-spec.json: |
{
"type": "kafka",
"dataSchema": {
"dataSource": "flattened_spans",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "StartTimeUnixNano",
"format": "nano"
},
"dimensionsSpec": {
"dimensions": [
"TraceId",
"SpanId",
"ParentSpanId",
"Name",
"ServiceName",
"References",
"Tags",
"TagsKeys",
"TagsValues",
{ "name": "DurationNano", "type": "Long" },
{ "name": "Kind", "type": "int" }
]
"type": "kafka",
"dataSchema": {
"dataSource": "flattened_spans",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "StartTimeUnixNano",
"format": "nano"
},
"dimensionsSpec": {
"dimensions": [
"TraceId",
"SpanId",
"ParentSpanId",
"Name",
"ServiceName",
"References",
"Tags",
"ExternalHttpMethod",
"ExternalHttpUrl",
"Component",
"DBSystem",
"DBName",
"DBOperation",
"PeerService",
{
"type": "string",
"name": "TagsKeys",
"multiValueHandling": "ARRAY"
},
{
"type": "string",
"name": "TagsValues",
"multiValueHandling": "ARRAY"
},
{ "name": "DurationNano", "type": "Long" },
{ "name": "Kind", "type": "int" },
{ "name": "StatusCode", "type": "int" }
]
}
}
},
"metricsSpec" : [
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": "NONE",
"rollup": false
}
},
"metricsSpec" : [
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": "NONE",
"rollup": false
}
},
"tuningConfig": {
"type": "kafka",
"reportParseExceptions": true
},
"ioConfig": {
"topic": "flattened_spans",
"replicas": 1,
"taskDuration": "PT20M",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "signoz-kafka:9092"
"tuningConfig": {
"type": "kafka",
"reportParseExceptions": true
},
"ioConfig": {
"topic": "flattened_spans",
"replicas": 1,
"taskDuration": "PT20M",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "signoz-kafka:9092"
}
}
}
}


@@ -24,4 +24,4 @@ spec:
configMap:
name: supervisor-config
restartPolicy: Never
backoffLimit: 4
backoffLimit: 8


@@ -8,12 +8,18 @@ metadata:
data:
otel-collector-config: |
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
@@ -30,14 +36,25 @@ data:
health_check: {}
zpages: {}
exporters:
kafka:
kafka/traces:
brokers:
- signoz-kafka:9092
topic: 'otlp_spans'
protocol_version: 2.0.0
kafka/metrics:
brokers:
- signoz-kafka:9092
topic: 'otlp_metrics'
protocol_version: 2.0.0
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger]
receivers: [jaeger, otlp]
processors: [memory_limiter, batch, queued_retry]
exporters: [kafka]
exporters: [kafka/traces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [kafka/metrics]


@@ -25,7 +25,7 @@ spec:
- "--config=/conf/otel-collector-config.yaml"
# Memory Ballast size should be max 1/3 to 1/2 of memory (the 683 MiB below corresponds to roughly one third of a 2 GiB limit).
- "--mem-ballast-size-mib=683"
image: otel/opentelemetry-collector-dev:latest
image: otel/opentelemetry-collector:0.18.0
name: otel-collector
resources:
limits:
@@ -37,7 +37,9 @@ spec:
ports:
- containerPort: 55679 # Default endpoint for ZPages.
- containerPort: 55680 # Default endpoint for OpenTelemetry receiver.
- containerPort: 14250 # Default endpoint for Jaeger HTTP receiver.
- containerPort: 55681 # Default endpoint for OpenTelemetry HTTP/1.0 receiver.
- containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver.
- containerPort: 14250 # Default endpoint for Jaeger GRPC receiver.
- containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
- containerPort: 9411 # Default endpoint for Zipkin receiver.
- containerPort: 8888 # Default endpoint for querying metrics.


@@ -11,6 +11,14 @@ spec:
port: 55680
protocol: TCP
targetPort: 55680
- name: otlp-http-legacy # Legacy endpoint for OpenTelemetry HTTP receiver.
port: 55681
protocol: TCP
targetPort: 55681
- name: otlp-grpc # Default endpoint for OpenTelemetry receiver.
port: 4317
protocol: TCP
targetPort: 4317
# 
- name: jaeger-grpc # Default endpoint for Jaeger gRPC receiver
port: 14250
- name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver.


@@ -10,12 +10,12 @@ dependencies:
version: 0.2.18
- name: flattener-processor
repository: file://./signoz-charts/flattener-processor
version: 0.1.1
version: 0.3.6
- name: query-service
repository: file://./signoz-charts/query-service
version: 0.1.1
version: 0.3.6
- name: frontend
repository: file://./signoz-charts/frontend
version: 0.1.6
digest: sha256:1acfbd4b86e7ca6b70101f7dc3b2ab26aa5e72df2f454800f6dda7e645580978
generated: "2021-01-07T17:50:04.227534+05:30"
version: 0.3.6
digest: sha256:b160e903c630a90644683c512eb8ba018e18d2c08051e255edd3749cb9cc7228
generated: "2021-08-23T12:06:37.231066+05:30"


@@ -15,12 +15,12 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.6
version: 0.3.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.1.1
appVersion: 0.3.2
dependencies:
- name: zookeeper
@@ -34,10 +34,10 @@ dependencies:
version: 0.2.18
- name: flattener-processor
repository: "file://./signoz-charts/flattener-processor"
version: 0.1.1
version: 0.3.6
- name: query-service
repository: "file://./signoz-charts/query-service"
version: 0.1.1
version: 0.3.6
- name: frontend
repository: "file://./signoz-charts/frontend"
version: 0.1.6
version: 0.3.6


@@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.1
version: 0.3.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.1.1
appVersion: 0.3.6


@@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.6
version: 0.3.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.1.7
appVersion: 0.3.6


@@ -9,6 +9,16 @@ data:
server {
listen {{ .Values.service.port }};
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;
index index.html index.htm;


@@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.1
version: 0.3.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.1.3
appVersion: 0.3.6


@@ -36,7 +36,8 @@ spec:
value: {{ .Values.configVars.DruidClientUrl }}
- name: DruidDatasource
value: {{ .Values.configVars.DruidDatasource }}
- name: STORAGE
value: {{ .Values.configVars.STORAGE }}
# livenessProbe:
# httpGet:


@@ -16,6 +16,7 @@ fullnameOverride: ""
configVars:
DruidClientUrl: http://signoz-druid-router:8888
DruidDatasource: flattened_spans
STORAGE: druid
POSTHOG_API_KEY: "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w"


@@ -1,3 +1,7 @@
zookeeper:
autopurge:
purgeInterval: 1
kafka:
zookeeper:
enabled: false
@@ -6,9 +10,18 @@ kafka:
zookeeperConnectionTimeoutMs: 6000
druid:
image:
tag: 0.21.1-rc2
configVars:
# To store data on locally attached disks
druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]'
druid_storage_type: local
# # To store data in S3
# druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]'
# druid_storage_type: s3
# druid_storage_bucket: signoz-druid
# druid_storage_baseKey: baseKey
@@ -18,7 +31,7 @@ druid:
historical:
persistence:
size: "4Gi"
size: "20Gi"
zkHosts: "signoz-zookeeper:2181"
@@ -35,3 +48,4 @@ query-service:
configVars:
DruidClientUrl: http://signoz-druid-router:8888
DruidDatasource: flattened_spans
STORAGE: druid

frontend/.babelrc

@@ -0,0 +1,16 @@
{
"presets": [
"@babel/preset-env",
"@babel/preset-react",
"@babel/preset-typescript"
],
"plugins": [
"react-hot-loader/babel",
"@babel/plugin-proposal-class-properties"
],
"env": {
"production": {
"presets": ["minify"]
}
}
}


@@ -1,3 +1,4 @@
node_modules
.vscode
build
build
.env

frontend/.eslintignore

@@ -0,0 +1,2 @@
node_modules
build

frontend/.eslintrc.js

@@ -0,0 +1,62 @@
module.exports = {
env: {
browser: true,
es2021: true,
node: true,
},
extends: [
'eslint:recommended',
'plugin:react/recommended',
'plugin:@typescript-eslint/recommended',
'plugin:@typescript-eslint/eslint-recommended',
'plugin:prettier/recommended',
],
parser: '@typescript-eslint/parser',
parserOptions: {
ecmaFeatures: {
jsx: true,
},
ecmaVersion: 12,
sourceType: 'module',
},
plugins: [
'react',
'@typescript-eslint',
'simple-import-sort',
'react-hooks',
'prettier',
],
settings: {
react: {
version: 'latest',
},
},
rules: {
'react/jsx-filename-extension': [
'error',
{
extensions: ['.tsx', '.js', '.jsx'],
},
],
'react/prop-types': 'off',
'@typescript-eslint/explicit-function-return-type': 'error',
'@typescript-eslint/no-var-requires': 0,
'linebreak-style': ['error', 'unix'],
// simple sort error
'simple-import-sort/imports': 'error',
'simple-import-sort/exports': 'error',
// hooks
'react-hooks/rules-of-hooks': 'error',
'react-hooks/exhaustive-deps': 'warn',
'prettier/prettier': [
'error',
{},
{
usePrettierrc: true,
},
],
},
};
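With `simple-import-sort/imports` set to `error`, the plugin enforces a fixed import order. A small illustration of imports that would pass the check, assuming the plugin's default grouping (the module names are taken from files elsewhere in this diff, the combination is made up):

```
// Illustrative only: an import order that passes simple-import-sort defaults.
// 'constants/routes' counts as a package-style path here because it starts
// with a word character, so it sorts alphabetically alongside real packages;
// relative imports form a later group.
import ROUTES from 'constants/routes';
import React from 'react';
import { useSelector } from 'react-redux';

import Login from '../CustomFunctions/Login';
```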

frontend/.npmrc

@@ -0,0 +1 @@
registry = 'https://registry.npmjs.org/'

frontend/.nvmrc

@@ -0,0 +1 @@
12.13.0


@@ -0,0 +1,8 @@
{
"trailingComma": "all",
"useTabs": true,
"tabWidth": 1,
"singleQuote": true,
"jsxSingleQuote": false,
"semi": true
}
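Under the Prettier config above, formatted output uses tabs, single quotes, semicolons, and trailing commas everywhere. A tiny TypeScript example of what that looks like in practice (the object itself is just a sample):

```
// Sample output under the .prettierrc above: tab indentation, single quotes,
// a semicolon at the end, and a trailing comma after the last property.
const sampleOptions = {
	trailingComma: 'all',
	useTabs: true,
};
```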


@@ -1,21 +1,26 @@
# stage1 as builder
FROM node:14-alpine as builder
FROM node:12.18.0 as builder
# Add Maintainer Info
LABEL maintainer="signoz"
ARG TARGETOS=linux
ARG TARGETARCH
WORKDIR /frontend
# copy the package.json to install dependencies
COPY package.json ./
# Install the dependencies and make the folder
RUN npm install && mkdir /react-ui && mv ./node_modules ./react-ui
WORKDIR /react-ui
RUN yarn install
COPY . .
# Build the project and copy the files
RUN npm run build
RUN yarn build
FROM nginx:1.15-alpine
FROM nginx:1.18-alpine
#!/bin/sh
@@ -25,7 +30,7 @@ COPY conf/default.conf /etc/nginx/conf.d/default.conf
RUN rm -rf /usr/share/nginx/html/*
# Copy from stage 1
COPY --from=builder /react-ui/build /usr/share/nginx/html
COPY --from=builder /frontend/build /usr/share/nginx/html
EXPOSE 3000


@@ -1,3 +1,38 @@
# Configuring Locally
1. With Docker
1. Without Docker
## With Docker
**Building the image**
`docker-compose up` (this will also run the app)
or
`docker build . -t tagname`
**Tag to remote URL - introduce versioning later on**
```
docker tag signoz/frontend:latest 7296823551/signoz:latest
```
```
docker-compose up
```
## Without Docker
Follow the steps below
1. ```git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend```
1. change baseURL to ```<test environment URL>``` in the file ```src/constants/env.ts``` (see the sketch below these steps)
1. ```yarn install```
1. ```yarn dev```
```Note: Please ping us in the #contributing channel in our Slack community and we will DM you the <test environment URL>```
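The contents of ```src/constants/env.ts``` are not part of this diff; the sketch below is purely illustrative of the shape step 2 edits, with the placeholder left exactly as the note above describes:

```
// Illustrative sketch only - the real src/constants/env.ts is not shown in
// this diff and may differ. Swap baseURL for the <test environment URL>
// shared with you on Slack.
const ENVIRONMENT = {
	baseURL: '<test environment URL>',
};

export default ENVIRONMENT;
```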
# Getting Started with Create React App
This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).


@@ -0,0 +1 @@
export default {};


@@ -1,6 +1,15 @@
server {
listen 3000;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;

frontend/cypress.json

@@ -0,0 +1,3 @@
{
"video": false
}


@@ -0,0 +1,47 @@
const Login = ({ email, name }: LoginProps): void => {
const emailInput = cy.findByPlaceholderText('mike@netflix.com');
emailInput.then((emailInput) => {
const element = emailInput[0];
// element is present
expect(element).not.undefined;
expect(element.nodeName).to.be.equal('INPUT');
});
emailInput.type(email).then((inputElements) => {
const inputElement = inputElements[0];
const inputValue = inputElement.getAttribute('value');
expect(inputValue).to.be.equals(email);
});
const firstNameInput = cy.findByPlaceholderText('Mike');
firstNameInput.then((firstNameInput) => {
const element = firstNameInput[0];
// element is present
expect(element).not.undefined;
expect(element.nodeName).to.be.equal('INPUT');
});
firstNameInput.type(name).then((inputElements) => {
const inputElement = inputElements[0];
const inputValue = inputElement.getAttribute('value');
expect(inputValue).to.be.equals(name);
});
const gettingStartedButton = cy.findByText('Get Started');
gettingStartedButton.click();
cy
.intercept('POST', '/api/v1/user?email*', {
statusCode: 200,
})
.as('defaultUser');
cy.wait('@defaultUser');
};
export interface LoginProps {
email: string;
name: string;
}
export default Login;


@@ -0,0 +1,49 @@
import {
getDefaultOption,
getOptions,
} from 'container/Header/DateTimeSelection/config';
// import { AppState } from 'store/reducers';
const CheckRouteDefaultGlobalTimeOptions = ({
route,
}: CheckRouteDefaultGlobalTimeOptionsProps): void => {
cy.visit(Cypress.env('baseUrl') + route);
const allOptions = getOptions(route);
const defaultValue = getDefaultOption(route);
const defaultSelectedOption = allOptions.find((e) => e.value === defaultValue);
expect(defaultSelectedOption).not.undefined;
cy
.findAllByTestId('dropDown')
.find('span')
.then((el) => {
const elements = el.get();
const item = elements[1];
expect(defaultSelectedOption?.label).to.be.equals(
item.innerText,
'Default option is not matching',
);
});
// cy
// .window()
// .its('store')
// .invoke('getState')
// .then((e: AppState) => {
// const { globalTime } = e;
// const { maxTime, minTime } = globalTime;
// // @TODO match the global min time and max time according to the selected option
// });
};
export interface CheckRouteDefaultGlobalTimeOptionsProps {
route: string;
}
export default CheckRouteDefaultGlobalTimeOptions;


@@ -0,0 +1,35 @@
[
{
"serviceName": "frontend",
"p99": 1134610000,
"avgDuration": 744523000,
"numCalls": 267,
"callRate": 0.89,
"numErrors": 0,
"errorRate": 0,
"num4XX": 0,
"fourXXRate": 0
},
{
"serviceName": "customer",
"p99": 734422400,
"avgDuration": 348678530,
"numCalls": 267,
"callRate": 0.89,
"numErrors": 0,
"errorRate": 0,
"num4XX": 0,
"fourXXRate": 0
},
{
"serviceName": "driver",
"p99": 239234080,
"avgDuration": 204662290,
"numCalls": 267,
"callRate": 0.89,
"numErrors": 0,
"errorRate": 0,
"num4XX": 0,
"fourXXRate": 0
}
]


@@ -0,0 +1 @@
{ "status": "success", "data": { "resultType": "matrix", "result": [] } }


@@ -0,0 +1,29 @@
{
"status": "success",
"data": {
"resultType": "matrix",
"result": [
{
"metric": {},
"values": [
[1634741764.961, "0.9"],
[1634741824.961, "0.9"],
[1634741884.961, "0.8666666666666667"],
[1634741944.961, "1"],
[1634742004.961, "0.9166666666666666"],
[1634742064.961, "0.95"],
[1634742124.961, "0.9333333333333333"],
[1634742184.961, "0.95"],
[1634742244.961, "1.0333333333333334"],
[1634742304.961, "0.9333333333333333"],
[1634742364.961, "0.9166666666666666"],
[1634742424.961, "0.9"],
[1634742484.961, "1.0166666666666666"],
[1634742544.961, "0.8333333333333334"],
[1634742604.961, "0.9166666666666666"],
[1634742664.961, "0.95"]
]
}
]
}
}


@@ -0,0 +1,62 @@
[
{
"timestamp": 1634742600000000000,
"p50": 720048500,
"p95": 924409540,
"p99": 974744300,
"numCalls": 48,
"callRate": 0.8,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742540000000000,
"p50": 712614000,
"p95": 955580700,
"p99": 1045595400,
"numCalls": 59,
"callRate": 0.98333335,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742480000000000,
"p50": 720842000,
"p95": 887187600,
"p99": 943676860,
"numCalls": 53,
"callRate": 0.8833333,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742420000000000,
"p50": 712287000,
"p95": 908505540,
"p99": 976507650,
"numCalls": 58,
"callRate": 0.96666664,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742360000000000,
"p50": 697125500,
"p95": 975581800,
"p99": 1190121900,
"numCalls": 54,
"callRate": 0.9,
"numErrors": 0,
"errorRate": 0
},
{
"timestamp": 1634742300000000000,
"p50": 711592500,
"p95": 880559900,
"p99": 1100105500,
"numCalls": 40,
"callRate": 0.6666667,
"numErrors": 0,
"errorRate": 0
}
]


@@ -0,0 +1,9 @@
[
{
"p50": 710824000,
"p95": 1003231400,
"p99": 1231265500,
"numCalls": 299,
"name": "HTTP GET /dispatch"
}
]


@@ -0,0 +1,24 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
describe('App Layout', () => {
beforeEach(() => {
cy.visit(Cypress.env('baseUrl'));
});
it('Check the user is in Logged Out State', () => {
cy.location('pathname').then((e) => {
expect(e).to.be.equal(ROUTES.SIGN_UP);
});
});
it('Logged In State', () => {
const testEmail = 'test@test.com';
const firstName = 'Test';
cy.login({
email: testEmail,
name: firstName,
});
});
});


@@ -0,0 +1,44 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
describe('default time', () => {
beforeEach(() => {
window.localStorage.setItem('isLoggedIn', 'yes');
});
it('Metrics Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.APPLICATION,
});
});
it('Dashboard Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.ALL_DASHBOARD,
});
});
it('Trace Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.TRACES,
});
});
it('Instrumentation Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.INSTRUMENTATION,
});
});
it('Service Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.SERVICE_MAP,
});
});
it('Settings Page default time', () => {
cy.checkDefaultGlobalOption({
route: ROUTES.SETTINGS,
});
});
});


@@ -0,0 +1,126 @@
/// <reference types="cypress" />
import getGlobalDropDownFormatedDate from 'lib/getGlobalDropDownFormatedDate';
import { AppState } from 'store/reducers';
import topEndPoints from '../../fixtures/topEndPoints.json';
describe('Global Time Metrics Application', () => {
beforeEach(() => {
cy.visit(Cypress.env('baseUrl'));
const testEmail = 'test@test.com';
const firstName = 'Test';
cy.login({
email: testEmail,
name: firstName,
});
});
it('Metrics Application', () => {
cy
.intercept('GET', '/api/v1/services*', {
fixture: 'defaultApp.json',
})
.as('defaultApps');
cy.wait('@defaultApps');
//clicking on frontend
cy.get('tr:nth-child(1) > td:first-child').click();
cy
.intercept('GET', '/api/v1/service/top_endpoints*', {
fixture: 'topEndPoints.json',
})
.as('topEndPoints');
cy
.intercept('GET', '/api/v1/service/overview?*', {
fixture: 'serviceOverview.json',
})
.as('serviceOverview');
cy
.intercept(
'GET',
`/api/v1/query_range?query=sum(rate(signoz_latency_count*`,
{
fixture: 'requestPerSecond.json',
},
)
.as('requestPerSecond');
cy
.window()
.its('store')
.invoke('getState')
.then((e: AppState) => {
const { globalTime } = e;
const { maxTime, minTime } = globalTime;
// intercepting metrics application call
cy.wait('@topEndPoints');
cy.wait('@serviceOverview');
//TODO add errorPercentage also
// cy.wait('@errorPercentage');
cy.wait('@requestPerSecond');
cy
.get('tbody tr:first-child td:first-child')
.then((el) => {
const elements = el.get();
expect(elements.length).to.be.equals(1);
const element = elements[0];
expect(element.innerText).to.be.equals(topEndPoints[0].name);
})
.click();
cy
.findAllByTestId('dropDown')
.find('span.ant-select-selection-item')
.then((e) => {
const elements = e;
const element = elements[0];
const customSelectedTime = element.innerText;
const startTime = new Date(minTime / 1000000);
const endTime = new Date(maxTime / 1000000);
const startString = getGlobalDropDownFormatedDate(startTime);
const endString = getGlobalDropDownFormatedDate(endTime);
const result = `${startString} - ${endString}`;
expect(customSelectedTime).to.be.equals(result);
});
cy
.findByTestId('dropDown')
.click()
.then(() => {
cy.findByTitle('Last 30 min').click();
});
cy
.findByTestId('dropDown')
.find('span.ant-select-selection-item')
.then((e) => {
const elements = e;
const element = elements[0];
const selectedTime = element.innerText;
expect(selectedTime).to.be.equals('Last 30 min');
});
});
});
});


@@ -0,0 +1,67 @@
/// <reference types="cypress" />
import ROUTES from 'constants/routes';
import convertToNanoSecondsToSecond from 'lib/convertToNanoSecondsToSecond';
import defaultApps from '../../fixtures/defaultApp.json';
describe('Metrics', () => {
beforeEach(() => {
cy.visit(Cypress.env('baseUrl'));
const testEmail = 'test@test.com';
const firstName = 'Test';
cy.login({
email: testEmail,
name: firstName,
});
});
it('Default Apps', () => {
cy
.intercept('GET', '/api/v1/services*', {
fixture: 'defaultApp.json',
})
.as('defaultApps');
cy.wait('@defaultApps');
cy.location().then((e) => {
expect(e.pathname).to.be.equals(ROUTES.APPLICATION);
cy.get('tbody').then((elements) => {
const trElements = elements.children();
expect(trElements.length).to.be.equal(defaultApps.length);
const getChildren = (row: Element): Element => {
if (row.children.length === 0) {
return row;
}
return getChildren(row.children[0]);
};
// this is row element
trElements.map((index, element) => {
const [
applicationElement,
p99Element,
errorRateElement,
rpsElement,
] = element.children;
const applicationName = getChildren(applicationElement).innerHTML;
const p99Name = getChildren(p99Element).innerHTML;
const errorRateName = getChildren(errorRateElement).innerHTML;
const rpsName = getChildren(rpsElement).innerHTML;
const { serviceName, p99, errorRate, callRate } = defaultApps[index];
expect(applicationName).to.be.equal(serviceName);
expect(p99Name).to.be.equal(convertToNanoSecondsToSecond(p99).toString());
expect(errorRateName).to.be.equals(
parseFloat(errorRate.toString()).toFixed(2),
);
expect(rpsName).to.be.equals(callRate.toString());
});
});
});
});
});
export {};


@@ -0,0 +1,24 @@
/// <reference types="cypress" />
// ***********************************************************
// This example plugins/index.js can be used to load plugins
//
// You can change the location of this file or turn off loading
// the plugins file with the 'pluginsFile' configuration option.
//
// You can read more here:
// https://on.cypress.io/plugins-guide
// ***********************************************************
// This function is called when a project is opened or re-opened (e.g. due to
// the project's config changing)
// cypress/plugins/index.ts
/// <reference types="cypress" />
/**
* @type {Cypress.PluginConfig}
*/
module.exports = (on: Cypress.PluginEvents, config: Cypress.ConfigOptions): void => {};
export {};


@@ -0,0 +1,24 @@
import '@testing-library/cypress/add-commands';
import CheckRouteDefaultGlobalTimeOptions, {
CheckRouteDefaultGlobalTimeOptionsProps,
} from '../CustomFunctions/checkRouteDefaultGlobalTimeOptions';
import Login, { LoginProps } from '../CustomFunctions/Login';
Cypress.Commands.add('login', Login);
Cypress.Commands.add(
'checkDefaultGlobalOption',
CheckRouteDefaultGlobalTimeOptions,
);
declare global {
// eslint-disable-next-line @typescript-eslint/no-namespace
namespace Cypress {
interface Chainable {
login(props: LoginProps): void;
checkDefaultGlobalOption(
props: CheckRouteDefaultGlobalTimeOptionsProps,
): void;
}
}
}
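With the commands registered above, specs invoke them straight off the `cy` chain, as the appLayout and metrics specs earlier in this diff do. A condensed usage sketch (the route string is illustrative; the actual specs pass ROUTES constants):

```
// Condensed usage of the custom commands (see the spec files above).
cy.login({ email: 'test@test.com', name: 'Test' });
cy.checkDefaultGlobalOption({ route: '/application' }); // illustrative route
```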


@@ -0,0 +1,20 @@
// ***********************************************************
// This example support/index.js is processed and
// loaded automatically before your test files.
//
// This is a great place to put global configuration and
// behavior that modifies Cypress.
//
// You can change the location of this file or turn off
// automatically serving support files with the
// 'supportFile' configuration option.
//
// You can read more here:
// https://on.cypress.io/configuration
// ***********************************************************
// Import commands.js using ES2015 syntax:
import './commands';
// Alternatively you can use CommonJS syntax:
// require('./commands')


@@ -0,0 +1,13 @@
{
  "extends": "../tsconfig.json",
  "compilerOptions": {
    "target": "es5",
    "lib": ["es5", "dom"],
    "noEmit": true,
    // be explicit about types included
    // to avoid clashing with Jest types
    "types": ["cypress", "@testing-library/cypress"],
    "isolatedModules": false
  },
  "include": ["../node_modules/cypress", "./**/*.ts"]
}

Some files were not shown because too many files have changed in this diff.