Compare commits

...

82 Commits

Author SHA1 Message Date
Piyush Singariya
65ae267dd9 fix: update in global variables, and query operators 2025-03-23 23:36:09 +05:30
Piyush Singariya
9f221dffcc Merge branch 'main' into feat/msk-integration 2025-03-21 12:48:59 +05:30
Vikrant Gupta
d831c1cb88 fix(portal): url limit reached for stripe customer portal (#7392) 2025-03-21 06:02:49 +00:00
Amlan Kumar Nandy
bc72a0a591 chore: correct isMonotonic field name in metric details (#7393) 2025-03-21 10:36:54 +05:30
Amlan Kumar Nandy
ad2b75e3f0 chore: metris explorer fixes (#7377) 2025-03-20 18:58:15 +00:00
Shivanshu Raj Shrivastava
efd4e30edf fix: publish signoz as package (#7378)
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-03-20 15:31:41 +00:00
aniketio-ctrl
f79a5a2db6 fix(metrics-explorer): added updated metadata in list summary (#7381)
* fix(metrics-explorer): added updated metadata in list summary

* fix(metrics-explorer): added skipSignozMetric in aggregate attribute

* fix(metrics-explorer): updated last received query
2025-03-20 18:46:05 +05:30
Piyush Singariya
48a7430165 Merge branch 'main' into feat/msk-integration 2025-03-20 17:47:22 +05:30
Piyush Singariya
3afab1316f fix: changes based on review, added Variables, Units, Legends, SVG 2025-03-20 17:46:54 +05:30
Nityananda Gohain
9d8e46e5b2 fix: move pat and org domains towards postgres multitenancy (#7337)
* fix: inital commit for pat

* fix: add migration file

* fix: add domain changes

* fix: minor fixes

* fix: update migration

* fix: update migration

* fix: update pat and old migration

* fix: move domain and sso type to ee
2025-03-20 13:59:52 +05:30
SagarRajput-7
0320285a25 feat: added context redirection from panels to explorer pages (#7141)
* feat: added context redirection from panels to explorer pages

* feat: added graph coordinate - context redirection

* feat: fixed tooltip overlapping the button

* feat: code fix

* feat: removed unneccesary comment

* feat: added logic to resolve variables

* feat: added better logic to handle specific and panel redirection using query

* feat: added multi query support by datasource to panels redirction

* feat: fixing createbutton display logic

* feat: added logic and ui for specific line redirection

* feat: added logic to compute query with groupby

* feat: code fix and added aysnc await

* feat: added context redirection to fullview and edit view panels (#7252)

* feat: added context redirection to fullview and edit view panels

* feat: restricted redirection query to have only one query

* feat: added is buttonEnabled logic of graphs

* feat: code cleanup

* feat: for one query removed the queryname from onclick button

* feat: removed redirection option from action menu

* feat: redesign the format api flow to avoid delay in clickbutton appearance

* feat: updated the create filter logic for groupBys

* feat: handled the error on format api
2025-03-20 11:29:31 +05:30
Vibhu Pandey
e04e58d8b3 fix(apiserver): remove redundant logs by default (#7375) 2025-03-20 11:01:01 +05:30
Amlan Kumar Nandy
fc03303c29 chore: fix query builder datasource issue in metrics explorer (#7373) 2025-03-20 03:38:16 +00:00
Vibhu Pandey
95b94a9da7 fix(cache): rely on if-modified-since header to serve static files (#7372) 2025-03-20 01:09:52 +05:30
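
For illustration, a minimal Go sketch of the behaviour the commit above relies on: when `http.ServeContent` is given a non-zero modification time, Go's net/http compares it with the request's If-Modified-Since header and replies 304 Not Modified when the client's copy is still current. This is not the actual SigNoz handler; the file path is hypothetical.

```go
package main

import (
	"net/http"
	"os"
)

// serveStatic serves one static file and lets http.ServeContent handle
// If-Modified-Since revalidation via the file's modification time.
func serveStatic(w http.ResponseWriter, r *http.Request) {
	// Hypothetical build output path, used only for this example.
	f, err := os.Open("web/build/index.html")
	if err != nil {
		http.NotFound(w, r)
		return
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// A non-zero modtime enables 304 responses for fresh client caches.
	http.ServeContent(w, r, info.Name(), info.ModTime(), f)
}

func main() {
	http.HandleFunc("/", serveStatic)
	_ = http.ListenAndServe(":8080", nil)
}
```
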
Raj Kamal Singh
22c2cb620d Merge branch 'main' into feat/msk-integration 2025-03-19 18:36:15 +05:30
Amlan Kumar Nandy
3d3dd98549 chore: metrics explorer fixes (#7362) 2025-03-19 17:31:36 +05:30
Amlan Kumar Nandy
097e4ca948 chore: add in-table-search for metric name and type in metrics explorer (#7356) 2025-03-19 13:27:39 +05:30
Raj Kamal Singh
a64908e571 Feat: aws integration support for api gateway (#7232)
* chore: fix typo in RDS overview text

* feat: aws integration: get svc definition for API gateway started

* feat: aws integration: api gateway: add overview dashboard

* feat: aws integration: API gateway: add details of metrics collected

* chore: aws integration: api gateway: remove unnecessary promql query from panels
2025-03-19 07:16:29 +00:00
Raj Kamal Singh
0be9ca272b Chore: Spell fix in AWS ALB integration (#7282)
* chore: spell fix in AWS ALB integration

* chore: only ALB title

---------

Co-authored-by: Piyush Singariya <piyushsingariya@gmail.com>
Co-authored-by: piyushsignoz <piyush@signoz.io>
Co-authored-by: Raj Kamal Singh <1133322+raj-k-singh@users.noreply.github.com>
2025-03-19 07:03:33 +00:00
aniketio-ctrl
e434e3d142 chore: update metric metadata response payload (#7359)
* fix(metrics-explorer): added type in list summary api as filter
2025-03-19 06:46:38 +00:00
SagarRajput-7
4779300dea fix: fixed list panel getting no-data in full view and edit panel (#7320)
* fix: fixed list panel getting no-data in full view and edit panel

* fix: added cloneDeep for updatedQuery

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-03-19 11:58:20 +05:30
aniketio-ctrl
7a225e0a4f fix(metrics-explorer): added type in list summary api as filter (#7357) 2025-03-19 11:30:40 +05:30
Piyush Singariya
9c1c92ad02 Merge branch 'main' into feat/msk-integration 2025-03-19 11:17:01 +05:30
SagarRajput-7
53d3de4909 feat: added label with value (result) in Pie charts (#7322)
* fix: fixed legend format not working for Pie Chart

* fix: added enhancement to legend fit and show, also made pie-chart responsive

* fix: made some css fixes

* fix: css fixes

* feat: added label with value (result) in Pie charts

* feat: added UI for inner radiud to have total value in the center of PIE

* feat: added formatting and unit support to innder total values
2025-03-18 16:34:56 +00:00
SagarRajput-7
4ede88cc1f feat: made value panel responsive and background color to full (#7339)
* feat: made value panel responsive and background color to full

* feat: added css fix

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-03-18 21:58:41 +05:30
Nityananda Gohain
800ca9d329 fix: min step interval to 60 (#7354) 2025-03-18 14:18:12 +00:00
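
A hedged sketch of the idea behind "min step interval to 60": clamp whatever step the client requests to a lower bound so queries never run at sub-minute resolution. The package, constant, and function names are illustrative, not the actual query-service API.

```go
package step

import "time"

// MinStep is the smallest aggregation step this sketch allows (60s),
// matching the minimum described in the commit above.
const MinStep = 60 * time.Second

// Clamp raises the requested step to MinStep when it is smaller, which
// also covers a zero or negative step sent by the client.
func Clamp(requested time.Duration) time.Duration {
	if requested < MinStep {
		return MinStep
	}
	return requested
}
```
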
Amlan Kumar Nandy
b3fa1ad60e chore: metrics explorer fixes and improvements (#7334) 2025-03-18 10:57:14 +00:00
aniketio-ctrl
5b6b5bf359 feat(summary): added update metrics metadata api (#7235)
* feat(explorer): updated metadata metrics api| 7076

* feat(explorer): added inspect metrics with resource attribute| 7076

* fix(summary): fixed dashboard name in metric metadata api

* fix(summary): removed offset from second query

* fix(summary): removed offset from second query

* feat(summary): added update metrics metadata api

* feat(summary): resolved log messages

* feat(summary): added is_monotonic column and added temporality| 7077

* feat(summary): added histogram bucket and summary quantile check| 7077

* feat(summary): added temporality and is_monotonic in update queries| 7077

* feat(summary): resolved pr comments| 7077

* feat(inspect): normalized resource attributes

* feat(update-summary): merge conflicts resolve

* feat(update-summary): merge conflicts resolve

* feat(update-metrics): updated description check

* feat(update-metrics): added kv log comments

* fix: updated testcase with reader

* fix: updated testcase with reader

* fix: updated testcase with reader

* fix: updated normalized true in metrics explorer api

* fix: removed inner join from list metrics query
2025-03-18 10:39:34 +00:00
SagarRajput-7
8a479d42ff fix: fixed legend format not working for Pie Chart (#7300)
* fix: fixed legend format not working for Pie Chart

* fix: added enhancement to legend fit and show, also made pie-chart responsive

* fix: made some css fixes

* fix: css fixes
2025-03-18 10:23:58 +00:00
primus-bot[bot]
031e78cb20 chore(release): bump to v0.76.2 (#7351)
#### Summary
 - Release SigNoz v0.76.2
2025-03-18 14:46:19 +05:30
Piyush Singariya
a65e0ebb2f fix: minor suggestions made by ellipsis 2025-03-18 14:42:42 +05:30
Piyush Singariya
ded1a1d824 feat: logs not available in msk 2025-03-18 14:35:19 +05:30
Yunus M
f992ba9106 fix(license): OSS UI build failing due to restrictive active license check (#7345)
* fix: ui breaking due to licenses issue

* feat: handle navigations in case of oss in homepage (#7347)

* feat: handle navigations in case of oss in homepage

* fix: skip datasource and redirect to get-started from services table

---------

Co-authored-by: makeavish <makeavish786@gmail.com>

---------

Co-authored-by: makeavish <makeavish786@gmail.com>
2025-03-18 14:24:26 +05:30
Piyush Singariya
5944fcd934 feat: msk integration 2025-03-18 14:08:33 +05:30
Vibhu Pandey
7118829107 fix(test|lint): fix lint and test in go (#7346)
### Summary
 
- fix lint and test in go
2025-03-18 14:01:48 +05:30
Vikrant Gupta
5aea442939 fix(license): add check for current route for workspace access (#7341) 2025-03-18 04:40:31 +05:30
Prashant Shahi
bf385a9f95 fix: enable 8080 ports for testing standalone and swarm (#7336)
### Summary

- expose SigNoz port 8080 for testing standalone and swarm

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-03-17 21:26:43 +05:30
primus-bot[bot]
c52994ff9e chore(release): bump SigNoz to v0.76.1, OTel Collector to v0.111.34 (#7335)
#### Summary
 - Release SigNoz v0.76.1
 - Bump SigNoz OTel Collector to v0.111.34

 Created by [Primus-Bot](https://github.com/apps/primus-bot)
2025-03-17 20:52:03 +05:30
Vikrant Gupta
a1eb52b034 chore(subscription): deprecate trial API (#7325) 2025-03-17 19:33:32 +05:30
Yunus M
c81760bdf7 feat: update ui based on license states (#7328)
* feat: update ui based on license states

* feat: update license based ui messaging (#7330)

* fix: billing test cases
2025-03-17 19:27:45 +05:30
Vikrant Gupta
c26277cd42 chore(subscription): update the checkout and portal endpoints to use zeus (#7310)
### Summary

- update the checkout and portal endpoints to use `zeus` instead of license server
2025-03-17 15:22:04 +05:30
Shivanshu Raj Shrivastava
458bd1171b fix: give correct span counts via waterfall api (#7287)
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2025-03-17 05:33:06 +00:00
Vibhu Pandey
a806ddf74f ci(codeowners): add codeowners for core pkg packages (#7317) 2025-03-17 09:01:58 +05:30
Yunus M
b039dc6fa7 feat: in product home page (#7270)
* feat: base setup for in product home page

* feat: base state

* feat: add empty states for alerts, traces, dashboards, saved views

* feat: add checklist component

* feat: integrate all panels

* feat: integrate preference api and clean up components

* feat: handle done and skip states of the checklist

* feat: update ui

* feat: update ui

* feat: code cleanup

* feat: add events

* feat: support time interval change in services

* feat: add service time change event and cleanup code

* feat: handle light mode

* feat: address review comments

* fix: routing issues

* fix: testcase snapshot, a minor ui improvements

* fix: noopener typo in window.open
2025-03-16 19:41:08 +05:30
Vikrant Gupta
08309c380c fix(usage): set default tenant for exporting usage (#7314) 2025-03-16 15:00:31 +05:30
Vibhu Pandey
f96a234faa fix(alertmanager): send empty array instead of nil (#7312) 2025-03-14 21:16:37 +05:30
primus-bot[bot]
de53119257 chore(release): bump to v0.76.0 (#7311)
#### Summary
 - Release SigNoz vv0.76.0

 Created by [Primus-Bot](https://github.com/apps/primus-bot)
2025-03-14 17:41:24 +05:30
Prashant Shahi
eee3f7d549 feat: signoz package and deprecation of frontend, alertmanager (#7301)
### Summary

- signoz package with goreleaser
- frontend deprecation
- alertmanager deprecation

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-03-14 17:23:10 +05:30
Shaheer Kochai
32d144845a fix: update the auto-complete API reading logic (#7254) 2025-03-14 02:50:04 +00:00
Nityananda Gohain
e614d6b0e9 fix: handle 0 step interval (#7307) 2025-03-13 22:20:54 +05:30
Vibhu Pandey
7f71c0ed2d docs(contributing): revamp contributing docs (#7290) 2025-03-13 19:36:55 +05:30
Dāvis
50c1af2da8 fix: include frontend with oss query-service
Co-authored-by: Vibhu Pandey <vibhupandey28@gmail.com>
2025-03-13 18:34:20 +05:30
Nityananda Gohain
b46f0c9a7b fix: use correct created_at and updated_at (#7305) 2025-03-13 09:44:06 +00:00
aniketio-ctrl
9df23bc1ed feat: Add inspect metrics API (#7197)
* feat(inspect): added inspect metric api | 7076

* feat(inspect): added inspect metric api | 7076

* feat(inspect): added inspect metric api | 7076

* feat(inspect): removed underscore label keys

* feat(explorer): updated metadata metrics api| 7076

* feat(explorer): added inspect metrics with resource attribute| 7076

* fix(summary): fixed dashboard name in metric metadata api

* fix(summary): removed offset from second query

* fix(summary): removed offset from second query

* feat(inspect): normalized resource attributes

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-03-13 14:57:27 +05:30
Srikanth Chekuri
0035ae0072 chore: update infra monitoring events (#7268) 2025-03-13 06:04:44 +00:00
Amlan Kumar Nandy
86a888a6a2 chore: metrics explorer improvements (#7285) 2025-03-13 05:13:02 +00:00
Nityananda Gohain
9a3c49bce4 Store complete intervals in cache and update logic for response (#7212)
* fix: new implementation for finding missing timerange

* fix: remove unwanted code

* fix: update if condition

* fix: update logic and the test cases

* fix: correct name

* fix: filter points which are not a complete agg interval

* fix: fix the logic to use the points correctly

* fix: fix overlapping test case

* fix: add comments

* Update pkg/query-service/querycache/query_range_cache.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: use step ms

* fix: use step ms

* fix: tests

* fix: update logic to handle actual empty series

* fix: name updated

* Update pkg/query-service/app/querier/v2/helper.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: address comments

* fix: address comments

* fix: address comments

* Update pkg/query-service/common/query_range.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: add error log

* fix: handle case where end is equal to a complete window end

* fix: added comments

* fix: address comments

* fix: move function to common query range

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-03-13 04:34:06 +00:00
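
A hedged sketch of the idea behind "store complete intervals in cache and update logic for response": keep only points whose whole aggregation window fits inside the queried range, since a partial window at the edge would change once more data arrives. The types and names below are illustrative, not the real pkg/query-service structures.

```go
package querycache

// Point is a single aggregated sample keyed by its window start (ms).
type Point struct {
	TimestampMs int64
	Value       float64
}

// CompletePoints keeps points whose window [ts, ts+stepMs) lies entirely
// within [startMs, endMs); incomplete edge windows are dropped.
func CompletePoints(points []Point, startMs, endMs, stepMs int64) []Point {
	out := make([]Point, 0, len(points))
	for _, p := range points {
		if p.TimestampMs >= startMs && p.TimestampMs+stepMs <= endMs {
			out = append(out, p)
		}
	}
	return out
}
```
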
Vibhu Pandey
946a249c85 refactor(e2e): remove e2e package (#7265)
### Summary

remove e2e package

#### Related Issues / PR's

Unused
2025-03-13 01:41:34 +05:30
Vibhu Pandey
06be0f4330 fix(alertmanager): fix templating (#7288)
### Summary

- Fix templating of alertmanagerURL to include links to our alerts page
- Return an empty array instead of null in contextlinks
2025-03-12 23:15:58 +05:30
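
For illustration, a small Go example of why "return an empty array instead of null" matters: a nil slice marshals to JSON `null`, while an initialised empty slice marshals to `[]`, and frontends often treat the two differently. The struct here is hypothetical, not the actual contextlinks type.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	type response struct {
		ContextLinks []string `json:"contextLinks"`
	}

	var withNil response                            // ContextLinks is nil
	withEmpty := response{ContextLinks: []string{}} // explicitly empty

	a, _ := json.Marshal(withNil)
	b, _ := json.Marshal(withEmpty)
	fmt.Println(string(a)) // {"contextLinks":null}
	fmt.Println(string(b)) // {"contextLinks":[]}
}
```
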
Nityananda Gohain
5dba1f3dbb fix: use correct keys in dashboard list (#7283)
* fix: use correct keys in dashboard list

* fix: use correct keys in dashboard list
2025-03-12 19:37:54 +05:30
Nityananda Gohain
a9618886b9 fix: support multitenancy in dashboards & savedviews (#7237)
* fix: support multitenancy in dashboards

* fix: support multitenancy in saved views

* fix: move migrations to provider file

* fix: remove getUserFromClaims and use new errors

* fix: remove getUserFromClaims and use new errors

* fix: use new errors in dashboards.go

* Update ee/query-service/app/api/dashboard.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* fix: minor changes

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-03-12 17:18:11 +05:30
Yunus M
d576dec156 feat: update ingestion details api in new onboarding flow (#7277)
* feat: update ingestion details api to use the new api

* feat: handle select environment entity

* feat: handle invalid date in hasDatePassed function
2025-03-12 14:04:23 +05:30
Yunus M
2c7dfd748f fix: update design for light mode (#7271) 2025-03-12 09:21:38 +05:30
CheetoDa
6effcbd413 chore: onboarding fixes and added new datasource (#7236) 2025-03-12 01:32:25 +05:30
Prashant Shahi
1224a03229 ci: tsc and commitci workflows (#7269)
### Summary

- tsc and commitci workflows
2025-03-11 22:11:49 +05:30
Prashant Shahi
239ea9d34c ci(js): ci workflow for js (#7267)
- ci(js): ci workflow for js
2025-03-11 19:10:54 +05:30
Prashant Shahi
494d2460a0 ci: build/test/lint GH workflows for Go (#7266)
build/test/lint GH workflows for Go
2025-03-11 17:24:19 +05:30
Prashant Shahi
31fb6727c2 ci: remove unused GitHub workflows (#7264)
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-03-11 16:15:19 +05:30
Shaheer Kochai
1e85981a17 fix: display same key with multiple data types in filter suggestions by enhancing the deduping logic (#7255)
* fix: display same key with multiple data types in filter suggestions by enhancing the deduping logic

* refactor: remove unnecessary .trim()
2025-03-11 09:03:26 +00:00
Amlan Kumar Nandy
527d8a4fc9 feat: implement explore section in metrics explorer (#7256) 2025-03-11 06:48:09 +00:00
Amlan Kumar Nandy
1b758a088c feat: implement metric details view in metrics explorer (#7238) 2025-03-11 06:32:08 +00:00
Vishal Sharma
8fbf8155de fix: ignore identity for default user (#7257) 2025-03-11 08:23:10 +05:30
SagarRajput-7
e92e0b6e29 fix: fixed handling of how and when to set ALL option (#7244) 2025-03-10 14:18:55 +05:30
Vibhu Pandey
1f33928bf9 feat(alertmanager): integrate with ruler (#7222)
### Summary

Integrate the new implementations of the alertmanager along with changes to the ruler. This change can be broadly categoried into 3 parts:

#### Frontend
- The earlier `/api/v1/alerts` api was double encoding the response in json and sending it to the frontend. This PR fixes the json response object. 

For instance, we have gone from the response `{
    "status": "success",
    "data": "{\"status\":\"success\",\"data\":[{\"labels\":{\"alertname\":\"[platform][consumer] consumer is above 100% memory utilization\",\"bu\":\"platform\",\"......
}` to the response `{"status":"success","data":[{"labels":{"alertname":"[Metrics] Pod CP......`

- `msteams` has been changed to `msteamsv2` wherever applicable

#### Ruler
The following changes have been done in the ruler component:

- Removal of the old alertmanager and notifier
- The RuleDB methods `Create`, `Edit` and `Delete` have been made transactional
- Introduction of a new `testPrepareNotifyFunc` for sending test notifications
- Integration with the new alertmanager

#### Alertmanager
Although a huge chunk of the alertmanagers have been merged in previous PRs (the list can be found at https://github.com/SigNoz/platform-pod/issues/404), this PR takes care of changes needed in order to incorporate it with the ruler

- Addition of ruleId based matching
- Support for marshalling the global configuration directly from the upstream alertmanager
- Addition of orgId to the legacy alertmanager
- Support for always adding defaults to both routes and receivers while creating them
- Migration to create the required alertmanager tables
- Migration for msteams to msteamsv2 has been added. We will start using msteamv2 config for the new alertmanager and keep using msteams for the old one.

#### Related Issues / PR's

Closes https://github.com/SigNoz/platform-pod/issues/404
Closes https://github.com/SigNoz/platform-pod/issues/176
2025-03-09 20:00:42 +00:00
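
A minimal Go sketch of the double-encoding problem described in the PR above: marshalling the alert payload to a string before placing it in the response wrapper makes the outer encoder escape it, so the client receives `data` as one quoted blob instead of a JSON array. The type and function names are illustrative, not the actual SigNoz handlers.

```go
package apiexample

import (
	"encoding/json"
	"net/http"
)

type apiResponse struct {
	Status string      `json:"status"`
	Data   interface{} `json:"data"`
}

// writeDoubleEncoded marshals the payload to a string first, so the outer
// encoder escapes it:
// {"status":"success","data":"{\"status\":\"success\",...}"}
func writeDoubleEncoded(w http.ResponseWriter, alerts interface{}) {
	inner, _ := json.Marshal(alerts)
	_ = json.NewEncoder(w).Encode(apiResponse{Status: "success", Data: string(inner)})
}

// writeOnce embeds the payload as a value and encodes it exactly once:
// {"status":"success","data":[{"labels":{...}}]}
func writeOnce(w http.ResponseWriter, alerts interface{}) {
	_ = json.NewEncoder(w).Encode(apiResponse{Status: "success", Data: alerts})
}
```
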
Srikanth Chekuri
8abba261a8 fix: add missing send_resolved for email/pager/opsgenie edit payload (#7240) 2025-03-08 18:09:57 +05:30
Shivanshu Raj Shrivastava
8f2e8cccb4 New autocomplete endpoint with filters (#7241)
* feat: new autocomplete endpoint with filters
Signed-off-by: Shivanshu Raj Shrivastava <shivanshu1333@gmail.com>
2025-03-08 11:42:20 +05:30
Vishal Sharma
256fbfc180 chore: add new user preferences for welcome checklist (#7239) 2025-03-07 19:48:18 +05:30
Yunus M
d362f5bce3 feat: update logEvent to pass eventType and replace segment calls wit… (#7209)
* feat: update logEvent to pass eventType and replace segment calls with logEvent

* feat: update logEvent to handle rate limiting

---------

Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
2025-03-07 15:42:51 +05:30
dependabot[bot]
42f7511e06 chore(deps): bump github.com/go-jose/go-jose/v4 from 4.0.2 to 4.0.5 (#7180)
Bumps [github.com/go-jose/go-jose/v4](https://github.com/go-jose/go-jose) from 4.0.2 to 4.0.5.
- [Release notes](https://github.com/go-jose/go-jose/releases)
- [Changelog](https://github.com/go-jose/go-jose/blob/main/CHANGELOG.md)
- [Commits](https://github.com/go-jose/go-jose/compare/v4.0.2...v4.0.5)

---
updated-dependencies:
- dependency-name: github.com/go-jose/go-jose/v4
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-07 03:12:55 +00:00
Vishal Sharma
819428ad09 chore: add identify and group event support to /event API (#7219)
* chore: add identify and group event support to /event API

* chore: minor refactor
2025-03-07 00:23:47 +05:30
Shaheer Kochai
29fa5c3cf0 fix the issue of logs preview and count mismatch in pipelines by updating sample logs query param from limit to pageSize (#7231) 2025-03-06 14:55:23 +00:00
Raj Kamal Singh
d09b85bea8 feat: aws integration: support for lambda (#7196)
* feat: aws integration: add service definition for lambda

* feat: aws integration: lambda: add details of metrics collected

* feat: aws integrations: lambda overview: use sum for relevant metrics
2025-03-06 13:03:46 +00:00
791 changed files with 29183 additions and 7897 deletions


@@ -0,0 +1,73 @@
services:
  clickhouse:
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: clickhouse
    volumes:
      - ${PWD}/fs/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
      - ${PWD}/fs/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
      - ${PWD}/fs/tmp/var/lib/clickhouse/:/var/lib/clickhouse/
      - ${PWD}/fs/tmp/var/lib/clickhouse/user_scripts/:/var/lib/clickhouse/user_scripts/
    ports:
      - '127.0.0.1:8123:8123'
      - '127.0.0.1:9000:9000'
    tty: true
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - 0.0.0.0:8123/ping
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      - zookeeper
  zookeeper:
    image: bitnami/zookeeper:3.7.1
    container_name: zookeeper
    volumes:
      - ${PWD}/fs/tmp/zookeeper:/bitnami/zookeeper
    ports:
      - '127.0.0.1:2181:2181'
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
    healthcheck:
      test:
        - CMD-SHELL
        - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
      interval: 30s
      timeout: 5s
      retries: 3
  schema-migrator-sync:
    image: signoz/signoz-schema-migrator:0.111.29
    container_name: schema-migrator-sync
    command:
      - sync
      - --cluster-name=cluster
      - --dsn=tcp://clickhouse:9000
      - --replication=true
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
    restart: on-failure
  schema-migrator-async:
    image: signoz/signoz-schema-migrator:0.111.29
    container_name: schema-migrator-async
    command:
      - async
      - --cluster-name=cluster
      - --dsn=tcp://clickhouse:9000
      - --replication=true
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
      schema-migrator-sync:
        condition: service_completed_successfully
    restart: on-failure


@@ -0,0 +1,47 @@
<clickhouse replace="true">
    <logger>
        <level>information</level>
        <formatting>
            <type>json</type>
        </formatting>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>3</count>
    </logger>
    <display_name>cluster</display_name>
    <listen_host>0.0.0.0</listen_host>
    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>
    <user_directories>
        <users_xml>
            <path>users.xml</path>
        </users_xml>
        <local_directory>
            <path>/var/lib/clickhouse/access/</path>
        </local_directory>
    </user_directories>
    <distributed_ddl>
        <path>/clickhouse/task_queue/ddl</path>
    </distributed_ddl>
    <remote_servers>
        <cluster>
            <shard>
                <replica>
                    <host>clickhouse</host>
                    <port>9000</port>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
    <zookeeper>
        <node>
            <host>zookeeper</host>
            <port>2181</port>
        </node>
    </zookeeper>
    <macros>
        <shard>01</shard>
        <replica>01</replica>
    </macros>
</clickhouse>


@@ -0,0 +1,36 @@
<?xml version="1.0"?>
<clickhouse replace="true">
    <profiles>
        <default>
            <max_memory_usage>10000000000</max_memory_usage>
            <use_uncompressed_cache>0</use_uncompressed_cache>
            <load_balancing>in_order</load_balancing>
            <log_queries>1</log_queries>
        </default>
    </profiles>
    <users>
        <default>
            <profile>default</profile>
            <networks>
                <ip>::/0</ip>
            </networks>
            <quota>default</quota>
            <access_management>1</access_management>
            <named_collection_control>1</named_collection_control>
            <show_named_collections>1</show_named_collections>
            <show_named_collections_secrets>1</show_named_collections_secrets>
        </default>
    </users>
    <quotas>
        <default>
            <interval>
                <duration>3600</duration>
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>

.github/CODEOWNERS

@@ -6,5 +6,8 @@
/frontend/src/container/MetricsApplication @srikanthccv
/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
/deploy/ @SigNoz/devops
/sample-apps/ @SigNoz/devops
.github @SigNoz/devops
/pkg/config/ @grandwizard28
/pkg/errors/ @grandwizard28
/pkg/factory/ @grandwizard28
/pkg/types/ @grandwizard28


@@ -7,13 +7,6 @@ on:
- release/v*
jobs:
check-no-ee-references:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run check
run: make check-no-ee-references
build-frontend:
runs-on: ubuntu-latest
steps:
@@ -21,42 +14,12 @@ jobs:
uses: actions/checkout@v4
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint
run: cd frontend && npm run lint
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
- name: Build frontend static files
shell: bash
run: |
make build-frontend-amd64
make build-frontend-static
build-frontend-ee:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint
run: cd frontend && npm run lint
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
shell: bash
run: |
make build-frontend-amd64
build-query-service:
build-signoz:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -64,17 +27,13 @@ jobs:
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Run tests
go-version: "1.22"
- name: Build signoz image
shell: bash
run: |
make test
- name: Build query-service image
shell: bash
run: |
make build-query-service-amd64
make build-signoz-amd64
build-ee-query-service:
build-signoz-community:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -82,8 +41,8 @@ jobs:
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Build EE query-service image
go-version: "1.22"
- name: Build signoz community image
shell: bash
run: |
make build-ee-query-service-amd64
make build-signoz-community-amd64


@@ -1,17 +0,0 @@
name: Codeball
on: [pull_request]
jobs:
codeball_job:
runs-on: ubuntu-latest
name: Codeball
steps:
# Run Codeball on all new Pull Requests 🚀
# For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
- name: Codeball
uses: sturdy-dev/codeball-action@v2
with:
approvePullRequests: "true"
labelPullRequestsWhenApproved: "true"
labelPullRequestsWhenReviewNeeded: "false"
failJobsWhenReviewNeeded: "false"


@@ -1,71 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ main, v* ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
schedule:
- cron: '32 5 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript', 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

.github/workflows/commitci.yaml

@@ -0,0 +1,39 @@
name: commitci
on:
  pull_request:
    branches:
      - main
  pull_request_target:
    types:
      - labeled
jobs:
  refcheck:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: check
        run: |
          if grep -R --include="*.go" '.*/ee/.*' pkg/; then
            echo "Error: Found references to 'ee' packages in 'pkg' directory"
            exit 1
          else
            echo "No references to 'ee' packages found in 'pkg' directory"
          fi
  lint:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: lint
        uses: wagoid/commitlint-github-action@v5


@@ -1,13 +0,0 @@
name: commitlint
on: [pull_request]
defaults:
run:
working-directory: frontend
jobs:
lint-commits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: wagoid/commitlint-github-action@v5


@@ -1,27 +0,0 @@
on:
pull_request_target:
types:
- closed
env:
GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
PR_NUMBER: ${{ github.event.number }}
jobs:
create_issue_on_merge:
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest
steps:
- name: Checkout Codebase
uses: actions/checkout@v4
with:
repository: signoz/gh-bot
- name: Use Node v16
uses: actions/setup-node@v4
with:
node-version: 16
- name: Setup Cache & Install Dependencies
uses: bahmutov/npm-install@v1
with:
install-command: yarn --frozen-lockfile
- name: Comment on PR
run: node create-issue.js


@@ -1,22 +0,0 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency Review'
on: [pull_request]
permissions:
contents: read
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
with:
fail-on-severity: high
uses: actions/dependency-review-action@v3


@@ -1,93 +0,0 @@
name: e2e-k3s
on:
pull_request:
types: [labeled]
jobs:
e2e-k3s:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'ok-to-test' }}
env:
DOCKER_TAG: pull-${{ github.event.number }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Build query-service image
env:
DEV_BUILD: 1
run: make build-ee-query-service-amd64
- name: Build frontend image
run: make build-frontend-amd64
- name: Create a k3s cluster
uses: AbsaOSS/k3d-action@v2
with:
cluster-name: "signoz"
- name: Inject the images to the cluster
run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz
- name: Set up HotROD sample-app
run: |
# create sample-application namespace
kubectl create ns sample-application
# apply hotrod k8s manifest file
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
# wait for all deployments in sample-application namespace to be READY
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
- name: Deploy the app
run: |
# add signoz helm repository
helm repo add signoz https://charts.signoz.io
# create platform namespace
kubectl create ns platform
# installing signoz using helm
helm install my-release signoz/signoz -n platform \
--wait \
--timeout 10m0s \
--set frontend.service.type=LoadBalancer \
--set queryService.image.tag=$DOCKER_TAG \
--set frontend.image.tag=$DOCKER_TAG
# get pods, services and the container images
kubectl get pods -n platform
kubectl get svc -n platform
- name: Kick off a sample-app workload
run: |
# start the locust swarm
kubectl --namespace sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
- name: Get short commit SHA, display tunnel URL and IP Address of the worker node
id: get-subdomain
run: |
subdomain="pr-$(git rev-parse --short HEAD)"
echo "URL for tunnelling: https://$subdomain.loca.lt"
echo "subdomain=$subdomain" >> $GITHUB_OUTPUT
worker_ip="$(curl -4 -s ipconfig.io/ip)"
echo "Worker node IP address: $worker_ip"
- name: Start tunnel
env:
SUBDOMAIN: ${{ steps.get-subdomain.outputs.subdomain }}
run: |
npm install -g localtunnel
host=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f4)
port=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1)
lt -p $port -l $host -s $SUBDOMAIN

.github/workflows/goci.yaml

@@ -0,0 +1,36 @@
name: goci
on:
  pull_request:
    branches:
      - main
  pull_request_target:
    types:
      - labeled
jobs:
  test:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    uses: signoz/primus.workflows/.github/workflows/go-test.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_TEST_CONTEXT: ./...
  fmt:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    uses: signoz/primus.workflows/.github/workflows/go-fmt.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
  lint:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    uses: signoz/primus.workflows/.github/workflows/go-lint.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main


@@ -1,9 +1,8 @@
name: goreleaser
name: gor-histogramquantile
on:
push:
tags:
- v*
- histogram-quantile/v*
permissions:


@@ -0,0 +1,155 @@
name: gor-signoz-community
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
permissions:
contents: write
jobs:
prepare:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: build-frontend
run: make build-frontend-static
- name: upload-frontend-artifact
uses: actions/upload-artifact@v4
with:
name: community-frontend-build-${{ env.sha_short }}
path: frontend/build
build:
needs: prepare
strategy:
matrix:
os:
- ubuntu-latest
- macos-latest
env:
CONFIG_PATH: pkg/query-service/.goreleaser.yaml
runs-on: ${{ matrix.os }}
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
if: matrix.os == 'ubuntu-latest'
- name: setup-buildx
uses: docker/setup-buildx-action@v3
if: matrix.os == 'ubuntu-latest'
- name: ghcr-login
uses: docker/login-action@v3
if: matrix.os != 'macos-latest'
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: cross-compilation-tools
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: download-frontend-artifact
uses: actions/download-artifact@v4
with:
name: community-frontend-build-${{ env.sha_short }}
path: frontend/build
- name: cache-linux
uses: actions/cache@v4
if: matrix.os == 'ubuntu-latest'
with:
path: dist/linux
key: signoz-community-linux-${{ env.sha_short }}
- name: cache-darwin
uses: actions/cache@v4
if: matrix.os == 'macos-latest'
with:
path: dist/darwin
key: signoz-community-darwin-${{ env.sha_short }}
- name: release
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser-pro
version: '~> v2'
args: release --config ${{ env.CONFIG_PATH }} --clean --split
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
release:
runs-on: ubuntu-latest
needs: build
env:
DOCKER_CLI_EXPERIMENTAL: "enabled"
WORKDIR: pkg/query-service
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
- name: cosign-installer
uses: sigstore/cosign-installer@v3.8.1
- name: download-syft
uses: anchore/sbom-action/download-syft@v0.18.0
- name: ghcr-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
# copy the caches from build
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: cache-linux
id: cache-linux
uses: actions/cache@v4
with:
path: dist/linux
key: signoz-community-linux-${{ env.sha_short }}
- name: cache-darwin
id: cache-darwin
uses: actions/cache@v4
with:
path: dist/darwin
key: signoz-community-darwin-${{ env.sha_short }}
# release
- uses: goreleaser/goreleaser-action@v6
if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
with:
distribution: goreleaser-pro
version: '~> v2'
args: continue --merge
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}

.github/workflows/gor-signoz.yaml

@@ -0,0 +1,168 @@
name: gor-signoz
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
permissions:
contents: write
jobs:
prepare:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: dotenv-frontend
working-directory: frontend
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > .env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> .env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> .env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> .env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> .env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> .env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> .env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> .env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> .env
echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> .env
- name: build-frontend
run: make build-frontend-static
- name: upload-frontend-artifact
uses: actions/upload-artifact@v4
with:
name: frontend-build-${{ env.sha_short }}
path: frontend/build
build:
needs: prepare
strategy:
matrix:
os:
- ubuntu-latest
- macos-latest
env:
CONFIG_PATH: ee/query-service/.goreleaser.yaml
runs-on: ${{ matrix.os }}
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
if: matrix.os == 'ubuntu-latest'
- name: setup-buildx
uses: docker/setup-buildx-action@v3
if: matrix.os == 'ubuntu-latest'
- name: ghcr-login
uses: docker/login-action@v3
if: matrix.os != 'macos-latest'
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: cross-compilation-tools
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: download-frontend-artifact
uses: actions/download-artifact@v4
with:
name: frontend-build-${{ env.sha_short }}
path: frontend/build
- name: cache-linux
uses: actions/cache@v4
if: matrix.os == 'ubuntu-latest'
with:
path: dist/linux
key: signoz-linux-${{ env.sha_short }}
- name: cache-darwin
uses: actions/cache@v4
if: matrix.os == 'macos-latest'
with:
path: dist/darwin
key: signoz-darwin-${{ env.sha_short }}
- name: release
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser-pro
version: '~> v2'
args: release --config ${{ env.CONFIG_PATH }} --clean --split
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
release:
runs-on: ubuntu-latest
needs: build
env:
DOCKER_CLI_EXPERIMENTAL: "enabled"
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
- name: cosign-installer
uses: sigstore/cosign-installer@v3.8.1
- name: download-syft
uses: anchore/sbom-action/download-syft@v0.18.0
- name: ghcr-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
# copy the caches from build
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: cache-linux
id: cache-linux
uses: actions/cache@v4
with:
path: dist/linux
key: signoz-linux-${{ env.sha_short }}
- name: cache-darwin
id: cache-darwin
uses: actions/cache@v4
with:
path: dist/darwin
key: signoz-darwin-${{ env.sha_short }}
# release
- uses: goreleaser/goreleaser-action@v6
if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
with:
distribution: goreleaser-pro
version: '~> v2'
args: continue --merge
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}


@@ -1,32 +0,0 @@
name: Jest Coverage - changed files
on:
pull_request:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
ref: "refs/heads/main"
token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
- name: Fetch branch
run: git fetch origin ${{ github.event.pull_request.head.ref }}
- run: |
git checkout ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-node@v4
with:
node-version: lts/*
- name: Install dependencies
run: cd frontend && npm install -g yarn && yarn
- name: npm run test:changedsince
run: cd frontend && npm run i18n:generate-hash && npm run test:changedsince

.github/workflows/jsci.yaml

@@ -0,0 +1,50 @@
name: jsci
on:
  pull_request:
    branches:
      - main
  pull_request_target:
    types:
      - labeled
jobs:
  tsc:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: install
        run: cd frontend && yarn install
      - name: tsc
        run: cd frontend && yarn tsc
  test:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    uses: signoz/primus.workflows/.github/workflows/js-test.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
  fmt:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    uses: signoz/primus.workflows/.github/workflows/js-fmt.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
  lint:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    uses: signoz/primus.workflows/.github/workflows/js-lint.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend


@@ -1,24 +0,0 @@
name: Playwright Tests
on: [pull_request]
jobs:
playwright:
defaults:
run:
working-directory: frontend
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: "16.x"
- name: Install dependencies
run: CI=1 yarn install
- name: Install Playwright
run: npx playwright install --with-deps
- name: Run Playwright tests
run: yarn playwright
env:
# This might depend on your test-runner/language binding
PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}

.github/workflows/postci.yaml

@@ -0,0 +1,18 @@
name: postci
on:
  pull_request_target:
    branches:
      - main
    types:
      - synchronize
jobs:
  remove:
    if: github.event.pull_request.head.repo.fork || github.event.pull_request.user.login == 'dependabot[bot]'
    uses: signoz/primus.workflows/.github/workflows/github-label.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      GITHUB_LABEL_ACTION: remove
      GITHUB_LABEL_NAME: safe-to-test


@@ -8,56 +8,27 @@ on:
- v*
jobs:
image-build-and-push-query-service:
image-build-and-push-signoz:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- name: checkout
uses: actions/checkout@v4
- name: Setup golang
- name: setup
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Set up QEMU
go-version: "1.22"
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
- name: setup-buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
- name: docker-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Install cross-compilation tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: Build and push docker image
run: make build-push-query-service
image-build-and-push-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
- name: create-env-file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
@@ -70,140 +41,94 @@ jobs:
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
- name: branch-name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
- name: docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Install cross-compilation tools
- name: cross-compilation-tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: Build and push docker image
run: make build-push-ee-query-service
image-build-and-push-frontend:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
- name: publish-signoz
run: make build-push-signoz
- name: qs-docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
echo "DOCKER_TAG=${tag}" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-frontend
- name: publish-query-service
run: |
SIGNOZ_DOCKER_IMAGE=query-service make build-push-signoz
image-build-and-push-frontend-ee:
image-build-and-push-signoz-community:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- name: checkout
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
- name: setup-go
uses: actions/setup-go@v4
with:
go-version: "1.22"
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
- name: docker-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
- name: branch-name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
- name: docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: cross-compilation-tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: publish-signoz-community
run: make build-push-signoz-community
- name: qs-docker-tag
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-frontend
- name: publish-query-service-oss
run: |
SIGNOZ_COMMUNITY_DOCKER_IMAGE=query-service make build-push-signoz-community


@@ -8,12 +8,6 @@ jobs:
remove:
runs-on: ubuntu-latest
steps:
- name: Remove label ok-to-test from PR
uses: buildsville/add-remove-label@v2.0.0
with:
label: ok-to-test
type: remove
token: ${{ secrets.GITHUB_TOKEN }}
- name: Remove label testing-deploy from PR
uses: buildsville/add-remove-label@v2.0.0
with:

View File

@@ -1,25 +0,0 @@
name: sonar
on:
pull_request:
branches:
- main
paths:
- 'frontend/**'
defaults:
run:
working-directory: frontend
jobs:
sonar-analysis:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Sonar analysis
uses: sonarsource/sonarcloud-github-action@master
with:
projectBaseDir: frontend
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}

View File

@@ -49,8 +49,7 @@ jobs:
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make build-signoz-amd64
make run-testing
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

View File

@@ -49,8 +49,7 @@ jobs:
# This is added to include the scenario when a new commit in the PR is force-pushed
git branch -D ${GITHUB_BRANCH}
git checkout --track origin/${GITHUB_BRANCH}
make build-ee-query-service-amd64
make build-frontend-amd64
make build-signoz-amd64
make run-testing
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

7
.gitignore vendored
View File

@@ -54,6 +54,7 @@ ee/query-service/tests/test-deploy/data/
bin/
.local/
*/query-service/queries.active
ee/query-service/db
# e2e
@@ -77,4 +78,8 @@ dist/
# ignore user_scripts that is fetched by init-clickhouse
deploy/common/clickhouse/user_scripts/
queries.active
queries.active
# .devenv tmp files
.devenv/**/tmp/**

View File

@@ -16,10 +16,8 @@ tasks:
yarn dev
ports:
- port: 3301
onOpen: open-browser
- port: 8080
onOpen: ignore
onOpen: open-browser
- port: 9000
onOpen: ignore
- port: 8123

View File

@@ -1,389 +1,79 @@
# Contributing Guidelines
## Welcome to SigNoz Contributing section 🎉
Thank you for your interest in contributing to our project! We greatly value feedback and contributions from our community. This document will guide you through the contribution process.
Hi there! We're thrilled that you'd like to contribute to this project; thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
## How can I contribute?
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.
### Finding Issues to Work On
- Check our [existing open issues](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue)
- Look for [good first issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with
- Review [recently closed issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) to avoid duplicates
- We accept contributions made to the [SigNoz `develop` branch]()
- Find all SigNoz Docker Hub images here
- [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
- [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
- [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)
### Types of Contributions
## Finding contributions to work on 💬
1. **Report Bugs**: Use our [Bug Report template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
2. **Request Features**: Submit using [Feature Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
3. **Improve Documentation**: Create an issue with the `documentation` label
4. **Report Performance Issues**: Use our [Performance Issue template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=)
5. **Request Dashboards**: Submit using [Dashboard Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+)
6. **Report Security Issues**: Follow our [Security Policy](https://github.com/SigNoz/signoz/security/policy)
7. **Join Discussions**: Participate in [project discussions](https://github.com/SigNoz/signoz/discussions)
Looking at the existing issues is a great way to find something to contribute to.
Also, have a look at the [good first issue label](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with.
### Creating Helpful Issues
When creating issues, include:
## Sections:
- [General Instructions](#1-general-instructions-)
- [For Creating Issue(s)](#11-for-creating-issues)
- [For Pull Requests(s)](#12-for-pull-requests)
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
- [Develop Frontend](#3-develop-frontend-)
- [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
- [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
- [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
- [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
- [Contribute to Dashboards](#6-contribute-to-dashboards-)
- [Other Ways to Contribute](#other-ways-to-contribute)
- **For Feature Requests**:
- Clear use case and requirements
- Proposed solution or improvement
- Any open questions or considerations
# 1. General Instructions 📝
- **For Bug Reports**:
- Step-by-step reproduction steps
- Version information
- Relevant environment details
- Any modifications you've made
- Expected vs actual behavior
## 1.1 For Creating Issue(s)
Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.
### Submitting Pull Requests
**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
1. **Development**:
- Setup your [development environment](docs/contributing/development.md)
- Work against the latest `main` branch
- Focus on specific changes
- Ensure all tests pass locally
- Follow our [commit convention](#commit-convention)
#### Details like these are incredibly useful:
2. **Submit PR**:
- Ensure your branch can be auto-merged
- Address any CI failures
- Respond to review comments promptly
- **Requirement** - what kind of use case are you trying to solve?
- **Proposal** - what do you suggest to solve the problem or improve the existing
situation?
- Any open questions to address❓
For substantial changes, please split your contribution into multiple PRs:
#### If you are reporting a bug, details like these are incredibly useful:
1. First PR: Overall structure (README, configurations, interfaces)
2. Second PR: Core implementation (split further if needed)
3. Final PR: Documentation updates and end-to-end tests
- A reproducible test case or series of steps.
- The version of our code being used.
- Any modifications you've made relevant to the bug🐞.
- Anything unusual about your environment or deployment.
### Commit Convention
Discussing your proposed changes ahead of time will make the contribution
process smooth for everyone 🙌.
We follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/). All commits and PRs should include type specifiers (e.g., `feat:`, `fix:`, `docs:`, etc.).
**[`^top^`](#contributing-guidelines)**
<hr>
## How can I contribute to other repositories?
## 1.2 For Pull Request(s)
You can find other repositories in the [SigNoz](https://github.com/SigNoz) organization to contribute to. Here is a list of **highlighted** repositories:
Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a Pull Request.
Before sending us a pull request, please ensure that:
- [charts](https://github.com/SigNoz/charts)
- [dashboards](https://github.com/SigNoz/dashboards)
- Fork the SigNoz repo on GitHub, clone it on your machine.
- Create a branch with your changes.
- You are working against the latest source on the `develop` branch.
- Modify the source; please focus only on the specific change you are contributing.
- Ensure local tests pass.
- Commit to your fork using clear commit messages.
- Send us a pull request, answering any default questions in the pull request interface.
- Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, on your computer, merge main into your branch, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push up those changes.
- Once the change has been approved and merged, we will inform you in a comment.
Each repository has its own contributing guidelines. Please refer to the guidelines of the repository you want to contribute to.
## How can I get help?
GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
Need assistance? Join our Slack community:
- [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M)
- [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B)
**Note:** Unless your change is small, **please** consider submitting different Pull Request(s):
## Where do I go from here?
* 1⃣ First PR should include the overall structure of the new component:
* Readme, configuration, interfaces or base classes, etc...
* This PR is usually trivial to review, so the size limit does not apply to
it.
* 2⃣ Second PR should include the concrete implementation of the component. If the
size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into
multiple PRs.
* If there are multiple sub-components, then ideally each one should be implemented as
a **separate** pull request.
* The last PR should include changes to **any user-facing documentation**, and should include
end-to-end tests if applicable. The component must be enabled
only after sufficient testing, and once there is enough confidence in the
stability and quality of the component.
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack community](https://signoz.io/slack).
### Pointers:
- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).
<hr>
### Conventions to follow when submitting Commits and Pull Request(s).
We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/); more specifically, commits and PRs **should have type specifiers** prefixed in the name. [This specification](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.
For example, if you are submitting a fix for an issue in the frontend, the PR name should be prefixed with **`fix(FE):`**
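A few illustrative commit messages following this convention (not taken from the repository history):

```bash
git commit -m "fix(FE): handle empty state in the services list"
git commit -m "feat: add a new dashboard template"
git commit -m "docs: update local development setup steps"
```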
- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.
- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
**[`^top^`](#contributing-guidelines)**
<hr>
# 2. How to Contribute 🙋🏻‍♂️
#### There are primarily 3 areas in which you can contribute to SigNoz
- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz)
Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.
**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it.
**[`^top^`](#contributing-guidelines)**
<hr>
# 3. Develop Frontend 🌚
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**
Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
## 3.1 Contribute to Frontend with Docker installation of SigNoz
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
![develop-frontend](https://user-images.githubusercontent.com/52788043/179009217-6692616b-17dc-4d27-b587-9d007098d739.jpeg)
- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
```
ports:
- "8080:8080"
```
<img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">
- Next run,
```
cd deploy/docker
sudo docker compose up -d
```
- `cd ../frontend` and change the baseURL in [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2). To do that, create a `.env` file in the `frontend` directory with the `FRONTEND_API_ENDPOINT` environment variable set to match your configuration.
If you have backend api exposed via frontend nginx:
```
FRONTEND_API_ENDPOINT=http://localhost:3301
```
If not:
```
FRONTEND_API_ENDPOINT=http://localhost:8080
```
- Next,
```
yarn install
yarn dev
```
## 3.2 Contribute to Frontend without installing SigNoz backend
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
- Clone the SigNoz repository and cd into signoz/frontend directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
```
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
- Next,
```
yarn install
yarn dev
```
Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `<test environment URL>`.
**Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)
**[`^top^`](#contributing-guidelines)**
<hr>
# 4. Contribute to Backend (Query-Service) 🌑
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**
## 4.1 Prerequisites
### 4.1.1 Install SQLite3
- Run `sqlite3` command to check if you already have SQLite3 installed on your machine.
- If it is not installed already, install it using the command below
- on Linux
- on Debian / Ubuntu
```
sudo apt install sqlite3
```
- on CentOS / Fedora / RedHat
```
sudo yum install sqlite3
```
## 4.2 To run ClickHouse setup (recommended for local development)
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">
- Comment out `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">
- add below configuration to `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
```
ports:
- 9001:9000
```
<img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">
- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
```
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)
- Now, install SigNoz locally **without** the `frontend` and `query-service`,
- If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
- If you are on `arm64` processors (Apple M1 Macs) run `sudo make run-arm`
#### Run locally,
```
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```
#### Build and Run locally
```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```
#### Docker Images
The Docker images of query-service are available at https://hub.docker.com/r/signoz/query-service
```
docker pull signoz/query-service
```
```
docker pull signoz/query-service:latest
```
```
docker pull signoz/query-service:develop
```
### Important Note:
**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
If you want to see how the frontend plays with the query service, you can also run the frontend in your local environment with the baseURL changed to `http://localhost:8080` in [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts), since the `query-service` is now running on port `8080`.
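Concretely, that means a `frontend/.env` along these lines (a sketch; the variable name comes from section 3.1 above):

```bash
# frontend/.env: point the local frontend at the locally running query-service
FRONTEND_API_ENDPOINT=http://localhost:8080
```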
<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
Click the button below. A workspace with all required environments will be created.
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/SigNoz/signoz)
> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->
**[`^top^`](#contributing-guidelines)**
<hr>
# 5. Contribute to SigNoz Helm Chart 📊
**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**
## 5.1 To run helm chart for local development
- Clone the SigNoz repository and cd into charts directory,
```
git clone https://github.com/SigNoz/charts.git && cd charts
```
- It is recommended to use a lightweight Kubernetes (k8s) cluster for local development:
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [k3d](https://k3d.io/#installation)
- [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster,
- run `make dev-install` to install SigNoz chart with `my-release` release name in `platform` namespace,
- next run,
```
kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
```
to make SigNoz UI available at [localhost:3301](http://localhost:3301)
**5.1.1 To install the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```
**5.1.2 To load data with the HotROD sample app:**
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
```
**5.1.3 To stop the load generation:**
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
**5.1.4 To delete the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
| HOTROD_NAMESPACE=sample-application bash
```
**[`^top^`](#contributing-guidelines)**
---
# 6. Contribute to Dashboards 📈
**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)**
To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief:
1. Create a dashboard JSON file.
2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed.
3. Include screenshots of the dashboard in the `assets/` directory.
4. Submit a pull request for review.
## Other Ways to Contribute
There are many other ways to get involved with the community and to participate in this project:
- Use the product, submitting GitHub issues when a problem is found.
- Help code review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.
Again, feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our Slack community if you need any help with this :)
Thank You!
- Set up your [development environment](docs/contributing/development.md)

211
Makefile
View File

@@ -7,8 +7,10 @@ BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
LICENSE_SIGNOZ_IO ?= https://license.signoz.io/api/v1
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
ZEUS_URL ?= https://api.signoz.cloud
DEV_ZEUS_URL ?= https://api.staging.signoz.cloud
DEV_BUILD ?= "" # set to any non-empty value to enable dev build
# Internal variables or constants.
@@ -18,18 +20,19 @@ EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker
SWARM_DIRECTORY ?= deploy/docker-swarm
CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile
GORELEASER_BIN ?= goreleaser
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
GOPATH ?= $(shell go env GOPATH)
REPONAME ?= signoz
DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
DOCKER_TAG ?= $(BUILD_VERSION)
SIGNOZ_DOCKER_IMAGE ?= signoz
SIGNOZ_COMMUNITY_DOCKER_IMAGE ?= signoz-community
# Build-time Go variables
PACKAGE?=go.signoz.io/signoz
PACKAGE?=github.com/SigNoz/signoz
buildVersion=${PACKAGE}/pkg/query-service/version.buildVersion
buildHash=${PACKAGE}/pkg/query-service/version.buildHash
buildTime=${PACKAGE}/pkg/query-service/version.buildTime
@@ -37,10 +40,46 @@ gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
PROD_LD_FLAGS=-X ${zeusURL}=${ZEUS_URL} -X ${licenseSignozIo}=${LICENSE_SIGNOZ_IO}
DEV_LD_FLAGS=-X ${zeusURL}=${DEV_ZEUS_URL} -X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
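The `-X` flags above inject values into package-level string variables in the `version` and `constants` packages at link time. A rough sketch of an equivalent manual invocation, omitting the CGO and static-link flags used by the real targets (the version values here are illustrative):

```bash
# Manual sketch of the -X link-time injection (illustrative values)
PKG=github.com/SigNoz/signoz/pkg/query-service/version
go build -o bin/signoz \
  -ldflags "-X ${PKG}.buildVersion=v0.76.2 -X ${PKG}.buildHash=abc1234 -X ${PKG}.gitBranch=main" \
  ./ee/query-service
```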
all: build-push-frontend build-push-query-service
##############################################################
# common commands
##############################################################
.PHONY: help
help: ## Displays help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n\nTargets:\n"} /^[a-z0-9A-Z_-]+:.*?##/ { printf " \033[36m%-40s\033[0m %s\n", $$1, $$2 }' $(MAKEFILE_LIST)
##############################################################
# devenv commands
##############################################################
.PHONY: devenv-clickhouse
devenv-clickhouse: ## Run clickhouse in devenv
@cd .devenv/docker/clickhouse; \
docker compose -f compose.yaml up -d
##############################################################
# run commands
##############################################################
.PHONY: run-go
run-go: ## Runs the go backend server
@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
SIGNOZ_WEB_ENABLED=false \
SIGNOZ_JWT_SECRET=secret \
SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
go run -race \
./ee/query-service/main.go \
--config ./pkg/query-service/config/prometheus.yml \
--cluster cluster \
--use-logs-new-schema true \
--use-trace-new-schema true
all: build-push-frontend build-push-signoz
# Steps to build static files of frontend
build-frontend-static:
@@ -53,95 +92,67 @@ build-frontend-static:
yarn build && \
ls -l build
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
build-frontend-amd64: build-frontend-static
# Steps to build static binary of signoz
.PHONY: build-signoz-static
build-signoz-static:
@echo "------------------"
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend(used in push pipeline)
build-push-frontend: build-frontend-static
@echo "------------------"
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build static binary of query service
.PHONY: build-query-service-static
build-query-service-static:
@echo "------------------"
@echo "--> Building query-service static binary"
@echo "--> Building signoz static binary"
@echo "------------------"
@if [ $(DEV_BUILD) != "" ]; then \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
cd $(EE_QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
else \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
cd $(EE_QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/signoz-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${PROD_LD_FLAGS}"; \
fi
.PHONY: build-query-service-static-amd64
build-query-service-static-amd64:
make GOARCH=amd64 build-query-service-static
.PHONY: build-signoz-static-amd64
build-signoz-static-amd64:
make GOARCH=amd64 build-signoz-static
.PHONY: build-query-service-static-arm64
build-query-service-static-arm64:
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
.PHONY: build-signoz-static-arm64
build-signoz-static-arm64:
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-signoz-static
# Steps to build static binary of query service for all platforms
.PHONY: build-query-service-static-all
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static
# Steps to build static binary of signoz for all platforms
.PHONY: build-signoz-static-all
build-signoz-static-all: build-signoz-static-amd64 build-signoz-static-arm64 build-frontend-static
# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
# Steps to build and push docker image of signoz
.PHONY: build-signoz-amd64 build-push-signoz
# Step to build docker image of signoz in amd64 (used in build pipeline)
build-signoz-amd64: build-signoz-static-amd64 build-frontend-static
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "--> Building signoz docker image for amd64"
@echo "------------------"
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
@docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service: build-query-service-static-all
build-push-signoz: build-signoz-static-all
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "--> Building and pushing signoz docker image"
@echo "------------------"
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
--push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
--tag $(REPONAME)/$(SIGNOZ_DOCKER_IMAGE):$(DOCKER_TAG) .
# Step to build EE docker image of query service in amd64 (used in build pipeline)
build-ee-query-service-amd64:
# Step to build docker image of signoz community in amd64 (used in build pipeline)
build-signoz-community-amd64:
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "--> Building signoz docker image for amd64"
@echo "------------------"
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64
make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-signoz-amd64
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
build-push-ee-query-service:
# Step to build and push docker image of signoz community in amd64 and arm64 (used in push pipeline)
build-push-signoz-community:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service
dev-setup:
mkdir -p /var/lib/signoz
sqlite3 /var/lib/signoz/signoz.db "VACUUM";
mkdir -p pkg/query-service/config/dashboards
@echo "------------------"
@echo "--> Local Setup completed"
@echo "--> Building and pushing signoz community docker image"
@echo "------------------"
make EE_QUERY_SERVICE_DIRECTORY=${QUERY_SERVICE_DIRECTORY} SIGNOZ_DOCKER_IMAGE=${SIGNOZ_COMMUNITY_DOCKER_IMAGE} build-push-signoz
pull-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
@@ -155,22 +166,6 @@ run-testing:
down-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
clear-standalone-data:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-swarm-data:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-standalone-ch:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
clear-swarm-ch:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
check-no-ee-references:
@echo "Checking for 'ee' package references in 'pkg' directory..."
@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
@@ -183,14 +178,42 @@ check-no-ee-references:
test:
go test ./pkg/...
goreleaser-snapshot:
########################################################
# Goreleaser
########################################################
.PHONY: gor-snapshot gor-snapshot-histogram-quantile gor-snapshot-signoz gor-snapshot-signoz-community gor-split gor-split-histogram-quantile gor-split-signoz gor-split-signoz-community gor-merge
gor-snapshot:
@if [[ ${GORELEASER_WORKDIR} ]]; then \
cd ${GORELEASER_WORKDIR} && \
goreleaser release --clean --snapshot; \
cd -; \
${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --snapshot; \
else \
goreleaser release --clean --snapshot; \
${GORELEASER_BIN} release --clean --snapshot; \
fi
goreleaser-snapshot-histogram-quantile:
gor-snapshot-histogram-quantile:
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot
gor-snapshot-signoz: build-frontend-static
make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) goreleaser-snapshot
gor-snapshot-signoz-community: build-frontend-static
make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) goreleaser-snapshot
gor-split:
@if [[ ${GORELEASER_WORKDIR} ]]; then \
${GORELEASER_BIN} release --config ${GORELEASER_WORKDIR}/.goreleaser.yaml --clean --split; \
else \
${GORELEASER_BIN} release --clean --split; \
fi
gor-split-histogram-quantile:
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-split
gor-split-signoz: build-frontend-static
make GORELEASER_WORKDIR=$(EE_QUERY_SERVICE_DIRECTORY) goreleaser-split
gor-split-signoz-community: build-frontend-static
make GORELEASER_WORKDIR=$(QUERY_SERVICE_DIRECTORY) goreleaser-split
gor-merge:
${GORELEASER_BIN} continue --merge
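Typical usage of these goreleaser targets might look like the following (a sketch; it assumes `goreleaser` is installed locally and the relevant `.goreleaser.yaml` files are in place):

```bash
make gor-snapshot-signoz              # snapshot build from the EE query-service directory
make gor-snapshot-signoz-community    # snapshot build from the community query-service directory
make gor-snapshot-histogram-quantile  # snapshot build of the histogram-quantile helper
```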

4
conf/cache-config.yml Normal file
View File

@@ -0,0 +1,4 @@
provider: "inmemory"
inmemory:
ttl: 60m
cleanupInterval: 10m

View File

@@ -70,26 +70,76 @@ sqlstore:
##################### APIServer #####################
apiserver:
timeout:
# Default request timeout.
default: 60s
# Maximum request timeout.
max: 600s
# List of routes to exclude from request timeout.
excluded_routes:
- /api/v1/logs/tail
- /api/v3/logs/livetail
logging:
# List of routes to exclude from request/response logging.
excluded_routes:
- /api/v1/health
- /api/v1/version
- /
##################### TelemetryStore #####################
telemetrystore:
# specifies the telemetrystore provider to use.
# Specifies the telemetrystore provider to use.
provider: clickhouse
clickhouse:
# The DSN to use for ClickHouse.
dsn: http://localhost:9000
# Maximum number of idle connections in the connection pool.
max_idle_conns: 50
# Maximum number of open connections to the database.
max_open_conns: 100
# Maximum time to wait for a connection to be established.
dial_timeout: 5s
dial_timeout: 5s
clickhouse:
# The DSN to use for ClickHouse.
dsn: http://localhost:9000
##################### Alertmanager #####################
alertmanager:
# Specifies the alertmanager provider to use.
provider: legacy
legacy:
# The API URL (with prefix) of the legacy Alertmanager instance.
api_url: http://localhost:9093/api
signoz:
# The poll interval for periodically syncing the alertmanager with the config in the store.
poll_interval: 1m
# The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself.
external_url: http://localhost:9093
# The global configuration for the alertmanager. The exhaustive list of fields can be found upstream: https://github.com/prometheus/alertmanager/blob/efa05feffd644ba4accb526e98a8c6545d26a783/config/config.go#L833
global:
# ResolveTimeout is the time after which an alert is declared resolved if it has not been updated.
resolve_timeout: 5m
route:
# GroupByStr is the list of labels to group alerts by.
group_by:
- alertname
# GroupInterval is the interval at which alerts are grouped.
group_interval: 1m
# GroupWait is the time to wait before sending alerts to receivers.
group_wait: 1m
# RepeatInterval is the interval at which alerts are repeated.
repeat_interval: 1h
alerts:
# Interval between garbage collection of alerts.
gc_interval: 30m
silences:
# Maximum number of silences, including expired silences. If negative or zero, no limit is set.
max: 0
# Maximum size of the silences in bytes. If negative or zero, no limit is set.
max_size_bytes: 0
# Interval between garbage collection and snapshotting of the silences. The snapshot will be stored in the state store.
maintenance_interval: 15m
# Retention of the silences.
retention: 120h
nflog:
# Interval between garbage collection and snapshotting of the notification logs. The snapshot will be stored in the state store.
maintenance_interval: 15m
# Retention of the notification logs.
retention: 120h

25
conf/prometheus.yml Normal file
View File

@@ -0,0 +1,25 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- 127.0.0.1:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []
remote_read:
- url: tcp://localhost:9000/signoz_metrics

View File

@@ -26,7 +26,7 @@ cd deploy/docker
docker compose up -d
```
Open http://localhost:3301 in your favourite browser.
Open http://localhost:8080 in your favourite browser.
To start collecting logs and metrics from your infrastructure, run the following command:
@@ -55,7 +55,7 @@ cd deploy/docker-swarm
docker stack deploy -c docker-compose.yaml signoz
```
Open http://localhost:3301 in your favourite browser.
Open http://localhost:8080 in your favourite browser.
To start collecting logs and metrics from your infrastructure, run the following command:

View File

@@ -1,64 +0,0 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
# to handle uri issue 414 from nginx
client_max_body_size 24M;
large_client_header_buffers 8 128k;
location / {
if ( $uri = '/index.html' ) {
add_header Cache-Control no-store always;
}
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
proxy_pass http://query-service:8080;
proxy_http_version 1.1;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
# dont buffer the data send it directly to client.
proxy_buffering off;
proxy_cache off;
}
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
location /ws {
proxy_pass http://query-service:8080/ws;
proxy_http_version 1.1;
proxy_set_header Upgrade "websocket";
proxy_set_header Connection "upgrade";
proxy_read_timeout 86400;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}

View File

@@ -1 +1 @@
server_endpoint: ws://query-service:4320/v1/opamp
server_endpoint: ws://signoz:4320/v1/opamp

View File

@@ -172,39 +172,30 @@ services:
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- ./clickhouse-setup/data/alertmanager:/data
depends_on:
- query-service
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:0.75.0
image: signoz/signoz:v0.76.2
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- ./clickhouse-setup/data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- SIGNOZ_JWT_SECRET=secret
healthcheck:
test:
- CMD
@@ -215,19 +206,9 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.75.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.29
image: signoz/signoz-otel-collector:v0.111.34
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -248,10 +229,10 @@ services:
depends_on:
- clickhouse
- schema-migrator
- query-service
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.29
image: signoz/signoz-schema-migrator:v0.111.34
deploy:
restart_policy:
condition: on-failure
@@ -266,8 +247,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:

View File

@@ -108,33 +108,23 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
- query-service
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:0.75.0
image: signoz/signoz:v0.76.2
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -151,19 +141,9 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.75.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.29
image: signoz/signoz-otel-collector:v0.111.34
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -184,10 +164,10 @@ services:
depends_on:
- clickhouse
- schema-migrator
- query-service
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.29
image: signoz/signoz-schema-migrator:v0.111.34
deploy:
restart_policy:
condition: on-failure
@@ -202,8 +182,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:

View File

@@ -42,7 +42,7 @@ receivers:
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
expr: 'attributes.container_name matches "^(signoz_(logspout|signoz|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
processors:
batch:
send_batch_size: 10000

View File

@@ -175,36 +175,24 @@ services:
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse-3:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.75.0}
container_name: signoz-query-service
image: signoz/signoz:${DOCKER_TAG:-v0.76.2}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "3301:8080" # signoz port
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -221,21 +209,10 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.75.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.34}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -257,11 +234,11 @@ services:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
query-service:
signoz:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
container_name: schema-migrator-sync
command:
- sync
@@ -272,7 +249,7 @@ services:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
container_name: schema-migrator-async
command:
- async
@@ -283,8 +260,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:

View File

@@ -108,37 +108,25 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.75.0}
container_name: signoz-query-service
image: signoz/signoz:${DOCKER_TAG:-v0.76.2}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
- --gateway-url=https://api.staging.signoz.cloud
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -156,20 +144,9 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.75.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.34}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -187,11 +164,11 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
query-service:
signoz:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
container_name: schema-migrator-sync
command:
- sync
@@ -203,7 +180,7 @@ services:
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
container_name: schema-migrator-async
command:
- async
@@ -214,8 +191,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:

View File

@@ -108,36 +108,24 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
signoz:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.75.0}
container_name: signoz-query-service
image: signoz/signoz:${DOCKER_TAG:-v0.76.2}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "3301:8080" # signoz port
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -154,20 +142,9 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.75.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.34}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -185,11 +162,11 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
query-service:
signoz:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
container_name: schema-migrator-sync
command:
- sync
@@ -201,7 +178,7 @@ services:
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.29}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.34}
container_name: schema-migrator-async
command:
- async
@@ -212,8 +189,6 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:

View File

@@ -79,7 +79,7 @@ receivers:
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
expr: 'attributes.container_name matches "^signoz|(signoz-(|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
processors:
batch:
send_batch_size: 10000

View File

@@ -127,7 +127,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="3301|4317"
local ports_pattern="8080|4317"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -144,7 +144,7 @@ check_ports_occupied() {
send_event "port_not_available"
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "SigNoz requires ports 8080 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/install/troubleshooting/"
echo "++++++++++++++++++++++++++++++++++++++++"
echo ""
@@ -248,7 +248,7 @@ wait_for_containers_start() {
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:8080/api/v1/health?live=1" || true)"
if [[ status_code -eq 200 ]]; then
break
else
@@ -484,7 +484,7 @@ pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1
# check for open ports, if signoz is not installed
if is_command_present docker-compose; then
if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
if $sudo_cmd $docker_compose_cmd ps | grep "signoz" | grep -q "healthy" > /dev/null 2>&1; then
echo "SigNoz already installed, skipping the occupied ports check"
else
check_ports_occupied
@@ -533,7 +533,7 @@ else
echo ""
echo "🟢 Your installation is complete!"
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo -e "🟢 SigNoz is running on http://localhost:8080"
echo ""
echo " By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"

View File

@@ -0,0 +1,95 @@
# Development Guide
Welcome! This guide will help you set up your local development environment for SigNoz. Let's get you started! 🚀
## What do I need?
Before diving in, make sure you have these tools installed:
- **Git** - Our version control system
- Download from [git-scm.com](https://git-scm.com/)
- **Go** - Powers our backend
- Download from [go.dev/dl](https://go.dev/dl/)
- Check [go.mod](../../go.mod#L3) for the minimum version
- **GCC** - Required for CGO dependencies
- Download from [gcc.gnu.org](https://gcc.gnu.org/)
- **Node** - Powers our frontend
- Download from [nodejs.org](https://nodejs.org)
- Check [.nvmrc](../../frontend/.nvmrc) for the version
- **Yarn** - Our frontend package manager
- Follow the [installation guide](https://yarnpkg.com/getting-started/install)
- **Docker** - For running Clickhouse and Postgres locally
- Get it from [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)
> 💡 **Tip**: Run `make help` to see all available commands with descriptions
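If you want a quick sanity check that the prerequisites are in place before cloning, the commands below are illustrative — the exact minimum versions live in `go.mod` and `frontend/.nvmrc`, so compare against those:
```bash
# print the versions of the required tools;
# compare Go against go.mod and Node against frontend/.nvmrc
git --version
go version
gcc --version
node --version
yarn --version
docker --version
```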
## How do I get the code?
1. Open your terminal
2. Clone the repository:
```bash
git clone https://github.com/SigNoz/signoz.git
```
3. Navigate to the project:
```bash
cd signoz
```
## How do I run it locally?
SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.
### 1. Setting up Clickhouse
First, we need to get Clickhouse running:
```bash
make devenv-clickhouse
```
This command:
- Starts Clickhouse in a single-shard, single-replica cluster
- Sets up Zookeeper
- Runs the latest schema migrations
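To confirm Clickhouse actually came up before starting the backend, here is a small sketch — it assumes the dev environment runs Clickhouse in Docker and exposes the default HTTP port 8123 on localhost, which may differ in your setup:
```bash
# the container started by the make target should show up here
docker ps --filter "name=clickhouse"

# ClickHouse's HTTP interface answers "Ok." on its ping endpoint
curl -s http://localhost:8123/ping
```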
### 2. Starting the Backend
1. Run the backend server:
```bash
make run-go
```
2. Verify it's working:
```bash
curl http://localhost:8080/api/v1/health
```
You should see: `{"status":"ok"}`
> 💡 **Tip**: The API server runs at `http://localhost:8080/` by default
### 3. Setting up the Frontend
1. From the `frontend` directory, install dependencies:
```bash
yarn install
```
2. Create a `.env` file in the `frontend` directory:
```env
FRONTEND_API_ENDPOINT=http://localhost:8080
```
3. Start the development server:
```bash
yarn dev
```
> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
Now you're all set to start developing! Happy coding! 🎉

View File

@@ -1,14 +0,0 @@
{
"name": "e2e",
"version": "1.0.0",
"main": "index.js",
"license": "MIT",
"devDependencies": {
"@playwright/test": "^1.22.0",
"@types/node": "^20.9.2"
},
"scripts": {},
"dependencies": {
"dotenv": "8.2.0"
}
}

View File

@@ -1,46 +0,0 @@
import { defineConfig, devices } from "@playwright/test";
import dotenv from "dotenv";
dotenv.config();
export default defineConfig({
testDir: "./tests",
fullyParallel: true,
forbidOnly: !!process.env.CI,
name: "Signoz E2E",
retries: process.env.CI ? 2 : 0,
reporter: process.env.CI ? "github" : "list",
preserveOutput: "always",
updateSnapshots: "all",
quiet: false,
testMatch: ["**/*.spec.ts"],
use: {
trace: "on-first-retry",
baseURL:
process.env.PLAYWRIGHT_TEST_BASE_URL || "https://stagingapp.signoz.io/",
},
projects: [
{ name: "setup", testMatch: /.*\.setup\.ts/ },
{
name: "chromium",
use: {
...devices["Desktop Chrome"],
// Use prepared auth state.
storageState: ".auth/user.json",
},
dependencies: ["setup"],
},
],
});

View File

@@ -1,37 +0,0 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import dotenv from "dotenv";
dotenv.config();
const authFile = ".auth/user.json";
test("E2E Login Test", async ({ page }) => {
await Promise.all([page.goto("/"), page.waitForRequest("**/version")]);
const signup = "Monitor your applications. Find what is causing issues.";
const el = await page.locator(`text=${signup}`);
expect(el).toBeVisible();
await page
.locator("id=loginEmail")
.type(
process.env.PLAYWRIGHT_USERNAME ? process.env.PLAYWRIGHT_USERNAME : ""
);
await page.getByText("Next").click();
await page
.locator('input[id="currentPassword"]')
.fill(
process.env.PLAYWRIGHT_PASSWORD ? process.env.PLAYWRIGHT_PASSWORD : ""
);
await page.locator('button[data-attr="signup"]').click();
await expect(page).toHaveURL(ROUTES.APPLICATION);
await page.context().storageState({ path: authFile });
});

View File

@@ -1,10 +0,0 @@
export const SERVICE_TABLE_HEADERS = {
APPLICATION: "Applicaton",
P99LATENCY: "P99 latency (in ms)",
ERROR_RATE: "Error Rate (% of total)",
OPS_PER_SECOND: "Operations Per Second",
};
export const DATA_TEST_IDS = {
NEW_DASHBOARD_BTN: "create-new-dashboard",
};

View File

@@ -1,40 +0,0 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import { DATA_TEST_IDS, SERVICE_TABLE_HEADERS } from "./contants";
test("Basic Navigation Check across different resources", async ({ page }) => {
// route to services page and check if the page renders fine with BE contract
await Promise.all([
page.goto(ROUTES.APPLICATION),
page.waitForRequest("**/v1/services"),
]);
const p99Latency = page.locator(
`th:has-text("${SERVICE_TABLE_HEADERS.P99LATENCY}")`
);
await expect(p99Latency).toBeVisible();
// route to the new trace explorer page and check if the page renders fine
await page.goto(ROUTES.TRACES_EXPLORER);
await page.waitForLoadState("networkidle");
const listViewTable = await page
.locator('div[role="presentation"]')
.isVisible();
expect(listViewTable).toBeTruthy();
// route to the dashboards page and check if the page renders fine
await Promise.all([
page.goto(ROUTES.ALL_DASHBOARD),
page.waitForRequest("**/v1/dashboards"),
]);
const newDashboardBtn = await page
.locator(`data-testid=${DATA_TEST_IDS.NEW_DASHBOARD_BTN}`)
.isVisible();
expect(newDashboardBtn).toBeTruthy();
});

View File

@@ -1,46 +0,0 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@playwright/test@^1.22.0":
version "1.40.0"
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.40.0.tgz#d06c506977dd7863aa16e07f2136351ecc1be6ed"
integrity sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==
dependencies:
playwright "1.40.0"
"@types/node@^20.9.2":
version "20.9.2"
resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.2.tgz#002815c8e87fe0c9369121c78b52e800fadc0ac6"
integrity sha512-WHZXKFCEyIUJzAwh3NyyTHYSR35SevJ6mZ1nWwJafKtiQbqRTIKSRcw3Ma3acqgsent3RRDqeVwpHntMk+9irg==
dependencies:
undici-types "~5.26.4"
dotenv@8.2.0:
version "8.2.0"
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a"
integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw==
fsevents@2.3.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
playwright-core@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.40.0.tgz#82f61e5504cb3097803b6f8bbd98190dd34bdf14"
integrity sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==
playwright@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.40.0.tgz#2a1824b9fe5c4fe52ed53db9ea68003543a99df0"
integrity sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==
dependencies:
playwright-core "1.40.0"
optionalDependencies:
fsevents "2.3.2"
undici-types@~5.26.4:
version "5.26.5"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==

View File

@@ -2,26 +2,69 @@ package middleware
import (
"net/http"
"time"
"go.signoz.io/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/uptrace/bun"
"go.uber.org/zap"
)
type Pat struct {
db *bun.DB
uuid *authtypes.UUID
headers []string
}
func NewPat(headers []string) *Pat {
return &Pat{uuid: authtypes.NewUUID(), headers: headers}
func NewPat(db *bun.DB, headers []string) *Pat {
return &Pat{db: db, uuid: authtypes.NewUUID(), headers: headers}
}
func (p *Pat) Wrap(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var values []string
var patToken string
var pat types.StorablePersonalAccessToken
var updateLastUsed bool
for _, header := range p.headers {
values = append(values, r.Header.Get(header))
}
if header == "SIGNOZ-API-KEY" {
patToken = values[0]
err := p.db.NewSelect().Model(&pat).Where("token = ?", patToken).Scan(r.Context())
if err != nil {
next.ServeHTTP(w, r)
return
}
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
next.ServeHTTP(w, r)
return
}
// get user from db
user := types.User{}
err = p.db.NewSelect().Model(&user).Where("id = ?", pat.UserID).Scan(r.Context())
if err != nil {
next.ServeHTTP(w, r)
return
}
jwt := authtypes.Claims{
UserID: user.ID,
GroupID: user.GroupID,
Email: user.Email,
OrgID: user.OrgID,
}
ctx := authtypes.NewContextWithClaims(r.Context(), jwt)
r = r.WithContext(ctx)
// Mark to update last used since SIGNOZ-API-KEY is present and successful
updateLastUsed = true
}
}
ctx, err := p.uuid.ContextFromRequest(r.Context(), values...)
if err != nil {
next.ServeHTTP(w, r)
@@ -31,6 +74,16 @@ func (p *Pat) Wrap(next http.Handler) http.Handler {
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
// update last used only if SIGNOZ-API-KEY was present and successful
if updateLastUsed {
pat.LastUsed = time.Now().Unix()
_, err = p.db.NewUpdate().Model(&pat).Column("last_used").Where("token = ?", patToken).Where("revoked = false").Exec(r.Context())
if err != nil {
zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
}
}
})
}
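The middleware above resolves a PAT from the `SIGNOZ-API-KEY` request header, checks expiry, and attaches the owning user's claims to the request context before the handler runs. A hedged usage sketch — the endpoint path is illustrative and the placeholder must be replaced with a real token:
```bash
# send a PAT in the SIGNOZ-API-KEY header; on success the request is
# handled with the token owner's claims, and last_used is updated
curl -s \
  -H "SIGNOZ-API-KEY: <your-personal-access-token>" \
  "http://localhost:8080/api/v1/version"   # illustrative endpoint
```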

View File

@@ -0,0 +1,70 @@
# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
version: 2
project_name: signoz
before:
hooks:
- go mod tidy
builds:
- id: signoz
binary: bin/signoz
main: ee/query-service/main.go
env:
- CGO_ENABLED=1
- >-
{{- if eq .Os "linux" }}
{{- if eq .Arch "arm64" }}CC=aarch64-linux-gnu-gcc{{- end }}
{{- end }}
goos:
- linux
- darwin
goarch:
- amd64
- arm64
goamd64:
- v1
goarm64:
- v8.0
ldflags:
- -s -w
- -X github.com/SigNoz/signoz/pkg/query-service/version.version={{ .Version }}
- -X main.commit={{ .Commit }} -X main.date={{ .CommitDate }}
- -X main.builtBy=goreleaser
- -X github.com/SigNoz/signoz/pkg/query-service/version.buildVersion={{ .Version }}
- -X github.com/SigNoz/signoz/pkg/query-service/version.buildHash={{ .ShortCommit }}
- -X github.com/SigNoz/signoz/pkg/query-service/version.buildTime={{ .Date }}
- -X github.com/SigNoz/signoz/pkg/query-service/version.gitBranch={{ .Branch }}
- -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
- -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
- >-
{{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
mod_timestamp: "{{ .CommitTimestamp }}"
tags:
- timetzdata
archives:
- formats:
- tar.gz
name_template: >-
{{ .ProjectName }}_{{- .Os }}_{{- .Arch }}
wrap_in_directory: true
strip_binary_directory: false
files:
- src: README.md
dst: README.md
- src: LICENSE
dst: LICENSE
- src: frontend/build
dst: web
- src: conf
dst: conf
- src: templates
dst: templates
release:
name_template: "v{{ .Version }}"
draft: false
prerelease: auto
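This GoReleaser configuration builds the consolidated `signoz` binary and packages the frontend build, configs, and templates into the release archive. A minimal local smoke test, assuming GoReleaser (Pro, per the schema reference at the top) and the cross-compilation toolchain are installed — the flags below are standard GoReleaser options, not anything defined by this repository:
```bash
# build a snapshot release locally without publishing anything;
# --clean wipes the dist/ directory from previous runs
goreleaser release --snapshot --clean
```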

View File

@@ -13,21 +13,21 @@ RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
# set working directory
WORKDIR /root
# copy the query-service binary
COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service
# copy the signoz binary
COPY ee/query-service/bin/signoz-${TARGETOS}-${TARGETARCH} /root/signoz
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service
# Make signoz executable for non-root users
RUN chmod 755 /root /root/signoz
# Copy frontend
COPY frontend/build/ /etc/signoz/web/
# run the binary
ENTRYPOINT ["./query-service"]
ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]

View File

@@ -3,8 +3,8 @@ package anomaly
import (
"context"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
)
type DailyProvider struct {

View File

@@ -3,8 +3,8 @@ package anomaly
import (
"context"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
)
type HourlyProvider struct {

View File

@@ -4,8 +4,8 @@ import (
"math"
"time"
"go.signoz.io/signoz/pkg/query-service/common"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/common"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
)
type Seasonality string

View File

@@ -5,11 +5,11 @@ import (
"math"
"time"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/interfaces"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"github.com/SigNoz/signoz/pkg/query-service/cache"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)

View File

@@ -3,8 +3,8 @@ package anomaly
import (
"context"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
)
type WeeklyProvider struct {

View File

@@ -5,22 +5,24 @@ import (
"net/http/httputil"
"time"
"github.com/SigNoz/signoz/ee/query-service/dao"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/interfaces"
"github.com/SigNoz/signoz/ee/query-service/license"
"github.com/SigNoz/signoz/ee/query-service/usage"
"github.com/SigNoz/signoz/pkg/alertmanager"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/cache"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
rules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/version"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/cache"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/version"
"go.signoz.io/signoz/pkg/types/authtypes"
)
type APIHandlerOptions struct {
@@ -51,7 +53,7 @@ type APIHandler struct {
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
@@ -67,6 +69,8 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
Signoz: signoz,
})
if err != nil {

View File

@@ -12,10 +12,10 @@ import (
"github.com/gorilla/mux"
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/model"
baseauth "github.com/SigNoz/signoz/pkg/query-service/auth"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)
func parseRequest(r *http.Request, req interface{}) error {
@@ -134,7 +134,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
return
}
_, registerError := baseauth.Register(ctx, req)
_, registerError := baseauth.Register(ctx, req, ah.Signoz.Alertmanager)
if !registerError.IsNil() {
RespondError(w, apierr, nil)
return

View File

@@ -10,15 +10,15 @@ import (
"strings"
"time"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/auth"
baseconstants "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/dao"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/dao"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/types"
"go.uber.org/zap"
)
@@ -118,10 +118,10 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
return "", apiErr
}
allPats, err := ah.AppDao().ListPATs(ctx)
allPats, err := ah.AppDao().ListPATs(ctx, orgId)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't list PATs: %w", err.Error(),
"couldn't list PATs: %w", err,
))
}
for _, p := range allPats {
@@ -136,18 +136,22 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
)
newPAT := model.PAT{
Token: generatePATToken(),
UserID: integrationUser.ID,
Name: integrationPATName,
Role: baseconstants.ViewerGroup,
ExpiresAt: 0,
CreatedAt: time.Now().Unix(),
UpdatedAt: time.Now().Unix(),
StorablePersonalAccessToken: types.StorablePersonalAccessToken{
Token: generatePATToken(),
UserID: integrationUser.ID,
Name: integrationPATName,
Role: baseconstants.ViewerGroup,
ExpiresAt: 0,
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
},
}
integrationPAT, err := ah.AppDao().CreatePAT(ctx, newPAT)
integrationPAT, err := ah.AppDao().CreatePAT(ctx, orgId, newPAT)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err.Error(),
"couldn't create cloud integration PAT: %w", err,
))
}
return integrationPAT.Token, nil

View File

@@ -1,15 +1,15 @@
package api
import (
"errors"
"net/http"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/query-service/app/dashboards"
"github.com/SigNoz/signoz/pkg/query-service/auth"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/gorilla/mux"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/model"
)
func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
@@ -31,26 +31,31 @@ func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request
// Get the dashboard UUID from the request
uuid := mux.Vars(r)["uuid"]
if strings.HasPrefix(uuid,"integration") {
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: errors.New("dashboards created by integrations cannot be unlocked")}, "You are not authorized to lock/unlock this dashboard")
return
}
dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
if strings.HasPrefix(uuid, "integration") {
render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "dashboards created by integrations cannot be modified"))
return
}
user := common.GetUserFromContext(r.Context())
if !auth.IsAdmin(user) && (dashboard.CreateBy != nil && *dashboard.CreateBy != user.Email) {
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: err}, "You are not authorized to lock/unlock this dashboard")
claims, ok := authtypes.ClaimsFromContext(r.Context())
if !ok {
render.Error(w, errors.Newf(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "unauthenticated"))
return
}
dashboard, err := dashboards.GetDashboard(r.Context(), claims.OrgID, uuid)
if err != nil {
render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to get dashboard"))
return
}
if !auth.IsAdminV2(claims) && (dashboard.CreatedBy != claims.Email) {
render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "You are not authorized to lock/unlock this dashboard"))
return
}
// Lock/Unlock the dashboard
err = dashboards.LockUnlockDashboard(r.Context(), uuid, lock)
err = dashboards.LockUnlockDashboard(r.Context(), claims.OrgID, uuid, lock)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to lock/unlock dashboard"))
return
}

View File

@@ -6,9 +6,10 @@ import (
"fmt"
"net/http"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/types"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
)
func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
@@ -24,7 +25,7 @@ func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
req := model.OrgDomain{}
req := types.GettableOrgDomain{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
@@ -54,12 +55,12 @@ func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
return
}
req := model.OrgDomain{Id: domainId}
req := types.GettableOrgDomain{StorableOrgDomain: types.StorableOrgDomain{ID: domainId}}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
req.Id = domainId
req.ID = domainId
if err := req.Valid(nil); err != nil {
RespondError(w, model.BadRequest(err), nil)
}

View File

@@ -8,8 +8,8 @@ import (
"net/http"
"time"
"go.signoz.io/signoz/ee/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/ee/query-service/constants"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

View File

@@ -3,8 +3,8 @@ package api
import (
"testing"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/stretchr/testify/assert"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
func TestMergeFeatureSets(t *testing.T) {

View File

@@ -4,7 +4,7 @@ import (
"net/http"
"strings"
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
)
func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {

View File

@@ -3,13 +3,12 @@ package api
import (
"encoding/json"
"fmt"
"io"
"net/http"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/http/render"
"go.uber.org/zap"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/integrations/signozio"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/http/render"
)
type DayWiseBreakdown struct {
@@ -48,6 +47,10 @@ type details struct {
BillTotal float64 `json:"billTotal"`
}
type Redirect struct {
RedirectURL string `json:"redirectURL"`
}
type billingDetails struct {
Status string `json:"status"`
Data struct {
@@ -119,36 +122,31 @@ func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request)
render.Success(w, http.StatusNoContent, nil)
}
func getCheckoutPortalResponse(redirectURL string) *Redirect {
return &Redirect{RedirectURL: redirectURL}
}
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
type checkoutResponse struct {
Status string `json:"status"`
Data struct {
RedirectURL string `json:"redirectURL"`
} `json:"data"`
checkoutRequest := &model.CheckoutRequest{}
if err := json.NewDecoder(r.Body).Decode(checkoutRequest); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
license := ah.LM().GetActiveLicense()
if license == nil {
RespondError(w, model.BadRequestStr("cannot proceed with checkout without license key"), nil)
return
}
redirectUrl, err := signozio.CheckoutSession(r.Context(), checkoutRequest, license.Key)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req)
if err != nil {
RespondError(w, model.InternalError(err), nil)
RespondError(w, err, nil)
return
}
// decode response body
var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
ah.Respond(w, resp.Data)
ah.Respond(w, getCheckoutPortalResponse(redirectUrl))
}
func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
@@ -228,102 +226,28 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
Licenses: licenses,
}
var currentActiveLicenseKey string
for _, license := range licenses {
if license.IsCurrent {
currentActiveLicenseKey = license.Key
}
}
// For the case when no license is applied i.e community edition
// There will be no trial details or license details
if currentActiveLicenseKey == "" {
ah.Respond(w, resp)
return
}
// Fetch trial details
hClient := &http.Client{}
url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
zap.L().Error("Error while creating request for trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid blocking the UI
ah.Respond(w, resp)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
trialResp, err := hClient.Do(req)
if err != nil {
zap.L().Error("Error while fetching trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
defer trialResp.Body.Close()
trialRespBody, err := io.ReadAll(trialResp.Body)
if err != nil || trialResp.StatusCode != http.StatusOK {
zap.L().Error("Error while fetching trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
// decode response body
var trialRespData model.SubscriptionServerResp
if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
zap.L().Error("Error while decoding trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
resp.TrialStart = trialRespData.Data.TrialStart
resp.TrialEnd = trialRespData.Data.TrialEnd
resp.OnTrial = trialRespData.Data.OnTrial
resp.WorkSpaceBlock = trialRespData.Data.WorkSpaceBlock
resp.TrialConvertedToSubscription = trialRespData.Data.TrialConvertedToSubscription
resp.GracePeriodEnd = trialRespData.Data.GracePeriodEnd
ah.Respond(w, resp)
}
func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
type checkoutResponse struct {
Status string `json:"status"`
Data struct {
RedirectURL string `json:"redirectURL"`
} `json:"data"`
portalRequest := &model.PortalRequest{}
if err := json.NewDecoder(r.Body).Decode(portalRequest); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
license := ah.LM().GetActiveLicense()
if license == nil {
RespondError(w, model.BadRequestStr("cannot request the portal session without license key"), nil)
return
}
redirectUrl, err := signozio.PortalSession(r.Context(), portalRequest, license.Key)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req)
if err != nil {
RespondError(w, model.InternalError(err), nil)
RespondError(w, err, nil)
return
}
// decode response body
var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
ah.Respond(w, resp.Data)
ah.Respond(w, getCheckoutPortalResponse(redirectUrl))
}

View File

@@ -9,11 +9,12 @@ import (
"net/http"
"time"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/auth"
baseconstants "github.com/SigNoz/signoz/pkg/query-service/constants"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
@@ -43,9 +44,11 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
return
}
pat := model.PAT{
Name: req.Name,
Role: req.Role,
ExpiresAt: req.ExpiresInDays,
StorablePersonalAccessToken: types.StorablePersonalAccessToken{
Name: req.Name,
Role: req.Role,
ExpiresAt: req.ExpiresInDays,
},
}
err = validatePATRequest(pat)
if err != nil {
@@ -55,8 +58,8 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
// All the PATs are associated with the user creating the PAT.
pat.UserID = user.ID
pat.CreatedAt = time.Now().Unix()
pat.UpdatedAt = time.Now().Unix()
pat.CreatedAt = time.Now()
pat.UpdatedAt = time.Now()
pat.LastUsed = 0
pat.Token = generatePATToken()
@@ -67,7 +70,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
var apierr basemodel.BaseApiError
if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
if pat, apierr = ah.AppDao().CreatePAT(ctx, user.OrgID, pat); apierr != nil {
RespondError(w, apierr, nil)
return
}
@@ -114,10 +117,10 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
req.UpdatedByUserID = user.ID
id := mux.Vars(r)["id"]
req.UpdatedAt = time.Now().Unix()
req.UpdatedAt = time.Now()
zap.L().Info("Got Update PAT request", zap.Any("pat", req))
var apierr basemodel.BaseApiError
if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
if apierr = ah.AppDao().UpdatePAT(ctx, user.OrgID, req, id); apierr != nil {
RespondError(w, apierr, nil)
return
}
@@ -136,7 +139,7 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
return
}
zap.L().Info("Get PATs for user", zap.String("user_id", user.ID))
pats, apierr := ah.AppDao().ListPATs(ctx)
pats, apierr := ah.AppDao().ListPATs(ctx, user.OrgID)
if apierr != nil {
RespondError(w, apierr, nil)
return
@@ -157,7 +160,7 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
}
zap.L().Info("Revoke PAT with id", zap.String("id", id))
if apierr := ah.AppDao().RevokePAT(ctx, id, user.ID); apierr != nil {
if apierr := ah.AppDao().RevokePAT(ctx, user.OrgID, id, user.ID); apierr != nil {
RespondError(w, apierr, nil)
return
}

View File

@@ -6,11 +6,11 @@ import (
"io"
"net/http"
"go.signoz.io/signoz/ee/query-service/anomaly"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/ee/query-service/anomaly"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
"github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"go.uber.org/zap"
)

View File

@@ -3,8 +3,8 @@ package api
import (
"net/http"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)
func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {

View File

@@ -3,10 +3,10 @@ package api
import (
"net/http"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/model"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/ee/query-service/app/db"
"github.com/SigNoz/signoz/ee/query-service/model"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

View File

@@ -7,9 +7,9 @@ import (
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/pkg/cache"
basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/cache"
basechr "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
)
type ClickhouseReader struct {

View File

@@ -4,8 +4,8 @@ import (
"errors"
"strconv"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/ee/query-service/model"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

View File

@@ -12,46 +12,47 @@ import (
"github.com/gorilla/handlers"
"github.com/jmoiron/sqlx"
eemiddleware "github.com/SigNoz/signoz/ee/http/middleware"
"github.com/SigNoz/signoz/ee/query-service/app/api"
"github.com/SigNoz/signoz/ee/query-service/app/db"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/dao"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/interfaces"
"github.com/SigNoz/signoz/ee/query-service/rules"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/http/middleware"
"github.com/SigNoz/signoz/pkg/query-service/auth"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/web"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
eemiddleware "go.signoz.io/signoz/ee/http/middleware"
"go.signoz.io/signoz/ee/query-service/app/api"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/auth"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/rules"
"go.signoz.io/signoz/pkg/http/middleware"
"go.signoz.io/signoz/pkg/signoz"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
"go.signoz.io/signoz/pkg/web"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
licensepkg "github.com/SigNoz/signoz/ee/query-service/license"
"github.com/SigNoz/signoz/ee/query-service/usage"
"go.signoz.io/signoz/pkg/query-service/agentConf"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/app/opamp"
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
"go.signoz.io/signoz/pkg/query-service/app/preferences"
"go.signoz.io/signoz/pkg/query-service/cache"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
baserules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/dashboards"
baseexplorer "github.com/SigNoz/signoz/pkg/query-service/app/explorer"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
opAmpModel "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
"github.com/SigNoz/signoz/pkg/query-service/app/preferences"
"github.com/SigNoz/signoz/pkg/query-service/cache"
baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
pqle "github.com/SigNoz/signoz/pkg/query-service/pqlEngine"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/telemetry"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
@@ -111,7 +112,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
return nil, err
}
@@ -119,7 +120,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.BunDB()); err != nil {
return nil, err
}
@@ -176,8 +177,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
<-readerReady
rm, err := makeRulesManager(serverOptions.PromConfigPath,
baseconst.GetAlertManagerApiPrefix(),
rm, err := makeRulesManager(
serverOptions.PromConfigPath,
serverOptions.RuleRepoURL,
serverOptions.SigNoz.SQLStore.SQLxDB(),
reader,
@@ -186,6 +187,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
lm,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
serverOptions.SigNoz.Alertmanager,
serverOptions.SigNoz.SQLStore,
)
if err != nil {
@@ -268,7 +271,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
JWT: serverOptions.Jwt,
}
apiHandler, err := api.NewAPIHandler(apiOpts)
apiHandler, err := api.NewAPIHandler(apiOpts, serverOptions.SigNoz)
if err != nil {
return nil, err
}
@@ -301,6 +304,11 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
&opAmpModel.AllAgents, agentConfMgr,
)
errorList := qb.PreloadMetricsMetadata(context.Background())
for _, er := range errorList {
zap.L().Error("failed to preload metrics metadata", zap.Error(er))
}
return s, nil
}
@@ -309,7 +317,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
r := baseapp.NewRouter()
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(eemiddleware.NewPat(s.serverOptions.SigNoz.SQLStore.BunDB(), []string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
@@ -342,7 +350,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
// add auth middleware
getUserFromRequest := func(ctx context.Context) (*types.GettableUser, error) {
user, err := auth.GetUserFromRequestContext(ctx, apiHandler)
user, err := auth.GetUserFromReqContext(ctx)
if err != nil {
return nil, err
@@ -357,7 +365,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
am := baseapp.NewAuthMiddleware(getUserFromRequest)
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat([]string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(eemiddleware.NewPat(s.serverOptions.SigNoz.SQLStore.BunDB(), []string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
@@ -530,7 +538,6 @@ func (s *Server) Stop() error {
func makeRulesManager(
promConfigPath,
alertManagerURL string,
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
@@ -538,39 +545,34 @@ func makeRulesManager(
disableRules bool,
fm baseint.FeatureLookup,
useLogsNewSchema bool,
useTraceNewSchema bool) (*baserules.Manager, error) {
useTraceNewSchema bool,
alertmanager alertmanager.Alertmanager,
sqlstore sqlstore.SQLStore,
) (*baserules.Manager, error) {
// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
if err != nil {
return nil, fmt.Errorf("failed to create pql engine : %v", err)
}
// notifier opts
notifierOpts := basealm.NotifierOptions{
QueueCapacity: 10000,
Timeout: 1 * time.Second,
AlertManagerURLs: []string{alertManagerURL},
}
// create manager opts
managerOpts := &baserules.ManagerOptions{
NotifierOpts: notifierOpts,
PqlEngine: pqle,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PqlEngine: pqle,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
Alertmanager: alertmanager,
SQLStore: sqlstore,
}
// create Manager

View File

@@ -1,56 +0,0 @@
package auth
import (
"context"
"fmt"
"time"
"go.signoz.io/signoz/ee/query-service/app/api"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
"go.uber.org/zap"
)
func GetUserFromRequestContext(ctx context.Context, apiHandler *api.APIHandler) (*types.GettableUser, error) {
patToken, ok := authtypes.UUIDFromContext(ctx)
if ok && patToken != "" {
zap.L().Debug("Received a non-zero length PAT token")
ctx := context.Background()
dao := apiHandler.AppDao()
pat, err := dao.GetPAT(ctx, patToken)
if err == nil && pat != nil {
zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
return nil, fmt.Errorf("PAT has expired")
}
group, apiErr := dao.GetGroupByName(ctx, pat.Role)
if apiErr != nil {
zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
return nil, apiErr
}
user, err := dao.GetUser(ctx, pat.UserID)
if err != nil {
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
telemetry.GetInstance().SetPatTokenUser()
dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
user.User.GroupID = group.ID
user.User.ID = pat.Id
return &types.GettableUser{
User: user.User,
Role: pat.Role,
}, nil
}
if err != nil {
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
}
return baseauth.GetUserFromReqContext(ctx)
}

View File

@@ -5,7 +5,7 @@ import (
)
const (
DefaultSiteURL = "https://localhost:3301"
DefaultSiteURL = "https://localhost:8080"
)
var LicenseSignozIo = "https://license.signoz.io/api/v1"

View File

@@ -1,8 +1,8 @@
package dao
import (
"go.signoz.io/signoz/ee/query-service/dao/sqlite"
"go.signoz.io/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/ee/query-service/dao/sqlite"
"github.com/SigNoz/signoz/pkg/sqlstore"
)
func InitDao(sqlStore sqlstore.SQLStore) (ModelDao, error) {

View File

@@ -4,14 +4,15 @@ import (
"context"
"net/url"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/types"
basedao "github.com/SigNoz/signoz/pkg/query-service/dao"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
ossTypes "github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/model"
basedao "go.signoz.io/signoz/pkg/query-service/dao"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
"github.com/uptrace/bun"
)
type ModelDao interface {
@@ -20,27 +21,26 @@ type ModelDao interface {
// SetFlagProvider sets the feature lookup provider
SetFlagProvider(flags baseint.FeatureLookup)
DB() *sqlx.DB
DB() *bun.DB
// auth methods
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*types.GettableOrgDomain, error)
// org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
ListDomains(ctx context.Context, orgId string) ([]types.GettableOrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*types.GettableOrgDomain, basemodel.BaseApiError)
CreateDomain(ctx context.Context, d *types.GettableOrgDomain) basemodel.BaseApiError
UpdateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
GetDomainByEmail(ctx context.Context, email string) (*types.GettableOrgDomain, basemodel.BaseApiError)
CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError)
UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError
CreatePAT(ctx context.Context, orgID string, p model.PAT) (model.PAT, basemodel.BaseApiError)
UpdatePAT(ctx context.Context, orgID string, p model.PAT, id string) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, token string) (*types.GettableUser, basemodel.BaseApiError)
ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError)
RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError
GetPATByID(ctx context.Context, orgID string, id string) (*model.PAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, orgID string, token string) (*ossTypes.GettableUser, basemodel.BaseApiError)
ListPATs(ctx context.Context, orgID string) ([]model.PAT, basemodel.BaseApiError)
RevokePAT(ctx context.Context, orgID string, id string, userID string) basemodel.BaseApiError
}

View File

@@ -7,15 +7,15 @@ import (
"strings"
"time"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/model"
baseauth "github.com/SigNoz/signoz/pkg/query-service/auth"
baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/google/uuid"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
"go.uber.org/zap"
)
@@ -53,7 +53,7 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
},
ProfilePictureURL: "", // Currently unused
GroupID: group.ID,
OrgID: domain.OrgId,
OrgID: domain.OrgID,
}
user, apiErr = m.CreateUser(ctx, user, false)

View File

@@ -9,32 +9,23 @@ import (
"strings"
"time"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/types"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
ossTypes "github.com/SigNoz/signoz/pkg/types"
"github.com/google/uuid"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
// StoredDomain represents stored database record for org domain
type StoredDomain struct {
Id uuid.UUID `db:"id"`
Name string `db:"name"`
OrgId string `db:"org_id"`
Data string `db:"data"`
CreatedAt int64 `db:"created_at"`
UpdatedAt int64 `db:"updated_at"`
}
// GetDomainFromSsoResponse uses relay state received from IdP to fetch
// user domain. The domain is further used to process validity of the response.
// when sending login request to IdP we send relay state as URL (site url)
// with domainId or domainName as query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*types.GettableOrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
var domainNameStr string
var domain *model.OrgDomain
var domain *types.GettableOrgDomain
for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
@@ -76,10 +67,14 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
}
// GetDomainByName returns org domain for a given domain name
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*types.GettableOrgDomain, basemodel.BaseApiError) {
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)
stored := types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("name = ?", name).
Limit(1).
Scan(ctx)
if err != nil {
if err == sql.ErrNoRows {
@@ -88,7 +83,7 @@ func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.Org
return nil, model.InternalError(err)
}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
@@ -96,10 +91,14 @@ func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.Org
}
// GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*types.GettableOrgDomain, basemodel.BaseApiError) {
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)
stored := types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("id = ?", id).
Limit(1).
Scan(ctx)
if err != nil {
if err == sql.ErrNoRows {
@@ -108,7 +107,7 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomai
return nil, model.InternalError(err)
}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
@@ -116,21 +115,24 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomai
}
// ListDomains gets the list of auth domains by org id
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
domains := []model.OrgDomain{}
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]types.GettableOrgDomain, basemodel.BaseApiError) {
domains := []types.GettableOrgDomain{}
stored := []StoredDomain{}
err := m.DB().SelectContext(ctx, &stored, `SELECT * FROM org_domains WHERE org_id=$1`, orgId)
stored := []types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("org_id = ?", orgId).
Scan(ctx)
if err != nil {
if err == sql.ErrNoRows {
return []model.OrgDomain{}, nil
return domains, nil
}
return nil, model.InternalError(err)
}
for _, s := range stored {
domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
domain := types.GettableOrgDomain{StorableOrgDomain: s}
if err := domain.LoadConfig(s.Data); err != nil {
zap.L().Error("ListDomains() failed", zap.Error(err))
}
@@ -141,14 +143,14 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
}
// CreateDomain creates a new auth domain
func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
func (m *modelDao) CreateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError {
if domain.Id == uuid.Nil {
domain.Id = uuid.New()
if domain.ID == uuid.Nil {
domain.ID = uuid.New()
}
if domain.OrgId == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
if domain.OrgID == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgID, Name "))
}
configJson, err := json.Marshal(domain)
@@ -157,14 +159,17 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
return model.InternalError(fmt.Errorf("domain creation failed"))
}
_, err = m.DB().ExecContext(ctx,
"INSERT INTO org_domains (id, name, org_id, data, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6)",
domain.Id,
domain.Name,
domain.OrgId,
configJson,
time.Now().Unix(),
time.Now().Unix())
storableDomain := types.StorableOrgDomain{
ID: domain.ID,
Name: domain.Name,
OrgID: domain.OrgID,
Data: string(configJson),
TimeAuditable: ossTypes.TimeAuditable{CreatedAt: time.Now(), UpdatedAt: time.Now()},
}
_, err = m.DB().NewInsert().
Model(&storableDomain).
Exec(ctx)
if err != nil {
zap.L().Error("failed to insert domain in db", zap.Error(err))
@@ -175,9 +180,9 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
}
// UpdateDomain updates stored config params for a domain
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
func (m *modelDao) UpdateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError {
if domain.Id == uuid.Nil {
if domain.ID == uuid.Nil {
zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain update failed"))
}
@@ -188,11 +193,19 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
return model.InternalError(fmt.Errorf("domain update failed"))
}
_, err = m.DB().ExecContext(ctx,
"UPDATE org_domains SET data = $1, updated_at = $2 WHERE id = $3",
configJson,
time.Now().Unix(),
domain.Id)
storableDomain := &types.StorableOrgDomain{
ID: domain.ID,
Name: domain.Name,
OrgID: domain.OrgID,
Data: string(configJson),
TimeAuditable: ossTypes.TimeAuditable{UpdatedAt: time.Now()},
}
_, err = m.DB().NewUpdate().
Model(storableDomain).
Column("data", "updated_at").
WherePK().
Exec(ctx)
if err != nil {
zap.L().Error("domain update failed", zap.Error(err))
@@ -210,9 +223,11 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
return model.InternalError(fmt.Errorf("domain delete failed"))
}
_, err := m.DB().ExecContext(ctx,
"DELETE FROM org_domains WHERE id = $1",
id)
storableDomain := &types.StorableOrgDomain{ID: id}
_, err := m.DB().NewDelete().
Model(storableDomain).
WherePK().
Exec(ctx)
if err != nil {
zap.L().Error("domain delete failed", zap.Error(err))
@@ -222,7 +237,7 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
return nil
}
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*types.GettableOrgDomain, basemodel.BaseApiError) {
if email == "" {
return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
@@ -235,8 +250,12 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
parsedDomain := components[1]
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, parsedDomain)
stored := types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("name = ?", parsedDomain).
Limit(1).
Scan(ctx)
if err != nil {
if err == sql.ErrNoRows {
@@ -245,7 +264,7 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.O
return nil, model.InternalError(err)
}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
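
The DAO changes above all apply the same translation: the hand-written sqlx query becomes a bun `NewSelect().Model(...).Where(...)` chain, the result is scanned into the storable row, and the row is wrapped into the gettable type before `LoadConfig` runs. Below is a minimal, self-contained sketch of the `GetDomainByName` lookup shape against an in-memory SQLite database; `orgDomain` is a cut-down stand-in for `types.StorableOrgDomain`, not the real type:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/sqlitedialect"
)

// orgDomain is a cut-down stand-in for types.StorableOrgDomain.
type orgDomain struct {
	bun.BaseModel `bun:"table:org_domains"`

	ID   string `bun:"id,pk,type:text"`
	Name string `bun:"name,type:varchar(50),notnull,unique"`
	Data string `bun:"data,type:text,notnull"`
}

func main() {
	sqldb, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	db := bun.NewDB(sqldb, sqlitedialect.New())
	ctx := context.Background()

	// Create the table and seed one row so the lookup below has data.
	if _, err := db.NewCreateTable().Model((*orgDomain)(nil)).Exec(ctx); err != nil {
		log.Fatal(err)
	}
	seed := orgDomain{ID: "d1", Name: "example.com", Data: `{"ssoEnabled":true}`}
	if _, err := db.NewInsert().Model(&seed).Exec(ctx); err != nil {
		log.Fatal(err)
	}

	// GetDomainByName-style lookup: the bun equivalent of
	// `SELECT * FROM org_domains WHERE name = ? LIMIT 1`.
	stored := orgDomain{}
	err = db.NewSelect().
		Model(&stored).
		Where("name = ?", "example.com").
		Limit(1).
		Scan(ctx)
	if err == sql.ErrNoRows {
		log.Fatal("domain not found")
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stored.ID, stored.Name, stored.Data)
}
```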

View File

@@ -3,11 +3,11 @@ package sqlite
import (
"fmt"
"github.com/jmoiron/sqlx"
basedao "go.signoz.io/signoz/pkg/query-service/dao"
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/sqlstore"
basedao "github.com/SigNoz/signoz/pkg/query-service/dao"
basedsql "github.com/SigNoz/signoz/pkg/query-service/dao/sqlite"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
)
type modelDao struct {
@@ -41,6 +41,6 @@ func InitDB(sqlStore sqlstore.SQLStore) (*modelDao, error) {
return m, nil
}
func (m *modelDao) DB() *sqlx.DB {
func (m *modelDao) DB() *bun.DB {
return m.ModelDaoSqlite.DB()
}

View File

@@ -3,39 +3,25 @@ package sqlite
import (
"context"
"fmt"
"strconv"
"time"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/types"
"github.com/SigNoz/signoz/ee/query-service/model"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"go.uber.org/zap"
)
func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) {
result, err := m.DB().ExecContext(ctx,
"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
p.UserID,
p.Token,
p.Role,
p.Name,
p.CreatedAt,
p.ExpiresAt,
p.UpdatedAt,
p.UpdatedByUserID,
p.LastUsed,
p.Revoked,
)
func (m *modelDao) CreatePAT(ctx context.Context, orgID string, p model.PAT) (model.PAT, basemodel.BaseApiError) {
p.StorablePersonalAccessToken.OrgID = orgID
_, err := m.DB().NewInsert().
Model(&p.StorablePersonalAccessToken).
Returning("id").
Exec(ctx)
if err != nil {
zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
}
id, err := result.LastInsertId()
if err != nil {
zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
}
p.Id = strconv.Itoa(int(id))
createdByUser, _ := m.GetUser(ctx, p.UserID)
if createdByUser == nil {
p.CreatedByUser = model.User{
@@ -54,14 +40,14 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem
return p, nil
}
func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError {
_, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET role=$1, name=$2, updated_at=$3, updated_by_user_id=$4 WHERE id=$5 and revoked=false;",
p.Role,
p.Name,
p.UpdatedAt,
p.UpdatedByUserID,
id)
func (m *modelDao) UpdatePAT(ctx context.Context, orgID string, p model.PAT, id string) basemodel.BaseApiError {
_, err := m.DB().NewUpdate().
Model(&p.StorablePersonalAccessToken).
Column("role", "name", "updated_at", "updated_by_user_id").
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("revoked = false").
Exec(ctx)
if err != nil {
zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT update failed"))
@@ -69,33 +55,32 @@ func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemo
return nil
}
func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed int64) basemodel.BaseApiError {
_, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET last_used=$1 WHERE token=$2 and revoked=false;",
lastUsed,
token)
if err != nil {
zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT last used update failed"))
}
return nil
}
func (m *modelDao) ListPATs(ctx context.Context, orgID string) ([]model.PAT, basemodel.BaseApiError) {
pats := []types.StorablePersonalAccessToken{}
func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) {
pats := []model.PAT{}
if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
if err := m.DB().NewSelect().
Model(&pats).
Where("revoked = false").
Where("org_id = ?", orgID).
Order("updated_at DESC").
Scan(ctx); err != nil {
zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
}
patsWithUsers := []model.PAT{}
for i := range pats {
patWithUser := model.PAT{
StorablePersonalAccessToken: pats[i],
}
createdByUser, _ := m.GetUser(ctx, pats[i].UserID)
if createdByUser == nil {
pats[i].CreatedByUser = model.User{
patWithUser.CreatedByUser = model.User{
NotFound: true,
}
} else {
pats[i].CreatedByUser = model.User{
patWithUser.CreatedByUser = model.User{
Id: createdByUser.ID,
Name: createdByUser.Name,
Email: createdByUser.Email,
@@ -107,11 +92,11 @@ func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApi
updatedByUser, _ := m.GetUser(ctx, pats[i].UpdatedByUserID)
if updatedByUser == nil {
pats[i].UpdatedByUser = model.User{
patWithUser.UpdatedByUser = model.User{
NotFound: true,
}
} else {
pats[i].UpdatedByUser = model.User{
patWithUser.UpdatedByUser = model.User{
Id: updatedByUser.ID,
Name: updatedByUser.Name,
Email: updatedByUser.Email,
@@ -120,15 +105,22 @@ func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApi
NotFound: false,
}
}
patsWithUsers = append(patsWithUsers, patWithUser)
}
return pats, nil
return patsWithUsers, nil
}
func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError {
func (m *modelDao) RevokePAT(ctx context.Context, orgID string, id string, userID string) basemodel.BaseApiError {
updatedAt := time.Now().Unix()
_, err := m.DB().ExecContext(ctx,
"UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
userID, updatedAt, id)
_, err := m.DB().NewUpdate().
Model(&types.StorablePersonalAccessToken{}).
Set("revoked = ?", true).
Set("updated_by_user_id = ?", userID).
Set("updated_at = ?", updatedAt).
Where("id = ?", id).
Where("org_id = ?", orgID).
Exec(ctx)
if err != nil {
zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT revoke failed"))
@@ -137,9 +129,13 @@ func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) base
}
func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemodel.BaseApiError) {
pats := []model.PAT{}
pats := []types.StorablePersonalAccessToken{}
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE token=? and revoked=false;`, token); err != nil {
if err := m.DB().NewSelect().
Model(&pats).
Where("token = ?", token).
Where("revoked = false").
Scan(ctx); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
}
@@ -150,13 +146,22 @@ func (m *modelDao) GetPAT(ctx context.Context, token string) (*model.PAT, basemo
}
}
return &pats[0], nil
patWithUser := model.PAT{
StorablePersonalAccessToken: pats[0],
}
return &patWithUser, nil
}
func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) {
pats := []model.PAT{}
func (m *modelDao) GetPATByID(ctx context.Context, orgID string, id string) (*model.PAT, basemodel.BaseApiError) {
pats := []types.StorablePersonalAccessToken{}
if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE id=? and revoked=false;`, id); err != nil {
if err := m.DB().NewSelect().
Model(&pats).
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("revoked = false").
Scan(ctx); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch PAT"))
}
@@ -167,26 +172,25 @@ func (m *modelDao) GetPATByID(ctx context.Context, id string) (*model.PAT, basem
}
}
return &pats[0], nil
patWithUser := model.PAT{
StorablePersonalAccessToken: pats[0],
}
return &patWithUser, nil
}
// deprecated
func (m *modelDao) GetUserByPAT(ctx context.Context, token string) (*types.GettableUser, basemodel.BaseApiError) {
func (m *modelDao) GetUserByPAT(ctx context.Context, orgID string, token string) (*types.GettableUser, basemodel.BaseApiError) {
users := []types.GettableUser{}
query := `SELECT
u.id,
u.name,
u.email,
u.password,
u.created_at,
u.profile_picture_url,
u.org_id,
u.group_id
FROM users u, personal_access_tokens p
WHERE u.id = p.user_id and p.token=? and p.expires_at >= strftime('%s', 'now');`
if err := m.DB().Select(&users, query, token); err != nil {
if err := m.DB().NewSelect().
Model(&users).
Column("u.id", "u.name", "u.email", "u.password", "u.created_at", "u.profile_picture_url", "u.org_id", "u.group_id").
Join("JOIN personal_access_tokens p ON u.id = p.user_id").
Where("p.token = ?", token).
Where("p.expires_at >= strftime('%s', 'now')").
Where("p.org_id = ?", orgID).
Scan(ctx); err != nil {
return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
}
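
The PAT queries follow the same sqlx-to-bun conversion; `GetUserByPAT` is the only one shown here that needs a join. The sketch below reproduces that join shape against an in-memory SQLite database; `user` and `accessToken` are trimmed stand-ins for the real user and `types.StorablePersonalAccessToken` rows, and the `strftime` expiry check is replaced by a Go-side timestamp:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/mattn/go-sqlite3"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/sqlitedialect"
)

// Cut-down stand-ins for the real user and PAT rows.
type user struct {
	bun.BaseModel `bun:"table:users,alias:u"`

	ID    string `bun:"id,pk"`
	Email string `bun:"email"`
}

type accessToken struct {
	bun.BaseModel `bun:"table:personal_access_tokens,alias:p"`

	Token     string `bun:"token,pk"`
	UserID    string `bun:"user_id"`
	OrgID     string `bun:"org_id"`
	ExpiresAt int64  `bun:"expires_at"`
}

func main() {
	sqldb, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	db := bun.NewDB(sqldb, sqlitedialect.New())
	ctx := context.Background()

	// Create both tables and seed one user with one token.
	for _, m := range []interface{}{(*user)(nil), (*accessToken)(nil)} {
		if _, err := db.NewCreateTable().Model(m).Exec(ctx); err != nil {
			log.Fatal(err)
		}
	}
	if _, err := db.NewInsert().Model(&user{ID: "u1", Email: "a@example.com"}).Exec(ctx); err != nil {
		log.Fatal(err)
	}
	pat := accessToken{Token: "tok", UserID: "u1", OrgID: "org1", ExpiresAt: time.Now().Add(time.Hour).Unix()}
	if _, err := db.NewInsert().Model(&pat).Exec(ctx); err != nil {
		log.Fatal(err)
	}

	// The GetUserByPAT join expressed with bun instead of raw SQL; the
	// expiry comparison uses a Go-side Unix timestamp rather than strftime.
	users := []user{}
	err = db.NewSelect().
		Model(&users).
		Column("u.id", "u.email").
		Join("JOIN personal_access_tokens p ON u.id = p.user_id").
		Where("p.token = ?", "tok").
		Where("p.org_id = ?", "org1").
		Where("p.expires_at >= ?", time.Now().Unix()).
		Scan(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", users)
}
```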

View File

@@ -6,3 +6,11 @@ type ValidateLicenseResponse struct {
Status status `json:"status"`
Data map[string]interface{} `json:"data"`
}
type CheckoutSessionRedirect struct {
RedirectURL string `json:"url"`
}
type CheckoutResponse struct {
Status status `json:"status"`
Data CheckoutSessionRedirect `json:"data"`
}
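
`CheckoutResponse` mirrors what the gateway returns for checkout and portal sessions; the callers added later only read `data.url`. A small stand-alone sketch of that decode step, with local copies of the two structs and a hypothetical 201 body:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Local copies of the response shapes; status is a plain string here,
// whereas the real package defines its own status type.
type checkoutSessionRedirect struct {
	RedirectURL string `json:"url"`
}

type checkoutResponse struct {
	Status string                  `json:"status"`
	Data   checkoutSessionRedirect `json:"data"`
}

func main() {
	// Hypothetical 201 body from the gateway.
	body := []byte(`{"status":"success","data":{"url":"https://billing.example.com/session/abc"}}`)

	var resp checkoutResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Data.RedirectURL) // the URL the frontend is redirected to
}
```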

View File

@@ -11,8 +11,8 @@ import (
"github.com/pkg/errors"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/model"
)
var C *Client
@@ -47,7 +47,7 @@ func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to create request: %w", err)))
return nil, model.BadRequest(errors.Wrap(err, "failed to create request"))
}
// Setting the custom header
@@ -55,7 +55,7 @@ func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
response, err := client.Do(req)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to make post request: %w", err)))
return nil, model.BadRequest(errors.Wrap(err, "failed to make post request"))
}
body, err := io.ReadAll(response.Body)
@@ -126,10 +126,98 @@ func SendUsage(ctx context.Context, usage model.UsagePayload) *model.ApiError {
case 200, 201:
return nil
case 400, 401:
return model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
return model.BadRequest(errors.Wrap(errors.New(string(body)),
"bad request error received from license.signoz.io"))
default:
return model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
return model.InternalError(errors.Wrap(errors.New(string(body)),
"internal error received from license.signoz.io"))
}
}
func CheckoutSession(ctx context.Context, checkoutRequest *model.CheckoutRequest, licenseKey string) (string, *model.ApiError) {
hClient := &http.Client{}
reqString, err := json.Marshal(checkoutRequest)
if err != nil {
return "", model.BadRequest(err)
}
req, err := http.NewRequestWithContext(ctx, "POST", C.GatewayUrl+"/v2/subscriptions/me/sessions/checkout", bytes.NewBuffer(reqString))
if err != nil {
return "", model.BadRequest(err)
}
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
response, err := hClient.Do(req)
if err != nil {
return "", model.BadRequest(err)
}
body, err := io.ReadAll(response.Body)
if err != nil {
return "", model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read checkout response from %v", C.GatewayUrl)))
}
defer response.Body.Close()
switch response.StatusCode {
case 201:
a := CheckoutResponse{}
err = json.Unmarshal(body, &a)
if err != nil {
return "", model.BadRequest(errors.Wrap(err, "failed to unmarshal zeus checkout response"))
}
return a.Data.RedirectURL, nil
case 400:
return "", model.BadRequest(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
case 401:
return "", model.Unauthorized(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
default:
return "", model.InternalError(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
}
}
func PortalSession(ctx context.Context, checkoutRequest *model.PortalRequest, licenseKey string) (string, *model.ApiError) {
hClient := &http.Client{}
reqString, err := json.Marshal(checkoutRequest)
if err != nil {
return "", model.BadRequest(err)
}
req, err := http.NewRequestWithContext(ctx, "POST", C.GatewayUrl+"/v2/subscriptions/me/sessions/portal", bytes.NewBuffer(reqString))
if err != nil {
return "", model.BadRequest(err)
}
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
response, err := hClient.Do(req)
if err != nil {
return "", model.BadRequest(err)
}
body, err := io.ReadAll(response.Body)
if err != nil {
return "", model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read portal response from %v", C.GatewayUrl)))
}
defer response.Body.Close()
switch response.StatusCode {
case 201:
a := CheckoutResponse{}
err = json.Unmarshal(body, &a)
if err != nil {
return "", model.BadRequest(errors.Wrap(err, "failed to unmarshal zeus portal response"))
}
return a.Data.RedirectURL, nil
case 400:
return "", model.BadRequest(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
case 401:
return "", model.Unauthorized(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
default:
return "", model.InternalError(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
}
}
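
Both new gateway helpers share one shape: marshal the request, POST it with the `X-Signoz-Cloud-Api-Key` header, and map 201/400/401/default onto distinct results. The sketch below reproduces that flow against a local `httptest` server; the endpoint path mirrors the checkout call above, while the error types are simplified to plain errors and the license key is a placeholder:

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/http/httptest"
)

type checkoutRequest struct {
	SuccessURL string `json:"url"`
}

type checkoutResponse struct {
	Status string `json:"status"`
	Data   struct {
		RedirectURL string `json:"url"`
	} `json:"data"`
}

// checkoutSession mirrors the structure of the new CheckoutSession helper,
// with *model.ApiError replaced by a plain error for the sketch.
func checkoutSession(ctx context.Context, gatewayURL, licenseKey string, req *checkoutRequest) (string, error) {
	payload, err := json.Marshal(req)
	if err != nil {
		return "", err
	}
	httpReq, err := http.NewRequestWithContext(ctx, "POST",
		gatewayURL+"/v2/subscriptions/me/sessions/checkout", bytes.NewBuffer(payload))
	if err != nil {
		return "", err
	}
	httpReq.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)

	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	switch resp.StatusCode {
	case 201:
		out := checkoutResponse{}
		if err := json.Unmarshal(body, &out); err != nil {
			return "", err
		}
		return out.Data.RedirectURL, nil
	case 400, 401:
		return "", fmt.Errorf("request rejected by gateway: %s", string(body))
	default:
		return "", fmt.Errorf("gateway error: %s", string(body))
	}
}

func main() {
	// Fake gateway endpoint standing in for C.GatewayUrl.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusCreated)
		fmt.Fprint(w, `{"status":"success","data":{"url":"https://billing.example.com/session/abc"}}`)
	}))
	defer srv.Close()

	redirect, err := checkoutSession(context.Background(), srv.URL, "placeholder-license-key",
		&checkoutRequest{SuccessURL: "https://app.example.com/billing"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("redirect to:", redirect)
}
```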

View File

@@ -1,7 +1,7 @@
package interfaces
import (
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
)
// Connector defines methods for interaction

View File

@@ -11,9 +11,9 @@ import (
"github.com/mattn/go-sqlite3"
"github.com/uptrace/bun"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/types"
"github.com/SigNoz/signoz/ee/query-service/model"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"go.uber.org/zap"
)

View File

@@ -11,14 +11,14 @@ import (
"sync"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/types"
"go.signoz.io/signoz/pkg/types/authtypes"
baseconstants "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/telemetry"
validate "github.com/SigNoz/signoz/ee/query-service/integrations/signozio"
"github.com/SigNoz/signoz/ee/query-service/model"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/telemetry"
"go.uber.org/zap"
)
@@ -155,7 +155,7 @@ func (lm *Manager) ValidatorV3(ctx context.Context) {
tick := time.NewTicker(validationFrequency)
defer tick.Stop()
lm.ValidateV3(ctx)
_ = lm.ValidateV3(ctx)
for {
select {
case <-lm.done:
@@ -165,7 +165,7 @@ func (lm *Manager) ValidatorV3(ctx context.Context) {
case <-lm.done:
return
case <-tick.C:
lm.ValidateV3(ctx)
_ = lm.ValidateV3(ctx)
}
}
@@ -265,6 +265,10 @@ func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseRe
return license, nil
}
func (lm *Manager) GetActiveLicense() *model.LicenseV3 {
return lm.activeLicenseV3
}
// CheckFeature will be internally used by backend routines
// for feature gating
func (lm *Manager) CheckFeature(featureKey string) error {
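
Aside from explicitly discarding the `ValidateV3` return value, the manager gains a `GetActiveLicense` accessor. Below is a stripped-down sketch of the ticker-driven validation loop and the accessor, with the manager reduced to the fields the loop actually touches; everything here is a stand-in, not the real `license.Manager`:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// licenseManager is a minimal stand-in for the real Manager: just the
// pieces the validator loop uses.
type licenseManager struct {
	done          chan struct{}
	activeLicense string
}

func (lm *licenseManager) validate(ctx context.Context) error {
	// The real code calls the license server here; the caller intentionally
	// ignores the returned error.
	lm.activeLicense = "refreshed-at-" + time.Now().Format(time.RFC3339)
	return nil
}

// GetActiveLicense mirrors the new accessor added to the manager.
func (lm *licenseManager) GetActiveLicense() string {
	return lm.activeLicense
}

// validator runs validate once up front and then on every tick until done
// is closed, matching the shape of Manager.ValidatorV3.
func (lm *licenseManager) validator(ctx context.Context, every time.Duration) {
	tick := time.NewTicker(every)
	defer tick.Stop()

	_ = lm.validate(ctx)
	for {
		select {
		case <-lm.done:
			return
		case <-tick.C:
			_ = lm.validate(ctx)
		}
	}
}

func main() {
	lm := &licenseManager{done: make(chan struct{})}
	go lm.validator(context.Background(), 50*time.Millisecond)

	time.Sleep(120 * time.Millisecond)
	close(lm.done)
	fmt.Println("active license:", lm.GetActiveLicense())
}
```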

View File

@@ -7,20 +7,19 @@ import (
"os"
"os/signal"
"strconv"
"syscall"
"time"
"github.com/SigNoz/signoz/ee/query-service/app"
"github.com/SigNoz/signoz/pkg/config"
"github.com/SigNoz/signoz/pkg/config/envprovider"
"github.com/SigNoz/signoz/pkg/config/fileprovider"
"github.com/SigNoz/signoz/pkg/query-service/auth"
baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/version"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.signoz.io/signoz/ee/query-service/app"
"go.signoz.io/signoz/pkg/config"
"go.signoz.io/signoz/pkg/config/envprovider"
"go.signoz.io/signoz/pkg/config/fileprovider"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/version"
"go.signoz.io/signoz/pkg/signoz"
"go.signoz.io/signoz/pkg/types/authtypes"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@@ -150,7 +149,14 @@ func main() {
zap.L().Fatal("Failed to create config", zap.Error(err))
}
signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
signoz, err := signoz.New(
context.Background(),
config,
signoz.NewCacheProviderFactories(),
signoz.NewWebProviderFactories(),
signoz.NewSQLStoreProviderFactories(),
signoz.NewTelemetryStoreProviderFactories(),
)
if err != nil {
zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
}
@@ -198,16 +204,19 @@ func main() {
zap.L().Fatal("Failed to initialize auth cache", zap.Error(err))
}
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
signoz.Start(context.Background())
for {
select {
case status := <-server.HealthCheckStatus():
zap.L().Info("Received HealthCheck status: ", zap.Int("status", int(status)))
case <-signalsChannel:
zap.L().Fatal("Received OS Interrupt Signal ... ")
server.Stop()
}
if err := signoz.Wait(context.Background()); err != nil {
zap.L().Fatal("Failed to start signoz", zap.Error(err))
}
err = server.Stop()
if err != nil {
zap.L().Fatal("Failed to stop server", zap.Error(err))
}
err = signoz.Stop(context.Background())
if err != nil {
zap.L().Fatal("Failed to stop signoz", zap.Error(err))
}
}
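
`main` now hands the process lifecycle to the signoz struct: build it with explicit provider factories, `Start` it, block in `Wait`, and `Stop` both the HTTP server and the struct on the way out. Below is a hypothetical stand-in with the same start/wait/stop shape; the `app` type and its methods are illustrative only, not the real `pkg/signoz` API:

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"
)

// app is a hypothetical stand-in for the signoz struct built in main.
type app struct {
	stopCh chan struct{}
}

func newApp() *app { return &app{stopCh: make(chan struct{})} }

// Start kicks off background services (a no-op in this sketch).
func (a *app) Start(ctx context.Context) {}

// Wait blocks until Stop is called or an OS signal arrives, mirroring how
// main now blocks in signoz.Wait instead of its own select loop.
func (a *app) Wait(ctx context.Context) error {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
	select {
	case <-a.stopCh:
	case <-sigCh:
	case <-ctx.Done():
		return ctx.Err()
	}
	return nil
}

// Stop shuts the app down; safe to call after the channel is already closed.
func (a *app) Stop(ctx context.Context) error {
	select {
	case <-a.stopCh:
	default:
		close(a.stopCh)
	}
	return nil
}

func main() {
	ctx := context.Background()
	a := newApp()

	a.Start(ctx)
	go func() {
		// Simulate a shutdown trigger so the example terminates on its own.
		_ = a.Stop(ctx)
	}()

	if err := a.Wait(ctx); err != nil {
		log.Fatal("failed while waiting: ", err)
	}
	if err := a.Stop(ctx); err != nil {
		log.Fatal("failed to stop: ", err)
	}
}
```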

View File

@@ -1,7 +1,7 @@
package model
import (
basemodel "go.signoz.io/signoz/pkg/query-service/model"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)
// GettableInvitation overrides base object and adds precheck into

View File

@@ -3,7 +3,7 @@ package model
import (
"fmt"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)
type ApiError struct {

View File

@@ -6,8 +6,8 @@ import (
"reflect"
"time"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/pkg/errors"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
type License struct {
@@ -236,3 +236,11 @@ func ConvertLicenseV3ToLicenseV2(l *LicenseV3) *License {
}
}
type CheckoutRequest struct {
SuccessURL string `json:"url"`
}
type PortalRequest struct {
SuccessURL string `json:"url"`
}

View File

@@ -4,10 +4,10 @@ import (
"encoding/json"
"testing"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/model"
)
func TestNewLicenseV3(t *testing.T) {
@@ -97,8 +97,8 @@ func TestNewLicenseV3(t *testing.T) {
},
},
{
name: "Fallback to basic plan if license status is inactive",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
name: "Fallback to basic plan if license status is invalid",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
@@ -108,14 +108,14 @@ func TestNewLicenseV3(t *testing.T) {
"name": "TEAMS",
},
"category": "FREE",
"status": "INACTIVE",
"status": "INVALID",
"valid_from": float64(1730899309),
"valid_until": float64(-1),
},
PlanName: PlanNameBasic,
ValidFrom: 1730899309,
ValidUntil: -1,
Status: "INACTIVE",
Status: "INVALID",
IsCurrent: false,
Features: model.FeatureSet{},
},

View File

@@ -1,5 +1,7 @@
package model
import "github.com/SigNoz/signoz/pkg/types"
type User struct {
Id string `json:"id" db:"id"`
Name string `json:"name" db:"name"`
@@ -16,17 +18,8 @@ type CreatePATRequestBody struct {
}
type PAT struct {
Id string `json:"id" db:"id"`
UserID string `json:"userId" db:"user_id"`
CreatedByUser User `json:"createdByUser"`
UpdatedByUser User `json:"updatedByUser"`
Token string `json:"token" db:"token"`
Role string `json:"role" db:"role"`
Name string `json:"name" db:"name"`
CreatedAt int64 `json:"createdAt" db:"created_at"`
ExpiresAt int64 `json:"expiresAt" db:"expires_at"`
UpdatedAt int64 `json:"updatedAt" db:"updated_at"`
LastUsed int64 `json:"lastUsed" db:"last_used"`
Revoked bool `json:"revoked" db:"revoked"`
UpdatedByUserID string `json:"updatedByUserId" db:"updated_by_user_id"`
CreatedByUser User `json:"createdByUser"`
UpdatedByUser User `json:"updatedByUser"`
types.StorablePersonalAccessToken
}
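
`model.PAT` is now a thin wrapper: the storable row is embedded and only the two hydrated user fields live on the wrapper. A stand-alone sketch of how that embedding flattens into the same JSON the old flat struct produced; `StorablePAT` and `User` are trimmed stand-ins with a reduced field set:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// StorablePAT is a cut-down stand-in for types.StorablePersonalAccessToken.
type StorablePAT struct {
	ID        string `json:"id"`
	UserID    string `json:"userId"`
	Token     string `json:"token"`
	Role      string `json:"role"`
	Name      string `json:"name"`
	ExpiresAt int64  `json:"expiresAt"`
	Revoked   bool   `json:"revoked"`
}

type User struct {
	ID       string `json:"id"`
	Email    string `json:"email"`
	NotFound bool   `json:"notFound"`
}

// PAT mirrors the new model.PAT: the storable row embedded, plus the two
// users hydrated by the DAO.
type PAT struct {
	CreatedByUser User `json:"createdByUser"`
	UpdatedByUser User `json:"updatedByUser"`
	StorablePAT
}

func main() {
	row := StorablePAT{ID: "42", UserID: "u1", Token: "tok", Role: "ADMIN", Name: "ci-token"}

	// This is the wrapping step ListPATs/GetPAT now perform per row.
	p := PAT{StorablePAT: row}
	p.CreatedByUser = User{ID: "u1", Email: "a@example.com"}
	p.UpdatedByUser = User{NotFound: true}

	out, err := json.MarshalIndent(p, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```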

View File

@@ -1,8 +1,8 @@
package model
import (
"go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/constants"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)
const SSO = "SSO"

View File

@@ -11,22 +11,22 @@ import (
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/anomaly"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/ee/query-service/anomaly"
"github.com/SigNoz/signoz/pkg/query-service/cache"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/model"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
"go.signoz.io/signoz/pkg/query-service/interfaces"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.signoz.io/signoz/pkg/query-service/utils/times"
"go.signoz.io/signoz/pkg/query-service/utils/timestamp"
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/SigNoz/signoz/pkg/query-service/utils/times"
"github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"
"go.signoz.io/signoz/pkg/query-service/formatter"
"github.com/SigNoz/signoz/pkg/query-service/formatter"
baserules "go.signoz.io/signoz/pkg/query-service/rules"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
yaml "gopkg.in/yaml.v2"
)

View File

@@ -5,10 +5,10 @@ import (
"fmt"
"time"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/google/uuid"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baserules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)
@@ -28,6 +28,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
@@ -48,6 +49,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.Logger,
opts.Reader,
opts.ManagerOpts.PqlEngine,
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
@@ -68,6 +70,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.Reader,
opts.Cache,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
return task, err
@@ -126,6 +129,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.UseTraceNewSchema,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
@@ -144,6 +148,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.ManagerOpts.PqlEngine,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
@@ -160,6 +165,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.Cache,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
baserules.WithSQLStore(opts.SQLStore),
)
if err != nil {
zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", rule.Name()), zap.Error(err))

View File

@@ -7,9 +7,9 @@ import (
"fmt"
"strings"
"github.com/SigNoz/signoz/pkg/query-service/constants"
saml2 "github.com/russellhaering/gosaml2"
dsig "github.com/russellhaering/goxmldsig"
"go.signoz.io/signoz/pkg/query-service/constants"
"go.uber.org/zap"
)

View File

@@ -15,11 +15,11 @@ import (
"go.uber.org/zap"
"go.signoz.io/signoz/ee/query-service/dao"
licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
"go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils/encryption"
"github.com/SigNoz/signoz/ee/query-service/dao"
licenseserver "github.com/SigNoz/signoz/ee/query-service/integrations/signozio"
"github.com/SigNoz/signoz/ee/query-service/license"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/utils/encryption"
)
const (
@@ -165,7 +165,7 @@ func (lm *Manager) UploadUsage() {
usageData.CollectorID = usage.CollectorID
usageData.ExporterID = usage.ExporterID
usageData.Type = usage.Type
usageData.Tenant = usage.Tenant
usageData.Tenant = "default"
usageData.OrgName = orgName
usageData.TenantId = lm.tenantID
usagesPayload = append(usagesPayload, usageData)

View File

@@ -1,4 +1,4 @@
package model
package types
import (
"encoding/json"
@@ -6,15 +6,26 @@ import (
"net/url"
"strings"
"github.com/SigNoz/signoz/ee/query-service/sso"
"github.com/SigNoz/signoz/ee/query-service/sso/saml"
"github.com/SigNoz/signoz/pkg/types"
"github.com/google/uuid"
"github.com/pkg/errors"
saml2 "github.com/russellhaering/gosaml2"
"go.signoz.io/signoz/ee/query-service/sso"
"go.signoz.io/signoz/ee/query-service/sso/saml"
"go.signoz.io/signoz/pkg/types"
"github.com/uptrace/bun"
"go.uber.org/zap"
)
type StorableOrgDomain struct {
bun.BaseModel `bun:"table:org_domains"`
types.TimeAuditable
ID uuid.UUID `json:"id" bun:"id,pk,type:text"`
OrgID string `json:"orgId" bun:"org_id,type:text,notnull"`
Name string `json:"name" bun:"name,type:varchar(50),notnull,unique"`
Data string `json:"-" bun:"data,type:text,notnull"`
}
type SSOType string
const (
@@ -22,13 +33,12 @@ const (
GoogleAuth SSOType = "GOOGLE_AUTH"
)
// OrgDomain identify org owned web domains for auth and other purposes
type OrgDomain struct {
Id uuid.UUID `json:"id"`
Name string `json:"name"`
OrgId string `json:"orgId"`
SsoEnabled bool `json:"ssoEnabled"`
SsoType SSOType `json:"ssoType"`
// GettableOrgDomain identify org owned web domains for auth and other purposes
type GettableOrgDomain struct {
StorableOrgDomain
SsoEnabled bool `json:"ssoEnabled"`
SsoType SSOType `json:"ssoType"`
SamlConfig *SamlConfig `json:"samlConfig"`
GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`
@@ -36,18 +46,18 @@ type OrgDomain struct {
Org *types.Organization
}
func (od *OrgDomain) String() string {
return fmt.Sprintf("[%s]%s-%s ", od.Name, od.Id.String(), od.SsoType)
func (od *GettableOrgDomain) String() string {
return fmt.Sprintf("[%s]%s-%s ", od.Name, od.ID.String(), od.SsoType)
}
// Valid is used as a pipeline function to check if org domain
// loaded from db is valid
func (od *OrgDomain) Valid(err error) error {
func (od *GettableOrgDomain) Valid(err error) error {
if err != nil {
return err
}
if od.Id == uuid.Nil || od.OrgId == "" {
if od.ID == uuid.Nil || od.OrgID == "" {
return fmt.Errorf("both id and orgId are required")
}
@@ -55,9 +65,9 @@ func (od *OrgDomain) Valid(err error) error {
}
// ValidNew checks if the org domain is valid for insertion in db
func (od *OrgDomain) ValidNew() error {
func (od *GettableOrgDomain) ValidNew() error {
if od.OrgId == "" {
if od.OrgID == "" {
return fmt.Errorf("orgId is required")
}
@@ -69,7 +79,7 @@ func (od *OrgDomain) ValidNew() error {
}
// LoadConfig loads config params from json text
func (od *OrgDomain) LoadConfig(jsondata string) error {
func (od *GettableOrgDomain) LoadConfig(jsondata string) error {
d := *od
err := json.Unmarshal([]byte(jsondata), &d)
if err != nil {
@@ -79,21 +89,21 @@ func (od *OrgDomain) LoadConfig(jsondata string) error {
return nil
}
func (od *OrgDomain) GetSAMLEntityID() string {
func (od *GettableOrgDomain) GetSAMLEntityID() string {
if od.SamlConfig != nil {
return od.SamlConfig.SamlEntity
}
return ""
}
func (od *OrgDomain) GetSAMLIdpURL() string {
func (od *GettableOrgDomain) GetSAMLIdpURL() string {
if od.SamlConfig != nil {
return od.SamlConfig.SamlIdp
}
return ""
}
func (od *OrgDomain) GetSAMLCert() string {
func (od *GettableOrgDomain) GetSAMLCert() string {
if od.SamlConfig != nil {
return od.SamlConfig.SamlCert
}
@@ -102,7 +112,7 @@ func (od *OrgDomain) GetSAMLCert() string {
// PrepareGoogleOAuthProvider creates GoogleProvider that is used in
// requesting OAuth and also used in processing response from google
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
func (od *GettableOrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
if od.GoogleAuthConfig == nil {
return nil, fmt.Errorf("GOOGLE OAUTH is not setup correctly for this domain")
}
@@ -111,7 +121,7 @@ func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCall
}
// PrepareSamlRequest creates a request according to gosaml2
func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServiceProvider, error) {
func (od *GettableOrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServiceProvider, error) {
// this is the url Idp will call after login completion
acs := fmt.Sprintf("%s://%s/%s",
@@ -136,9 +146,9 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro
return saml.PrepareRequest(issuer, acs, sourceUrl, od.GetSAMLEntityID(), od.GetSAMLIdpURL(), od.GetSAMLCert())
}
func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
func (od *GettableOrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)
fmtDomainId := strings.Replace(od.ID.String(), "-", ":", -1)
// build redirect url from window.location sent by frontend
redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
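
`GettableOrgDomain` separates the persisted row (`StorableOrgDomain`) from the SSO configuration decoded out of its `data` column. Below is a self-contained sketch of that split with simplified stand-in types; the copy-and-assign tail of `LoadConfig` is an assumption here, since the method body is partially elided in the diff above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// StorableDomain stands in for types.StorableOrgDomain: the raw row.
type StorableDomain struct {
	ID    string `json:"id"`
	OrgID string `json:"orgId"`
	Name  string `json:"name"`
	Data  string `json:"-"` // JSON blob persisted in the data column
}

type SamlConfig struct {
	SamlEntity string `json:"samlEntity"`
	SamlIdp    string `json:"samlIdp"`
	SamlCert   string `json:"samlCert"`
}

// GettableDomain stands in for types.GettableOrgDomain: row plus decoded config.
type GettableDomain struct {
	StorableDomain
	SsoEnabled bool        `json:"ssoEnabled"`
	SsoType    string      `json:"ssoType"`
	SamlConfig *SamlConfig `json:"samlConfig"`
}

// LoadConfig unmarshals the data column into a copy of the domain and then
// adopts the result, so the identity fields from the row are preserved.
func (d *GettableDomain) LoadConfig(jsondata string) error {
	c := *d
	if err := json.Unmarshal([]byte(jsondata), &c); err != nil {
		return fmt.Errorf("failed to unmarshal json data: %v", err)
	}
	*d = c
	return nil
}

func main() {
	stored := StorableDomain{
		ID:    "d1",
		OrgID: "org1",
		Name:  "example.com",
		Data:  `{"ssoEnabled":true,"ssoType":"SAML","samlConfig":{"samlEntity":"entity","samlIdp":"https://idp.example.com","samlCert":"PEM"}}`,
	}

	// This is the hydration step GetDomain/GetDomainByName perform.
	domain := GettableDomain{StorableDomain: stored}
	if err := domain.LoadConfig(stored.Data); err != nil {
		log.Fatal(err)
	}
	fmt.Println(domain.Name, domain.SsoEnabled, domain.SsoType, domain.SamlConfig.SamlIdp)
}
```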

View File

@@ -1,30 +1,28 @@
package model
package types
import (
"fmt"
"context"
"fmt"
"net/url"
"golang.org/x/oauth2"
"github.com/SigNoz/signoz/ee/query-service/sso"
"github.com/coreos/go-oidc/v3/oidc"
"go.signoz.io/signoz/ee/query-service/sso"
"golang.org/x/oauth2"
)
// SamlConfig contains SAML params to generate and respond to the requests
// from SAML provider
type SamlConfig struct {
SamlEntity string `json:"samlEntity"`
SamlIdp string `json:"samlIdp"`
SamlCert string `json:"samlCert"`
}
// GoogleOauthConfig contains a generic config to support oauth
// GoogleOauthConfig contains a generic config to support oauth
type GoogleOAuthConfig struct {
ClientID string `json:"clientId"`
ClientSecret string `json:"clientSecret"`
RedirectURI string `json:"redirectURI"`
}
const (
googleIssuerURL = "https://accounts.google.com"
)
@@ -40,7 +38,7 @@ func (g *GoogleOAuthConfig) GetProvider(domain string, siteUrl *url.URL) (sso.OA
}
// default to email and profile scope as we just use google auth
// to verify identity and start a session.
// to verify identity and start a session.
scopes := []string{"email"}
// this is the url google will call after login completion
@@ -61,8 +59,7 @@ func (g *GoogleOAuthConfig) GetProvider(domain string, siteUrl *url.URL) (sso.OA
Verifier: provider.Verifier(
&oidc.Config{ClientID: g.ClientID},
),
Cancel: cancel,
HostedDomain: domain,
Cancel: cancel,
HostedDomain: domain,
}, nil
}
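
`GetProvider` turns the domain's stored client credentials into an OIDC verifier and an `oauth2.Config` used to start the login flow. The sketch below covers only the oauth2 half, so no network call to the issuer is made: `google.Endpoint` stands in for the discovered OIDC endpoint, the redirect path is illustrative, and the client credentials are placeholders:

```go
package main

import (
	"fmt"
	"net/url"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	siteURL, err := url.Parse("https://signoz.example.com/")
	if err != nil {
		panic(err)
	}

	// Redirect URL Google calls after login, built from the site URL the
	// frontend sends (the path here is illustrative).
	redirect := fmt.Sprintf("%s://%s/api/v1/complete/google", siteURL.Scheme, siteURL.Host)

	cfg := oauth2.Config{
		ClientID:     "placeholder-client-id",
		ClientSecret: "placeholder-client-secret",
		RedirectURL:  redirect,
		Endpoint:     google.Endpoint,
		// Email scope only: the login is used to verify identity and start
		// a session, not to read profile data.
		Scopes: []string{"email"},
	}

	// The hosted-domain hint restricts the account picker to the org domain.
	authURL := cfg.AuthCodeURL("random-state", oauth2.SetAuthURLParam("hd", "example.com"))
	fmt.Println(authURL)
}
```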

Some files were not shown because too many files have changed in this diff.